diff --git a/.golangci.yml b/.golangci.yml index 020a7ff63..c364617dd 100644 --- a/.golangci.yml +++ b/.golangci.yml @@ -1,40 +1,32 @@ -issues: - exclude-files: - - ".*gen.go" - - ".pb.go" - - ".pb.validate.go" - - ".*/generated_[A-Za-z_]*\\.go" - exclude-dirs: - - ".*fakes" # Skip mock directories -run: - timeout: 3m +version: "2" linters: - # By default, the enabled linters are: - # errcheck, gosimple, govet, ineffassign, staticcheck, unused enable: - bodyclose - - cyclop - - copyloopvar - contextcheck + - copyloopvar + - cyclop - dupl + - decorder - errorlint - exhaustive - forcetypeassert + - funcorder - gocheckcompilerdirectives - gocognit - goconst - gocritic - gocyclo - godox - - gofumpt - goheader - gomoddirectives - gosec - grouper - importas - inamedparam + - intrange - ireturn - lll + - loggercheck - maintidx - makezero - mirror @@ -47,7 +39,9 @@ linters: - nilnil - nlreturn - noctx +# - nolintlint - nosprintfhostport + - perfsprint - prealloc - predeclared - promlinter @@ -56,815 +50,585 @@ linters: - revive - rowserrcheck - sloglint + - spancheck - sqlclosecheck - - stylecheck + - staticcheck - tagalign - testableexamples - testifylint - thelper - unconvert - unparam + - usetesting - usestdlibvars - wastedassign - whitespace - zerologlint + settings: + cyclop: + max-complexity: 10 + package-average: 0 + errorlint: + errorf: true + errorf-multi: false + asserts: false + comparison: false + gocognit: + min-complexity: 20 + goconst: + match-constant: true + min-len: 2 + min-occurrences: 3 + numbers: false + min: 3 + max: 3 + ignore-calls: true + gocritic: + settings: + captLocal: + paramsOnly: false + elseif: + skipBalanced: true + ifElseChain: + minThreshold: 4 + gocyclo: + min-complexity: 20 + godox: + keywords: + - TODO + - BUG + - FIXME + - HACK + goheader: + template: |- + Copyright (c) F5, Inc. -# Allows configuring linters that support configuration. -# Refer to full list of linters here: https://golangci-lint.run/usage/linters/. 
-linters-settings: - unused: - # Mark all struct fields that have been written to as used. Default: true - field-writes-are-uses: true - # Treat IncDec statement (e.g. `i++` or `i--`) as both read and write operation instead of just write. Default: false - post-statements-are-reads: true - # Mark all exported fields as used. Default: true - exported-fields-are-used: false - # Mark all function parameters as used. Default: true - parameters-are-used: true - # Mark all local variables as used. Default: true - local-variables-are-used: false - # Mark all identifiers inside generated files as used. Default: true - generated-is-used: true - cyclop: - # The maximal code complexity to report. Default: 10 - max-complexity: 10 - # The maximal average package complexity. If it's higher than 0.0 (float) the check is enabled. Default: 0.0 - package-average: 0.0 - skip-tests: true - govet: - settings: - shadow: - # Report about shadowed variables. Default: false - strict: true - # Each analyzer can be configured separately in the 'settings:' section. - # settings: - # Run `go tool vet help` to see all analyzers. Default: false - enable-all: true - errorlint: - # Check whether fmt.Errorf uses the %w verb for formatting errors. - # See the https://github.com/polyfloyd/go-errorlint for caveats. Default: true - errorf: true - # Permit more than 1 %w verb, valid per Go 1.20 (Requires errorf:true) Default: true - errorf-multi: false - # Check for plain type assertions and type switches. Default: true - asserts: false - # Check for plain error comparisons. Default: true - comparison: false - gci: - # Enable custom order of sections. If `true`, make the section order the same as the order of `sections`. - # Default: false - custom-order: true - # Section configuration to compare against. - # Section names are case-insensitive and may contain parameters in (). 
- # The default order of sections is `standard > default > custom > blank > dot`, - # If `custom-order` is `true`, it follows the order of `sections` option. - # Default: ["standard", "default"] - sections: - - standard # Standard section: captures all standard packages. - - default # Default section: contains all imports that could not be matched to another section type. - - prefix(github.com/nginx/agent) # Custom section: groups all imports with the specified Prefix. - - blank # Blank section: contains all blank imports. This section is not present unless explicitly enabled. - - dot # Dot section: contains all dot imports. This section is not present unless explicitly enabled. - # Skip generated files. - # Default: true - skip-generated: true - gocognit: - # Minimal code complexity to report. Default: 30 (but we recommend 10-20) - min-complexity: 20 - goconst: - # Minimal length of string constant. Default: 3 - min-len: 2 - # Minimum occurrences of constant string count to trigger issue. Default: 3 - min-occurrences: 3 - # Ignore test files. Default: false - ignore-tests: true - # Look for existing constants matching the values. Default: true - match-constant: true - # Search also for duplicated numbers. Default: false - numbers: false - # Minimum value, only works with goconst.numbers. Default: 3 - min: 3 - # Maximum value, only works with goconst.numbers. Default: 3 - max: 3 - # Ignore when constant is not used as function argument. Default: true - ignore-calls: true - gocritic: - # Which checks should be enabled; can't be combined with 'disabled-checks'. - # See https://go-critic.github.io/overview#checks-overview. - # To check which checks are enabled run `GL_DEBUG=gocritic golangci-lint run`. - # By default, list of stable checks is used. - # enabled-checks: - # - nestingReduce - # - unnamedResult - # - ruleguard - # - truncateCmp - # Which checks should be disabled; can't be combined with 'enabled-checks'. 
- # Default: [] - disabled-checks: [] - # Settings passed to gocritic. - # The settings key is the name of a supported gocritic checker. - # The list of supported checkers can be find in https://go-critic.github.io/overview. - settings: - # Must be valid enabled check name. - captLocal: - # Whether to restrict checker to params only. Default: true - paramsOnly: false - elseif: - # Whether to skip balanced if-else pairs. Default: true - skipBalanced: true - ifElseChain: - # Min number of if-else blocks that makes the warning trigger. Default: 2 - minThreshold: 4 - gocyclo: - # Minimal code complexity to report. Default: 30 (but we recommend 10-20) - min-complexity: 20 - godox: - # Report any comments starting with keywords, this is useful for TODO or FIXME comments that - # might be left in the code accidentally and should be resolved before merging. Default: ["TODO", "BUG", "FIXME"] - keywords: - - TODO - - BUG - - FIXME - - HACK - gofumpt: - # Module path which contains the source code being formatted. Default: "" - module-path: github.com/nginx/agent - # Choose whether to use the extra rules. Default: false - extra-rules: true - goheader: - # The template use for checking. - # Default: "" - template: |- - Copyright (c) F5, Inc. - - This source code is licensed under the Apache License, Version 2.0 license found in the - LICENSE file in the root directory of this source tree. - gomoddirectives: - # Allow local `replace` directives. Default: false - replace-local: false - # List of allowed `replace` directives. Default: [] - replace-allow-list: [] - # Allow to not explain why the version has been retracted in the `retract` directives. Default: false - retract-allow-no-explanation: false - # Forbid the use of the `exclude` directives. Default: false - exclude-forbidden: false - gosec: - # To select a subset of rules to run. - # Available rules: https://github.com/securego/gosec#available-rules. 
Default: [] - means include all rules - includes: - - G101 # Look for hard coded credentials - - G102 # Bind to all interfaces - - G103 # Audit the use of unsafe block - - G104 # Audit errors not checked - - G106 # Audit the use of ssh.InsecureIgnoreHostKey - - G107 # Url provided to HTTP request as taint input - - G108 # Profiling endpoint automatically exposed on /debug/pprof - - G109 # Potential Integer overflow made by strconv.Atoi result conversion to int16/32 - - G110 # Potential DoS vulnerability via decompression bomb - - G111 # Potential directory traversal - - G112 # Potential slowloris attack - - G113 # Usage of Rat.SetString in math/big with an overflow (CVE-2022-23772) - - G114 # Use of net/http serve function that has no support for setting timeouts - - G201 # SQL query construction using format string - - G202 # SQL query construction using string concatenation - - G203 # Use of unescaped data in HTML templates - - G204 # Audit use of command execution - - G301 # Poor file permissions used when creating a directory - - G302 # Poor file permissions used with chmod - - G303 # Creating tempfile using a predictable path - - G304 # File path provided as taint input - - G305 # File traversal when extracting zip/tar archive - - G306 # Poor file permissions used when writing to a new file - - G307 # Poor file permissions used when creating a file with host.Create - - G401 # Detect the usage of DES, RC4, MD5 or SHA1 - - G402 # Look for bad TLS connection settings - - G403 # Ensure minimum RSA key length of 2048 bits - - G404 # Insecure random number source (rand) - - G501 # Import blocklist: crypto/md5 - - G502 # Import blocklist: crypto/des - - G503 # Import blocklist: crypto/rc4 - - G504 # Import blocklist: net/http/cgi - - G505 # Import blocklist: crypto/sha1 - - G602 # Slice access out of bounds - # Exclude generated files - # Default: false - exclude-generated: true - # Filter out the issues with a lower severity than the given value. 
- # Valid options are: low, medium, high. - # Default: low - severity: low - # Filter out the issues with a lower confidence than the given value. - # Valid options are: low, medium, high. Default: low - confidence: medium - # To specify the configuration of rules. - config: - # Globals are applicable to all rules. - global: - # If true, ignore #nosec in comments (and an alternative as well). - # Default: false - nosec: false - # Add an alternative comment prefix to #nosec (both will work at the same time). Default: "" - "#nosec": "" - # Define whether nosec issues are counted as finding or not. Default: false - show-ignored: false - # Audit mode enables addition checks that for normal code analysis might be too nosy. Default: false - audit: false - G101: - # Regexp pattern for variables and constants to find. - # Default: "(?i)passwd|pass|password|pwd|secret|token|pw|apiKey|bearer|cred" - pattern: "(?i)passwd|pass|password|pwd|secret|token|pw|apiKey|bearer|cred" - # If true, complain about all cases (even with low entropy). Default: false - ignore_entropy: false - # Maximum allowed entropy of the string. Default: "80.0" - entropy_threshold: "80.0" - # Maximum allowed value of entropy/string length. - # Is taken into account if entropy >= entropy_threshold/2. Default: "3.0" - per_char_threshold: "3.0" - # Calculate entropy for first N chars of the string. Default: "16" - truncate: "16" - G111: - # Regexp pattern to find potential directory traversal. Default: "http\\.Dir\\(\"\\/\"\\)|http\\.Dir\\('\\/'\\)" - pattern: "http\\.Dir\\(\"\\/\"\\)|http\\.Dir\\('\\/'\\)" - # Maximum allowed permissions mode for host.Mkdir and host.MkdirAll. Default: "0750" - G301: "0750" - # Maximum allowed permissions mode for host.OpenFile and host.Chmod. Default: "0600" - G302: "0600" - # Maximum allowed permissions mode for host.WriteFile and ioutil.WriteFile. Default: "0600" - G306: "0600" - grouper: - # Require the use of a single global 'const' declaration only. 
Default: false - const-require-single-const: false - # Require the use of grouped global 'const' declarations. Default: false - const-require-grouping: false - # Require the use of a single 'import' declaration only. Default: false - import-require-single-import: false - # Require the use of grouped 'import' declarations. Default: false - import-require-grouping: false - # Require the use of a single global 'type' declaration only. Default: false - type-require-single-type: false - # Require the use of grouped global 'type' declarations. Default: false - type-require-grouping: false - # Require the use of a single global 'var' declaration only. Default: false - var-require-single-var: false - # Require the use of grouped global 'var' declarations. - # Default: false - var-require-grouping: false - importas: - # Do not allow unaliased imports of aliased packages. Default: false - no-unaliased: true - # Do not allow non-required aliases. Default: false - no-extra-aliases: false - # List of aliases. Default: [] - alias: - - pkg: "go.opentelemetry.io/otel/sdk/metric" - alias: "metricSdk" - interfacebloat: - # The maximum number of methods allowed for an interface. Default: 10 - max: 10 - ireturn: - # ireturn does not allow using `allow` and `reject` settings at the same time. - # Both settings are lists of the keywords and regular expressions matched to interface or package names. - # keywords: - # - `empty` for `interface{}` - # - `error` for errors - # - `stdlib` for standard library - # - `anon` for anonymous interfaces - # - `generic` for generic interfaces added in go 1.18 - # By default, it allows using errors, empty interfaces, anonymous interfaces, - # and interfaces provided by the standard library. - allow: - - anon - - error - - empty - - stdlib - - google.golang.org/grpc/credentials - - github.com/testcontainers/testcontainers-go - - google.golang.org/grpc - lll: - # Max line length, lines longer will be reported. 
- # '\t' is counted as 1 character by default, and can be changed with the tab-width option. - # Default: 120. - line-length: 120 - # Tab width in spaces. - # Default: 1 - tab-width: 4 - maintidx: - # Show functions with maintainability index lower than N. - # A high index indicates better maintainability (it's kind of the opposite of complexity). Default: 20 - under: 20 - makezero: - # Allow only slices initialized with a length of zero. Default: false - always: false - misspell: - # Correct spellings using locale preferences for US or UK. - # Setting locale to US will correct the British spelling of 'colour' to 'color'. - # Default is to use a neutral variety of English. - locale: "US" - # Default: [] - ignore-words: [] - nakedret: - # Make an issue if func has more lines of code than this setting, and it has naked returns. Default: 30 - max-func-lines: 31 - nestif: - # Minimal complexity of if statements to report. Default: 5 - min-complexity: 5 - nilnil: - # Checks that there is no simultaneous return of `nil` error and an invalid value. - # Default: ["ptr", "func", "iface", "map", "chan"] - checked-types: - - ptr - - func - - iface - - map - - chan - nlreturn: - # Size of the block (including return statement that is still "OK") so no return split required. Default: 1 - block-size: 2 - prealloc: - # IMPORTANT: we don't recommend using this linter before doing performance profiling. - # For most programs usage of prealloc will be a premature optimization. - # Report pre-allocation suggestions only on simple loops that have no returns/breaks/continues/gotos in them. - # Default: true - simple: true - # Report pre-allocation suggestions on range loops. - # Default: true - range-loops: true - # Report pre-allocation suggestions on for loops. - # Default: false - for-loops: false - predeclared: - # Comma-separated list of predeclared identifiers to not report on. Default: "" - ignore: "" - # Include method names and field names (i.e., qualified names) in checks. 
Default: false - q: false - promlinter: - # Promlinter cannot infer all metrics name in static analysis. - # Enable strict mode will also include the errors caused by failing to parse the args. - # Default: false - strict: false - # Please refer to https://github.com/yeya24/promlinter#usage for detailed usage. - # Default: [] - disabled-linters: [] - reassign: - # Patterns for global variable names that are checked for reassignment. - # See https://github.com/curioswitch/go-reassign#usage - # Default: ["EOF", "Err.*"] - patterns: - - "Err.*" - - "EOF" - revive: - # Maximum number of open files at the same time. Defaults to unlimited. - max-open-files: 2048 - # When set to false, ignores files with "GENERATED" header, similar to golint. Default: false - ignore-generated-header: false - # Sets the default severity. Default: warning - severity: warning - # Enable all available rules. Default: false - enable-all-rules: false - # Sets the default failure confidence. This means that linting errors with less than 0.8 confidence will be ignored. - # Default: 0.8 - confidence: 0.8 + This source code is licensed under the Apache License, Version 2.0 license found in the + LICENSE file in the root directory of this source tree. 
+ gomoddirectives: + replace-local: false + exclude-forbidden: false + retract-allow-no-explanation: false + gosec: + includes: + - G101 + - G102 + - G103 + - G104 + - G106 + - G107 + - G108 + - G109 + - G110 + - G111 + - G112 + - G113 + - G114 + - G201 + - G202 + - G203 + - G204 + - G301 + - G302 + - G303 + - G304 + - G305 + - G306 + - G307 + - G401 + - G402 + - G403 + - G404 + - G501 + - G502 + - G503 + - G504 + - G505 + - G602 + severity: low + confidence: medium + config: + G101: + entropy_threshold: "80.0" + ignore_entropy: false + pattern: (?i)passwd|pass|password|pwd|secret|token|pw|apiKey|bearer|cred + per_char_threshold: "3.0" + truncate: "16" + G111: + pattern: http\.Dir\("\/"\)|http\.Dir\('\/'\) + G301: "0750" + G302: "0600" + G306: "0600" + global: + '#nosec': "" + audit: false + nosec: false + show-ignored: false + govet: + enable-all: true + settings: + shadow: + strict: true + grouper: + const-require-single-const: false + const-require-grouping: false + import-require-single-import: false + import-require-grouping: false + type-require-single-type: false + type-require-grouping: false + var-require-single-var: false + var-require-grouping: false + importas: + alias: + - pkg: go.opentelemetry.io/otel/sdk/metric + alias: metricSdk + no-unaliased: true + no-extra-aliases: false + interfacebloat: + max: 10 + ireturn: + allow: + - anon + - error + - empty + - stdlib + - google.golang.org/grpc/credentials + - github.com/testcontainers/testcontainers-go + - google.golang.org/grpc + lll: + line-length: 120 + tab-width: 4 + maintidx: + under: 20 + makezero: + always: false + misspell: + locale: US + nakedret: + max-func-lines: 31 + nestif: + min-complexity: 5 + nilnil: + checked-types: + - ptr + - func + - iface + - map + - chan + nlreturn: + block-size: 2 +# nolintlint: +# # Disable to ensure that all nolint directives actually have an effect. +# # Default: false +# allow-unused: true +# # Exclude following linters from requiring an explanation. 
+# # Default: [] +# allow-no-explanation: [ ] +# # Enable to require an explanation of nonzero length after each nolint directive. +# # Default: false +# require-explanation: true +# # Enable to require nolint directives to mention the specific linter being suppressed. +# # Default: false +# require-specific: true + prealloc: + simple: true + range-loops: true + for-loops: false + predeclared: + qualified-name: false + promlinter: + strict: false + reassign: + patterns: + - Err.* + - EOF + revive: + max-open-files: 2048 + confidence: 0.8 + severity: warning + enable-all-rules: false + rules: + - name: add-constant + arguments: + - allowFloats: 0.0,0.,1.0,1.,2.0,2. + allowInts: 0,1,2 + allowStrs: '""' + ignoreFuncs: host\.* + maxLitCount: "3" + severity: warning + disabled: true + - name: argument-limit + arguments: + - 5 + severity: warning + disabled: false + - name: atomic + severity: warning + disabled: false + - name: banned-characters + arguments: + - Ω + - Σ + - σ + severity: warning + disabled: false + - name: bare-return + severity: warning + disabled: false + - name: blank-imports + severity: warning + disabled: false + - name: bool-literal-in-expr + severity: warning + disabled: false + - name: call-to-gc + severity: warning + disabled: false + - name: cognitive-complexity + arguments: + - 10 + severity: warning + disabled: false + - name: comment-spacings + severity: warning + disabled: true + - name: confusing-naming + severity: warning + disabled: false + - name: confusing-results + severity: warning + disabled: false + - name: constant-logical-expr + severity: warning + disabled: false + - name: context-as-argument + arguments: + - allowTypesBefore: '*testing.T' + severity: warning + disabled: false + - name: context-keys-type + severity: warning + disabled: false + - name: cyclomatic + arguments: + - 10 + severity: warning + disabled: false + - name: datarace + severity: warning + disabled: false + - name: deep-exit + severity: warning + disabled: 
false + - name: defer + arguments: + - - call-chain + - loop + severity: warning + disabled: false + - name: dot-imports + severity: warning + disabled: false + - name: duplicated-imports + severity: warning + disabled: false + - name: early-return + arguments: + - preserveScope + severity: warning + disabled: false + - name: empty-block + severity: warning + disabled: false + - name: empty-lines + severity: warning + disabled: false + - name: enforce-map-style + arguments: + - make + severity: warning + disabled: false + - name: error-naming + severity: warning + disabled: false + - name: error-return + severity: warning + disabled: false + - name: error-strings + severity: warning + disabled: false + - name: errorf + severity: warning + disabled: false + - name: flag-parameter + severity: warning + disabled: false + - name: function-result-limit + arguments: + - 3 + severity: warning + disabled: false + - name: get-return + severity: warning + disabled: false + - name: identical-branches + severity: warning + disabled: false + - name: if-return + severity: warning + disabled: false + - name: increment-decrement + severity: warning + disabled: false + - name: indent-error-flow + arguments: + - preserveScope + severity: warning + disabled: false + - name: import-alias-naming + arguments: + - ^[a-z][a-zA-Z0-9]{0,}$ + severity: warning + disabled: false + - name: imports-blocklist + arguments: + - crypto/md5 + - crypto/sha1 + severity: warning + disabled: false + - name: import-shadowing + severity: warning + disabled: false + - name: max-public-structs + arguments: + - 3 + severity: warning + disabled: true + - name: modifies-parameter + severity: warning + disabled: false + - name: modifies-value-receiver + severity: warning + disabled: false + - name: nested-structs + severity: warning + disabled: false + - name: optimize-operands-order + severity: warning + disabled: false + - name: package-comments + severity: warning + disabled: false + - name: range + 
severity: warning + disabled: false + - name: range-val-in-closure + severity: warning + disabled: false + - name: range-val-address + severity: warning + disabled: false + - name: receiver-naming + severity: warning + disabled: false + - name: redundant-import-alias + severity: warning + disabled: false + - name: redefines-builtin-id + severity: warning + disabled: false + - name: string-of-int + severity: warning + disabled: false + - name: string-format + arguments: + - - core.WriteError[1].Message + - /^([^A-Z]|$)/ + - must not start with a capital letter + - - fmt.Errorf[0] + - /(^|[^\.!?])$/ + - must not end in punctuation + - - panic + - /^[^\n]*$/ + - must not contain line breaks + severity: warning + disabled: true + - name: struct-tag + arguments: + - json,inline + - bson,outline,gnu + severity: warning + disabled: false + - name: superfluous-else + arguments: + - preserveScope + severity: warning + disabled: false + - name: time-equal + severity: warning + disabled: false + - name: time-naming + severity: warning + disabled: false + - name: var-naming + arguments: + - - ID + - - VM + - - upperCaseConst: true + severity: warning + disabled: true + - name: var-declaration + severity: warning + disabled: false + - name: unconditional-recursion + severity: warning + disabled: false + - name: unexported-naming + severity: warning + disabled: false + - name: unexported-return + severity: warning + disabled: false + - name: unhandled-error + arguments: + - fmt.Printf + - myFunction + severity: warning + disabled: true + - name: unnecessary-stmt + severity: warning + disabled: false + - name: unreachable-code + severity: warning + disabled: false + - name: unused-parameter + arguments: + - allowRegex: ^_ + severity: warning + disabled: true + - name: unused-receiver + arguments: + - allowRegex: ^_ + severity: warning + disabled: true + - name: useless-break + severity: warning + disabled: false + - name: waitgroup-by-value + severity: warning + disabled: false + 
rowserrcheck: + packages: + - github.com/jmoiron/sqlx + sloglint: + no-mixed-args: true + kv-only: false + attr-only: false + context: "scope" + static-msg: false + no-raw-keys: false + key-naming-case: snake + args-on-sep-lines: false + tagalign: + align: true + sort: true + order: + - json + - yaml + - yml + - toml + - mapstructure + - binding + - validate + strict: false + testifylint: + enable-all: true + suite-extra-assert-call: + mode: remove + thelper: + test: + first: true + name: false + begin: true + fuzz: + first: true + name: false + begin: true + benchmark: + first: true + name: false + begin: true + tb: + first: true + name: false + begin: true + unparam: + check-exported: true + unused: + field-writes-are-uses: true + post-statements-are-reads: true + exported-fields-are-used: false + parameters-are-used: true + local-variables-are-used: false + generated-is-used: true + usestdlibvars: + http-method: true + http-status-code: true + time-weekday: true + time-month: true + time-layout: true + crypto-hash: true + default-rpc-path: true + sql-isolation-level: true + tls-signature-scheme: true + constant-kind: true + whitespace: + multi-if: false + multi-func: false + exclusions: + generated: lax + presets: + - comments + - common-false-positives + - legacy + - std-error-handling rules: - # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#add-constant: - - name: add-constant - severity: warning - disabled: true - arguments: - - maxLitCount: "3" - allowStrs: '""' - allowInts: "0,1,2" - allowFloats: "0.0,0.,1.0,1.,2.0,2." 
- ignoreFuncs: "host\\.*" - # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#argument-limit - - name: argument-limit - severity: warning - disabled: false - arguments: [ 5 ] - # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#atomic - - name: atomic - severity: warning - disabled: false - # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#banned-characters - - name: banned-characters - severity: warning - disabled: false - arguments: [ "Ω", "Σ", "σ"] - # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#bare-return - - name: bare-return - severity: warning - disabled: false - # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#blank-imports - - name: blank-imports - severity: warning - disabled: false - # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#bool-literal-in-expr - - name: bool-literal-in-expr - severity: warning - disabled: false - # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#call-to-gc - - name: call-to-gc - severity: warning - disabled: false - # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#cognitive-complexity - - name: cognitive-complexity - severity: warning - disabled: false - arguments: [ 10 ] - # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#comment-spacings - - name: comment-spacings - severity: warning - disabled: true - arguments: [] - # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#confusing-naming - - name: confusing-naming - severity: warning - disabled: false - # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#confusing-results - - name: confusing-results - severity: warning - disabled: false - # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#constant-logical-expr - - name: constant-logical-expr - severity: warning - disabled: false - # 
https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#context-as-argument - - name: context-as-argument - severity: warning - disabled: false - arguments: - - allowTypesBefore: "*testing.T" - # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#context-keys-type - - name: context-keys-type - severity: warning - disabled: false - # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#cyclomatic - - name: cyclomatic - severity: warning - disabled: false - arguments: [ 10 ] - # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#datarace - - name: datarace - severity: warning - disabled: false - # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#deep-exit - - name: deep-exit - severity: warning - disabled: false - # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#defer - - name: defer - severity: warning - disabled: false - arguments: - - [ "call-chain", "loop" ] - # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#dot-imports - - name: dot-imports - severity: warning - disabled: false - # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#duplicated-imports - - name: duplicated-imports - severity: warning - disabled: false - # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#early-return - - name: early-return - severity: warning - disabled: false - arguments: - - "preserveScope" - # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#empty-block - - name: empty-block - severity: warning - disabled: false - # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#empty-lines - - name: empty-lines - severity: warning - disabled: false - # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#enforce-map-style - - name: enforce-map-style - severity: warning - disabled: false - arguments: - - "make" - # 
https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#error-naming - - name: error-naming - severity: warning - disabled: false - # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#error-return - - name: error-return - severity: warning - disabled: false - # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#error-strings - - name: error-strings - severity: warning - disabled: false - # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#errorf - - name: errorf - severity: warning - disabled: false - # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#flag-parameter - - name: flag-parameter - severity: warning - disabled: false - # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#function-result-limit - - name: function-result-limit - severity: warning - disabled: false - arguments: [ 3 ] - # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#get-return - - name: get-return - severity: warning - disabled: false - # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#identical-branches - - name: identical-branches - severity: warning - disabled: false - # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#if-return - - name: if-return - severity: warning - disabled: false - # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#increment-decrement - - name: increment-decrement - severity: warning - disabled: false - # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#indent-error-flow - - name: indent-error-flow - severity: warning - disabled: false - arguments: - - "preserveScope" - # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#import-alias-naming - - name: import-alias-naming - severity: warning - disabled: false - arguments: - - "^[a-z][a-zA-Z0-9]{0,}$" - # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#imports-blacklist - - name: 
imports-blacklist - severity: warning - disabled: false - arguments: - - "crypto/md5" - - "crypto/sha1" - # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#import-shadowing - - name: import-shadowing - severity: warning - disabled: false - # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#max-public-structs - - name: max-public-structs - severity: warning - disabled: true - arguments: [ 3 ] - # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#modifies-parameter - - name: modifies-parameter - severity: warning - disabled: false - # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#modifies-value-receiver - - name: modifies-value-receiver - severity: warning - disabled: false - # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#nested-structs - - name: nested-structs - severity: warning - disabled: false - # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#optimize-operands-order - - name: optimize-operands-order - severity: warning - disabled: false - # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#package-comments - - name: package-comments - severity: warning - disabled: false - # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#range - - name: range - severity: warning - disabled: false - # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#range-val-in-closure - - name: range-val-in-closure - severity: warning - disabled: false - # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#range-val-address - - name: range-val-address - severity: warning - disabled: false - # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#receiver-naming - - name: receiver-naming - severity: warning - disabled: false - # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#redundant-import-alias - - name: redundant-import-alias - severity: warning - disabled: false 
- # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#redefines-builtin-id - - name: redefines-builtin-id - severity: warning - disabled: false - # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#string-of-int - - name: string-of-int - severity: warning - disabled: false - # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#string-format - - name: string-format - severity: warning - disabled: true - arguments: - - - 'core.WriteError[1].Message' - - '/^([^A-Z]|$)/' - - must not start with a capital letter - - - 'fmt.Errorf[0]' - - '/(^|[^\.!?])$/' - - must not end in punctuation - - - panic - - '/^[^\n]*$/' - - must not contain line breaks - # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#struct-tag - - name: struct-tag - arguments: - - "json,inline" - - "bson,outline,gnu" - severity: warning - disabled: false - # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#superfluous-else - - name: superfluous-else - severity: warning - disabled: false - arguments: - - "preserveScope" - # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#time-equal - - name: time-equal - severity: warning - disabled: false - # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#time-naming - - name: time-naming - severity: warning - disabled: false - # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#var-naming - - name: var-naming - severity: warning - disabled: true - arguments: - - [ "ID" ] # AllowList - - [ "VM" ] # DenyList - - - upperCaseConst: true - # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#var-declaration - - name: var-declaration - severity: warning - disabled: false - # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#unconditional-recursion - - name: unconditional-recursion - severity: warning - disabled: false - # 
https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#unexported-naming - - name: unexported-naming - severity: warning - disabled: false - # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#unexported-return - - name: unexported-return - severity: warning - disabled: false - # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#unhandled-error - - name: unhandled-error - severity: warning - disabled: true - arguments: - - "fmt.Printf" - - "myFunction" - # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#unnecessary-stmt - - name: unnecessary-stmt - severity: warning - disabled: false - # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#unreachable-code - - name: unreachable-code - severity: warning - disabled: false - # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#unused-parameter - - name: unused-parameter - severity: warning - disabled: true - arguments: - - allowRegex: "^_" - # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#unused-receiver - - name: unused-receiver - severity: warning - disabled: true - arguments: - - allowRegex: "^_" - # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#useless-break - - name: useless-break - severity: warning - disabled: false - # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#waitgroup-by-value - - name: waitgroup-by-value - severity: warning - disabled: false - rowserrcheck: - # database/sql is always checked - # Default: [] - packages: - - github.com/jmoiron/sqlx - sloglint: - # Enforce not mixing key-value pairs and attributes. Default: true - no-mixed-args: true - # Enforce using key-value pairs only (overrides no-mixed-args, incompatible with attr-only). Default: false - kv-only: false - # Enforce using attributes only (overrides no-mixed-args, incompatible with kv-only). Default: false - attr-only: false - # Enforce using methods that accept a context. 
Default: false - context: "" - # Enforce using static values for log messages. Default: false - static-msg: false - # Enforce using constants instead of raw keys. Default: false - no-raw-keys: false - # Enforce a single key naming convention. Values: snake, kebab, camel, pascal. Default: "" - key-naming-case: snake - # Enforce putting arguments on separate lines. Default: false - args-on-sep-lines: false - tagalign: - # Whether enable align. If true, the struct tags will be aligned. Default: true. - align: true - # If true, the tags will be sorted by name in ascending order. - # eg: `xml:"bar" json:"bar" validate:"required"` -> `json:"bar" validate:"required" xml:"bar"` - # Default: true - sort: true - # Specify the order of tags, the other tags will be sorted by name. - # This option will be ignored if `sort` is false. - # Default: [] - order: - - json - - yaml - - yml - - toml - - mapstructure - - binding - - validate - # Whether enable strict style. - # In this style, the tags will be sorted and aligned in the dictionary order, - # and the tags with the same name will be aligned together. - # Note: This option will be ignored if 'align' or 'sort' is false. - # Default: false - strict: false - testifylint: - # Enable all checkers (https://github.com/Antonboom/testifylint#checkers). - # Default: false - enable-all: true - suite-extra-assert-call: - # To require or remove extra Assert() call? - # Default: remove - mode: remove - thelper: - test: - # Check *testing.T is first param (or after context.Context) of helper function. - # Default: true - first: true - # Check *testing.T param has name t. Default: true - name: false - # Check t.Helper() begins helper function. Default: true - begin: true - benchmark: - # Check *testing.B is first param (or after context.Context) of helper function. Default: true - first: true - # Check *testing.B param has name b. Default: true - name: false - # Check b.Helper() begins helper function. 
Default: true - begin: true - tb: - # Check *testing.TB is first param (or after context.Context) of helper function. Default: true - first: true - # Check *testing.TB param has name tb. Default: true - name: false - # Check tb.Helper() begins helper function. Default: true - begin: true - fuzz: - # Check *testing.F is first param (or after context.Context) of helper function. Default: true - first: true - # Check *testing.F param has name f. Default: true - name: false - # Check f.Helper() begins helper function. Default: true - begin: true - unparam: - # Inspect exported functions. Default: false - check-exported: true - usestdlibvars: - # Suggest the use of http.MethodXX. Default: true - http-method: true - # Suggest the use of http.StatusXX. Default: true - http-status-code: true - # Suggest the use of time.Weekday.String(). Default: true - time-weekday: true - # Suggest the use of time.Month.String(). Default: false - time-month: true - # Suggest the use of time.Layout. Default: false - time-layout: true - # Suggest the use of crypto.Hash.String(). Default: false - crypto-hash: true - # Suggest the use of rpc.DefaultXXPath. Default: false - default-rpc-path: true - # Suggest the use of sql.LevelXX.String(). Default: false - sql-isolation-level: true - # Suggest the use of tls.SignatureScheme.String(). Default: false - tls-signature-scheme: true - # Suggest the use of constant.Kind.String(). Default: false - constant-kind: true - whitespace: - # Enforces newlines (or comments) after every multi-line if statement. Default: false - multi-if: false - # Enforces newlines (or comments) after every multi-line function signature. 
Default: false - multi-func: false + - linters: + - cyclop + - goconst + path: (.+)_test\.go + paths: + - .*gen.go + - .pb.go + - .pb.validate.go + - .*/generated_[A-Za-z_]*\.go + - .*fakes + - third_party$ + - builtin$ + - examples$ +formatters: + enable: + - gofumpt + settings: + gci: + sections: + - standard + - default + - prefix(github.com/nginx/agent) + - blank + - dot + custom-order: true + gofumpt: + module-path: github.com/nginx/agent + extra-rules: true + exclusions: + generated: lax + paths: + - .*gen.go + - .pb.go + - .pb.validate.go + - .*/generated_[A-Za-z_]*\.go + - .*fakes + - third_party$ + - builtin$ + - examples$ diff --git a/Makefile b/Makefile index d27ae81da..aaf6b6168 100644 --- a/Makefile +++ b/Makefile @@ -161,13 +161,13 @@ integration-test: $(SELECTED_PACKAGE) build-mock-management-plane-grpc TEST_ENV="Container" CONTAINER_OS_TYPE=$(CONTAINER_OS_TYPE) BUILD_TARGET="install-agent-local" CONTAINER_NGINX_IMAGE_REGISTRY=${CONTAINER_NGINX_IMAGE_REGISTRY} \ PACKAGES_REPO=$(OSS_PACKAGES_REPO) PACKAGE_NAME=$(PACKAGE_NAME) BASE_IMAGE=$(BASE_IMAGE) DOCKERFILE_PATH=$(DOCKERFILE_PATH) IMAGE_PATH=$(IMAGE_PATH) TAG=${IMAGE_TAG} \ OS_VERSION=$(OS_VERSION) OS_RELEASE=$(OS_RELEASE) \ - go test -v ./test/integration/installuninstall ./test/integration/managementplane ./test/integration/nginxless + go test -v ./test/integration/installuninstall ./test/integration/managementplane ./test/integration/auxiliarycommandserver ./test/integration/nginxless official-image-integration-test: $(SELECTED_PACKAGE) build-mock-management-plane-grpc TEST_ENV="Container" CONTAINER_OS_TYPE=$(CONTAINER_OS_TYPE) CONTAINER_NGINX_IMAGE_REGISTRY=${CONTAINER_NGINX_IMAGE_REGISTRY} BUILD_TARGET="install" \ PACKAGES_REPO=$(OSS_PACKAGES_REPO) TAG=${TAG} PACKAGE_NAME=$(PACKAGE_NAME) BASE_IMAGE=$(BASE_IMAGE) DOCKERFILE_PATH=$(OFFICIAL_IMAGE_DOCKERFILE_PATH) \ OS_VERSION=$(OS_VERSION) OS_RELEASE=$(OS_RELEASE) IMAGE_PATH=$(IMAGE_PATH) \ - go test -v ./test/integration/managementplane + go 
test -v ./test/integration/managementplane ./test/integration/auxiliarycommandserver performance-test: @mkdir -p $(TEST_BUILD_DIR) diff --git a/Makefile.packaging b/Makefile.packaging index 754e4ed91..74b6f336f 100644 --- a/Makefile.packaging +++ b/Makefile.packaging @@ -9,23 +9,18 @@ AZURE_PACKAGES_DIR := ./build/azure/packages BINARY_PATH := $(BUILD_DIR)/$(BINARY_NAME) GPG_PUBLIC_KEY := .key PACKAGE_BUILD ?= 1 -PACKAGE_VERSION := $(shell echo ${VERSION} | tr -d 'v') +PACKAGE_VERSION := $(shell git describe --match "v[0-9]*" --abbrev=0 --tags) TARBALL_NAME := $(PACKAGE_PREFIX).tar.gz -DEB_DISTROS ?= ubuntu-noble-24.04 ubuntu-jammy-22.04 ubuntu-focal-20.04 debian-bookworm-12 debian-bullseye-11 +DEB_DISTROS ?= ubuntu-plucky-25.04 ubuntu-noble-24.04 ubuntu-jammy-22.04 ubuntu-focal-20.04 debian-bookworm-12 debian-bullseye-11 DEB_ARCHS ?= arm64 amd64 -RPM_DISTROS ?= oraclelinux-8-x86_64 oraclelinux-9-x86_64 suse-15-x86_64 +RPM_DISTROS ?= suse-15-x86_64 RPM_ARCH := x86_64 -REDHAT_VERSIONS ?= redhatenterprise-8 redhatenterprise-9 +REDHAT_VERSIONS ?= redhatenterprise-8 redhatenterprise-9 redhatenterprise-10 REDHAT_ARCHS ?= aarch64 x86_64 -ROCKY_VERSIONS ?= rocky-8 rocky-9 -ROCKY_ARCHS ?= aarch64 x86_64 -FREEBSD_DISTROS ?= "FreeBSD:13:amd64" "FreeBSD:14:amd64" APK_VERSIONS ?= 3.18 3.19 3.20 3.21 3.22 APK_ARCHS ?= aarch64 x86_64 APK_REVISION ?= 1 -ALMA_VERSIONS ?= almalinux-8 almalinux-9 -ALMA_ARCHS ?= aarch64 x86_64 AMAZON_VERSIONS ?= amazon-2 amazon-2023 AMAZON_ARCHS ?= aarch64 x86_64 @@ -35,55 +30,38 @@ AMAZON_ARCHS ?= aarch64 x86_64 .PHONY: clean-packages clean-packages: rm -rf $(PACKAGES_DIR) - rm -rf $(GITHUB_PACKAGES_DIR) - rm -rf $(AZURE_PACKAGES_DIR) $(PACKAGES_DIR): - @mkdir -p $(PACKAGES_DIR)/deb && mkdir -p $(PACKAGES_DIR)/rpm && mkdir -p $(PACKAGES_DIR)/apk && mkdir -p $(PACKAGES_DIR)/txz - -$(GITHUB_PACKAGES_DIR): - @mkdir -p $(GITHUB_PACKAGES_DIR) - -$(AZURE_PACKAGES_DIR): - @mkdir -p $(AZURE_PACKAGES_DIR) + @mkdir -p $(PACKAGES_DIR)/deb && mkdir -p 
$(PACKAGES_DIR)/rpm && mkdir -p $(PACKAGES_DIR)/apk .PHONY: package -package: gpg-key $(PACKAGES_DIR) $(GITHUB_PACKAGES_DIR) $(AZURE_PACKAGES_DIR) #### Create final packages for all supported distros +package: $(PACKAGES_DIR) #### Create final packages for all supported distros # Create deb packages - @for arch in $(DEB_ARCHS); do \ GOWORK=off CGO_ENABLED=0 GOARCH=$${arch} GOOS=linux go build -pgo=auto -ldflags=${LDFLAGS} -o $(BINARY_PATH) $(PROJECT_DIR)/$(PROJECT_FILE); \ for distro in $(DEB_DISTROS); do \ deb_codename=`echo $$distro | cut -d- -f 2`; \ VERSION=$(PACKAGE_VERSION)~$${deb_codename} ARCH=$${arch} nfpm pkg --config .nfpm.yaml --packager deb --target ${PACKAGES_DIR}/deb/${PACKAGE_PREFIX}_$(PACKAGE_VERSION)~$${deb_codename}_$${arch}.deb; \ - cp ${PACKAGES_DIR}/deb/${PACKAGE_PREFIX}_$(PACKAGE_VERSION)~$${deb_codename}_$${arch}.deb ${GITHUB_PACKAGES_DIR}/${PACKAGE_PREFIX}-$(PACKAGE_VERSION)~$${deb_codename}_$${arch}.deb; \ - cp ${PACKAGES_DIR}/deb/${PACKAGE_PREFIX}_$(PACKAGE_VERSION)~$${deb_codename}_$${arch}.deb ${AZURE_PACKAGES_DIR}/${PACKAGE_PREFIX}-$(PACKAGE_VERSION)~$${deb_codename}_$${arch}.deb; \ done; \ rm -rf $(BINARY_PATH); \ done; \ # Create rpm packages - @GOWORK=off CGO_ENABLED=0 GOARCH=amd64 GOOS=linux go build -pgo=auto -ldflags=${LDFLAGS} -o $(BINARY_PATH) $(PROJECT_DIR)/$(PROJECT_FILE) @for distro in $(RPM_DISTROS); do \ rpm_distro=`echo $$distro | cut -d- -f 1`; \ rpm_major=`echo $$distro | cut -d- -f 2`; \ rpm_codename='na'; \ - if [ "$$rpm_distro" = "centos" ] || [ "$$rpm_distro" = "redhatenterprise" ]; then rpm_codename="el$$rpm_major"; \ - elif [ "$$rpm_distro" = "oraclelinux" ]; then rpm_codename="oraclelinux$$rpm_major"; \ - elif [ "$$rpm_distro" = "suse" ]; then rpm_codename="sles$$rpm_major"; \ + if [ "$$rpm_distro" = "suse" ]; then rpm_codename="sles$$rpm_major"; \ fi; \ if [ "$$rpm_codename" != "na" ]; then \ VERSION=$(PACKAGE_VERSION) ARCH=amd64 nfpm pkg --config .nfpm.yaml --packager rpm --target 
$(PACKAGES_DIR)/rpm/${PACKAGE_PREFIX}-$(PACKAGE_VERSION).$${rpm_codename}.ngx.${RPM_ARCH}.rpm; \ - cp $(PACKAGES_DIR)/rpm/${PACKAGE_PREFIX}-$(PACKAGE_VERSION).$${rpm_codename}.ngx.$(RPM_ARCH).rpm ${GITHUB_PACKAGES_DIR}/${PACKAGE_PREFIX}-$(PACKAGE_VERSION).$${rpm_codename}.ngx.${RPM_ARCH}.rpm; \ - cp $(PACKAGES_DIR)/rpm/${PACKAGE_PREFIX}-$(PACKAGE_VERSION).$${rpm_codename}.ngx.$(RPM_ARCH).rpm ${AZURE_PACKAGES_DIR}/${PACKAGE_PREFIX}-$(PACKAGE_VERSION).$${rpm_codename}.ngx.${RPM_ARCH}.rpm; \ fi; \ done; \ rm -rf $(BINARY_PATH) # Create redhat rpm packages - @for arch in $(REDHAT_ARCHS); do \ goarch=amd64; \ if [ "$$arch" = "aarch64" ]; then goarch="arm64"; fi; \ @@ -93,49 +71,12 @@ package: gpg-key $(PACKAGES_DIR) $(GITHUB_PACKAGES_DIR) $(AZURE_PACKAGES_DIR) ## rpm_major=`echo $$distro | cut -d- -f 2`; \ rpm_codename="el$$rpm_major"; \ VERSION=$(PACKAGE_VERSION) ARCH=$${arch} nfpm pkg --config .nfpm.yaml --packager rpm --target $(PACKAGES_DIR)/rpm/${PACKAGE_PREFIX}-$(PACKAGE_VERSION).$${rpm_codename}.ngx.$${arch}.rpm; \ - cp $(PACKAGES_DIR)/rpm/${PACKAGE_PREFIX}-$(PACKAGE_VERSION).$${rpm_codename}.ngx.$${arch}.rpm ${GITHUB_PACKAGES_DIR}/${PACKAGE_PREFIX}-$(PACKAGE_VERSION).$${rpm_codename}.ngx.$${arch}.rpm; \ - cp $(PACKAGES_DIR)/rpm/${PACKAGE_PREFIX}-$(PACKAGE_VERSION).$${rpm_codename}.ngx.$${arch}.rpm ${AZURE_PACKAGES_DIR}/${PACKAGE_PREFIX}-$(PACKAGE_VERSION).$${rpm_codename}.ngx.$${arch}.rpm; \ done; \ rm -rf $(BINARY_PATH); \ done; \ - # Create almalinux rpm packages - - @for arch in $(ALMA_ARCHS); do \ - goarch=amd64; \ - if [ "$$arch" = "aarch64" ]; then goarch="arm64"; fi; \ - GOWORK=off CGO_ENABLED=0 GOARCH=$${goarch} GOOS=linux go build -pgo=auto -ldflags=${LDFLAGS} -o $(BINARY_PATH) $(PROJECT_DIR)/$(PROJECT_FILE); \ - for distro in $(ALMA_VERSIONS); do \ - rpm_distro=`echo $$distro | cut -d- -f 1`; \ - rpm_major=`echo $$distro | cut -d- -f 2`; \ - rpm_codename="almalinux$$rpm_major"; \ - VERSION=$(PACKAGE_VERSION) ARCH=$${arch} nfpm pkg --config .nfpm.yaml 
--packager rpm --target $(PACKAGES_DIR)/rpm/${PACKAGE_PREFIX}-$(PACKAGE_VERSION).$${rpm_codename}.ngx.$${arch}.rpm; \ - cp $(PACKAGES_DIR)/rpm/${PACKAGE_PREFIX}-$(PACKAGE_VERSION).$${rpm_codename}.ngx.$${arch}.rpm ${GITHUB_PACKAGES_DIR}/${PACKAGE_PREFIX}-$(PACKAGE_VERSION).$${rpm_codename}.ngx.$${arch}.rpm; \ - cp $(PACKAGES_DIR)/rpm/${PACKAGE_PREFIX}-$(PACKAGE_VERSION).$${rpm_codename}.ngx.$${arch}.rpm ${AZURE_PACKAGES_DIR}/${PACKAGE_PREFIX}-$(PACKAGE_VERSION).$${rpm_codename}.ngx.$${arch}.rpm; \ - done; \ - rm -rf $(BINARY_PATH); \ - done; \ - - # Create rocky rpm packages - - @for arch in $(ROCKY_ARCHS); do \ - goarch=amd64; \ - if [ "$$arch" = "aarch64" ]; then goarch="arm64"; fi; \ - GOWORK=off CGO_ENABLED=0 GOARCH=$${goarch} GOOS=linux go build -pgo=auto -ldflags=${LDFLAGS} -o $(BINARY_PATH) $(PROJECT_DIR)/$(PROJECT_FILE); \ - for distro in $(ROCKY_VERSIONS); do \ - rpm_distro=`echo $$distro | cut -d- -f 1`; \ - rpm_major=`echo $$distro | cut -d- -f 2`; \ - rpm_codename='na'; \ - if [ "$$rpm_distro" = "rocky" ]; then rpm_codename="rocky$$rpm_major"; fi; \ - if [ "$$rpm_codename" != "na" ]; then \ - VERSION=$(PACKAGE_VERSION) ARCH=$${arch} nfpm pkg --config .nfpm.yaml --packager rpm --target $(PACKAGES_DIR)/rpm/${PACKAGE_PREFIX}-$(PACKAGE_VERSION).$${rpm_codename}.ngx.$${arch}.rpm; \ - cp $(PACKAGES_DIR)/rpm/${PACKAGE_PREFIX}-$(PACKAGE_VERSION).$${rpm_codename}.ngx.$${arch}.rpm ${GITHUB_PACKAGES_DIR}/${PACKAGE_PREFIX}-$(PACKAGE_VERSION).$${rpm_codename}.ngx.$${arch}.rpm; \ - cp $(PACKAGES_DIR)/rpm/${PACKAGE_PREFIX}-$(PACKAGE_VERSION).$${rpm_codename}.ngx.$${arch}.rpm ${AZURE_PACKAGES_DIR}/${PACKAGE_PREFIX}-$(PACKAGE_VERSION).$${rpm_codename}.ngx.$${arch}.rpm; \ - fi; \ - done; \ - rm -rf $(BINARY_PATH); \ - done; \ + # Create amazon rpm packages @for arch in $(AMAZON_ARCHS); do \ goarch=amd64; \ if [ "$$arch" = "aarch64" ]; then goarch="arm64"; fi; \ @@ -144,14 +85,11 @@ package: gpg-key $(PACKAGES_DIR) $(GITHUB_PACKAGES_DIR) $(AZURE_PACKAGES_DIR) ## 
rpm_major=`echo $$version | cut -d- -f 2`; \ rpm_codename="amzn$$rpm_major";\ VERSION=$(PACKAGE_VERSION) ARCH=$${arch} nfpm pkg --config .nfpm.yaml --packager rpm --target $(PACKAGES_DIR)/rpm/${PACKAGE_PREFIX}-$(PACKAGE_VERSION).$${rpm_codename}.ngx.$${arch}.rpm; \ - cp $(PACKAGES_DIR)/rpm/${PACKAGE_PREFIX}-$(PACKAGE_VERSION).$${rpm_codename}.ngx.$${arch}.rpm ${GITHUB_PACKAGES_DIR}/${PACKAGE_PREFIX}-$(PACKAGE_VERSION).$${rpm_codename}.ngx.$${arch}.rpm; \ - cp $(PACKAGES_DIR)/rpm/${PACKAGE_PREFIX}-$(PACKAGE_VERSION).$${rpm_codename}.ngx.$${arch}.rpm ${AZURE_PACKAGES_DIR}/${PACKAGE_PREFIX}-$(PACKAGE_VERSION).$${rpm_codename}.ngx.$${arch}.rpm; \ done; \ rm -rf $(BINARY_PATH); \ done; \ # Create apk packages - @for arch in $(APK_ARCHS); do \ goarch=amd64; \ if [ "$$arch" = "aarch64" ]; then goarch="arm64"; fi; \ @@ -159,19 +97,10 @@ package: gpg-key $(PACKAGES_DIR) $(GITHUB_PACKAGES_DIR) $(AZURE_PACKAGES_DIR) ## for version in $(APK_VERSIONS); do \ if [ ! -d "$(PACKAGES_DIR)/apk/v$${version}/$${arch}" ]; then mkdir -p $(PACKAGES_DIR)/apk/v$${version}/$${arch}; fi; \ VERSION=$(PACKAGE_VERSION) ARCH=$${arch} nfpm pkg --config .nfpm.yaml --packager apk --target $(PACKAGES_DIR)/apk/v$${version}/$${arch}/${PACKAGE_PREFIX}-$(PACKAGE_VERSION).apk; \ - cp $(PACKAGES_DIR)/apk/v$${version}/$${arch}/${PACKAGE_PREFIX}-$(PACKAGE_VERSION).apk ${GITHUB_PACKAGES_DIR}/${PACKAGE_PREFIX}-v$(PACKAGE_VERSION)-r$(APK_REVISION).apk; \ - cp $(PACKAGES_DIR)/apk/v$${version}/$${arch}/${PACKAGE_PREFIX}-$(PACKAGE_VERSION).apk ${AZURE_PACKAGES_DIR}/${PACKAGE_PREFIX}-v$(PACKAGE_VERSION)-r$(APK_REVISION).apk; \ done; \ rm -rf $(BINARY_PATH); \ done; \ - - # Create txz packages - - rm -rf $(BINARY_PATH) - @GOWORK=off CGO_ENABLED=0 GOOS=freebsd GOARCH=amd64 go build -pgo=auto -ldflags=${LDFLAGS} -o $(BINARY_PATH) $(PROJECT_DIR)/$(PROJECT_FILE) - - docker run -v ${PWD}:/nginx-agent/ -e VERSION=$(PACKAGE_VERSION) build-signed-packager:1.0.0 - + # Package build complete echo "DEB packages:"; \ @@ -180,17 
+109,9 @@ package: gpg-key $(PACKAGES_DIR) $(GITHUB_PACKAGES_DIR) $(AZURE_PACKAGES_DIR) ## find $(PACKAGES_DIR)/rpm ;\ echo "APK packages:"; \ find $(PACKAGES_DIR)/apk ;\ - echo "TXZ packages:"; \ - find $(PACKAGES_DIR)/txz ;\ - echo "Github packages:"; \ - find $(GITHUB_PACKAGES_DIR) ;\ - cd $(PACKAGES_DIR) && tar -czvf "./$(TARBALL_NAME)" * && cd ../.. && cp "${PACKAGES_DIR}/$(TARBALL_NAME)" "${AZURE_PACKAGES_DIR}/$(TARBALL_NAME)"; \ - echo "Azure packages:"; \ - find $(AZURE_PACKAGES_DIR) ; -.PHONY: build-signed-packager -build-signed-packager: - docker build -f scripts/packages/packager/Dockerfile --build-arg package_type=signed-package -t build-signed-packager:1.0.0 . + # Create tarball containing all packages + cd $(PACKAGES_DIR) && tar -czvf "./$(TARBALL_NAME)" * && cd ../.. && cp "${PACKAGES_DIR}/$(TARBALL_NAME)"; \ .PHONY: gpg-key gpg-key: ## Generate GPG public key diff --git a/Makefile.tools b/Makefile.tools index abb037868..512a729a2 100644 --- a/Makefile.tools +++ b/Makefile.tools @@ -1,6 +1,6 @@ OAPICODEGEN = github.com/deepmap/oapi-codegen/v2/cmd/oapi-codegen@v2.1.0 LEFTHOOK = github.com/evilmartians/lefthook@v1.6.9 -GOLANGCILINT = github.com/golangci/golangci-lint/cmd/golangci-lint@v1.64.8 +GOLANGCILINT = github.com/golangci/golangci-lint/v2/cmd/golangci-lint@v2.2.1 PROTOCGENGO = google.golang.org/protobuf/cmd/protoc-gen-go@v1.32.0 GOFUMPT = mvdan.cc/gofumpt@v0.6.0 COUNTERFEITER = github.com/maxbrunsfeld/counterfeiter/v6@v6.8.1 diff --git a/go.mod b/go.mod index b7d5912b5..1c1886634 100644 --- a/go.mod +++ b/go.mod @@ -1,8 +1,8 @@ module github.com/nginx/agent/v3 -go 1.23.7 +go 1.24.0 -toolchain go1.23.10 +toolchain go1.24.4 require ( buf.build/gen/go/bufbuild/protovalidate/protocolbuffers/go v1.36.4-20250130201111-63bb56e20495.1 diff --git a/internal/bus/message_pipe.go b/internal/bus/message_pipe.go index 0cba3a173..aabba3dfb 100644 --- a/internal/bus/message_pipe.go +++ b/internal/bus/message_pipe.go @@ -147,6 +147,16 @@ func (p *MessagePipe) 
IsPluginRegistered(pluginName string) bool { return isPluginRegistered } +func (p *MessagePipe) Index(pluginName string, plugins []Plugin) int { + for index, plugin := range plugins { + if pluginName == plugin.Info().Name { + return index + } + } + + return -1 +} + func (p *MessagePipe) unsubscribePlugin(ctx context.Context, index int, plugin Plugin) error { if index != -1 { p.plugins = append(p.plugins[:index], p.plugins[index+1:]...) @@ -181,16 +191,6 @@ func (p *MessagePipe) findPlugins(pluginNames []string) []Plugin { return plugins } -func (p *MessagePipe) Index(pluginName string, plugins []Plugin) int { - for index, plugin := range plugins { - if pluginName == plugin.Info().Name { - return index - } - } - - return -1 -} - func (p *MessagePipe) initPlugins(ctx context.Context) { for index, plugin := range p.plugins { err := plugin.Init(ctx, p) diff --git a/internal/collector/logsgzipprocessor/processor.go b/internal/collector/logsgzipprocessor/processor.go index ce3232ab1..e8d1ca1de 100644 --- a/internal/collector/logsgzipprocessor/processor.go +++ b/internal/collector/logsgzipprocessor/processor.go @@ -95,6 +95,22 @@ func (p *logsGzipProcessor) ConsumeLogs(ctx context.Context, ld plog.Logs) error return p.nextConsumer.ConsumeLogs(ctx, ld) } +func (p *logsGzipProcessor) Capabilities() consumer.Capabilities { + return consumer.Capabilities{ + MutatesData: true, + } +} + +func (p *logsGzipProcessor) Start(ctx context.Context, _ component.Host) error { + p.settings.Logger.Info("Starting logs gzip processor") + return nil +} + +func (p *logsGzipProcessor) Shutdown(ctx context.Context) error { + p.settings.Logger.Info("Shutting down logs gzip processor") + return nil +} + func (p *logsGzipProcessor) processLogRecords(logRecords plog.LogRecordSlice) error { var errs error // Filter out unsupported data types in the log before processing @@ -164,19 +180,3 @@ func (p *logsGzipProcessor) gzipCompress(data []byte) ([]byte, error) { return buf.Bytes(), nil } - -func (p 
*logsGzipProcessor) Capabilities() consumer.Capabilities { - return consumer.Capabilities{ - MutatesData: true, - } -} - -func (p *logsGzipProcessor) Start(ctx context.Context, _ component.Host) error { - p.settings.Logger.Info("Starting logs gzip processor") - return nil -} - -func (p *logsGzipProcessor) Shutdown(ctx context.Context) error { - p.settings.Logger.Info("Shutting down logs gzip processor") - return nil -} diff --git a/internal/collector/logsgzipprocessor/processor_benchmark_test.go b/internal/collector/logsgzipprocessor/processor_benchmark_test.go index 4bb9ccab5..82477aef7 100644 --- a/internal/collector/logsgzipprocessor/processor_benchmark_test.go +++ b/internal/collector/logsgzipprocessor/processor_benchmark_test.go @@ -20,7 +20,7 @@ func generateLogs(numRecords, recordSize int) plog.Logs { logs := plog.NewLogs() rl := logs.ResourceLogs().AppendEmpty() sl := rl.ScopeLogs().AppendEmpty() - for i := 0; i < numRecords; i++ { + for range numRecords { lr := sl.LogRecords().AppendEmpty() content, _ := randomString(recordSize) lr.Body().SetStr(content) @@ -64,7 +64,7 @@ func BenchmarkGzipProcessor(b *testing.B) { logs := generateLogs(bm.numRecords, bm.recordSize) b.ResetTimer() - for i := 0; i < b.N; i++ { + for range b.N { _ = p.ConsumeLogs(context.Background(), logs) } }) diff --git a/internal/collector/nginxossreceiver/factory_test.go b/internal/collector/nginxossreceiver/factory_test.go index 9f64e3511..a48c3940d 100644 --- a/internal/collector/nginxossreceiver/factory_test.go +++ b/internal/collector/nginxossreceiver/factory_test.go @@ -23,7 +23,7 @@ import ( func TestType(t *testing.T) { factory := NewFactory() ft := factory.Type() - require.EqualValues(t, metadata.Type, ft) + require.Equal(t, metadata.Type, ft) } func TestValidConfig(t *testing.T) { diff --git a/internal/collector/nginxossreceiver/internal/scraper/accesslog/nginx_log_scraper.go b/internal/collector/nginxossreceiver/internal/scraper/accesslog/nginx_log_scraper.go index 
e8af8cd1e..0e0b59b6b 100644 --- a/internal/collector/nginxossreceiver/internal/scraper/accesslog/nginx_log_scraper.go +++ b/internal/collector/nginxossreceiver/internal/scraper/accesslog/nginx_log_scraper.go @@ -224,6 +224,12 @@ func (nls *NginxLogScraper) Shutdown(_ context.Context) error { return err } +func (nls *NginxLogScraper) ConsumerCallback(_ context.Context, entries []*entry.Entry) { + nls.mut.Lock() + nls.entries = append(nls.entries, entries...) + nls.mut.Unlock() +} + func (nls *NginxLogScraper) initStanzaPipeline( operators []operator.Config, logger *zap.Logger, @@ -248,12 +254,6 @@ func (nls *NginxLogScraper) initStanzaPipeline( return pipe, err } -func (nls *NginxLogScraper) ConsumerCallback(_ context.Context, entries []*entry.Entry) { - nls.mut.Lock() - nls.entries = append(nls.entries, entries...) - nls.mut.Unlock() -} - func (nls *NginxLogScraper) runConsumer(ctx context.Context) { nls.logger.Info("Starting NGINX access log receiver's consumer") defer nls.wg.Done() diff --git a/internal/collector/nginxossreceiver/internal/scraper/accesslog/operator/input/file/config.go b/internal/collector/nginxossreceiver/internal/scraper/accesslog/operator/input/file/config.go index bdb9b9030..803eeee62 100644 --- a/internal/collector/nginxossreceiver/internal/scraper/accesslog/operator/input/file/config.go +++ b/internal/collector/nginxossreceiver/internal/scraper/accesslog/operator/input/file/config.go @@ -21,6 +21,13 @@ import ( const operatorType = "access_log_file_input" +// Config is the configuration of a file input operator +type Config struct { + helper.InputConfig `mapstructure:",squash"` + AccessLogFormat string `mapstructure:"access_log_format"` + fileconsumer.Config `mapstructure:",squash"` +} + func init() { operator.Register(operatorType, func() operator.Builder { return NewConfig() }) } @@ -38,13 +45,6 @@ func NewConfigWithID(operatorID string) *Config { } } -// Config is the configuration of a file input operator -type Config struct { - 
helper.InputConfig `mapstructure:",squash"` - AccessLogFormat string `mapstructure:"access_log_format"` - fileconsumer.Config `mapstructure:",squash"` -} - // Build will build a file input operator from the supplied configuration // nolint: ireturn func (c Config) Build(set component.TelemetrySettings) (operator.Operator, error) { diff --git a/internal/collector/nginxossreceiver/internal/scraper/accesslog/operator/input/file/config_test.go b/internal/collector/nginxossreceiver/internal/scraper/accesslog/operator/input/file/config_test.go index 8fc2040e2..70ee83087 100644 --- a/internal/collector/nginxossreceiver/internal/scraper/accesslog/operator/input/file/config_test.go +++ b/internal/collector/nginxossreceiver/internal/scraper/accesslog/operator/input/file/config_test.go @@ -27,7 +27,7 @@ func TestNewConfig(t *testing.T) { config := NewConfig() assert.NotNil(t, config) - assert.Equal(t, "access_log_file_input", config.InputConfig.OperatorID) + assert.Equal(t, "access_log_file_input", config.OperatorID) } func TestConfig_Build(t *testing.T) { diff --git a/internal/collector/otel_collector_plugin.go b/internal/collector/otel_collector_plugin.go index 18c101818..a1c6a9876 100644 --- a/internal/collector/otel_collector_plugin.go +++ b/internal/collector/otel_collector_plugin.go @@ -136,6 +136,63 @@ func (oc *Collector) Init(ctx context.Context, mp bus.MessagePipeInterface) erro return nil } +// Info the plugin. +func (oc *Collector) Info() *bus.Info { + return &bus.Info{ + Name: "collector", + } +} + +// Close the plugin. 
+func (oc *Collector) Close(ctx context.Context) error { + slog.InfoContext(ctx, "Closing OTel Collector plugin") + + if !oc.stopped { + slog.InfoContext(ctx, "Shutting down OTel Collector", "state", oc.service.GetState()) + oc.service.Shutdown() + oc.cancel() + + settings := *oc.config.Client.Backoff + settings.MaxElapsedTime = maxTimeToWaitForShutdown + err := backoff.WaitUntil(ctx, &settings, func() error { + if oc.service.GetState() == otelcol.StateClosed { + return nil + } + + return errors.New("OTel Collector not in a closed state yet") + }) + + if err != nil { + slog.ErrorContext(ctx, "Failed to shutdown OTel Collector", "error", err, "state", oc.service.GetState()) + } else { + slog.InfoContext(ctx, "OTel Collector shutdown", "state", oc.service.GetState()) + oc.stopped = true + } + } + + return nil +} + +// Process an incoming Message Bus message in the plugin +func (oc *Collector) Process(ctx context.Context, msg *bus.Message) { + switch msg.Topic { + case bus.NginxConfigUpdateTopic: + oc.handleNginxConfigUpdate(ctx, msg) + case bus.ResourceUpdateTopic: + oc.handleResourceUpdate(ctx, msg) + default: + slog.DebugContext(ctx, "OTel collector plugin unknown topic", "topic", msg.Topic) + } +} + +// Subscriptions returns the list of topics the plugin is subscribed to +func (oc *Collector) Subscriptions() []string { + return []string{ + bus.ResourceUpdateTopic, + bus.NginxConfigUpdateTopic, + } +} + // Process receivers and log warning for sub-optimal configurations func (oc *Collector) processReceivers(ctx context.Context, receivers []config.OtlpReceiver) { for _, receiver := range receivers { @@ -167,7 +224,7 @@ func (oc *Collector) bootup(ctx context.Context) error { go func() { if oc.service == nil { - errChan <- fmt.Errorf("unable to start OTel collector: service is nil") + errChan <- errors.New("unable to start OTel collector: service is nil") return } @@ -184,7 +241,7 @@ func (oc *Collector) bootup(ctx context.Context) error { return err default: if 
oc.service == nil { - return fmt.Errorf("unable to start otel collector: service is nil") + return errors.New("unable to start otel collector: service is nil") } state := oc.service.GetState() @@ -205,63 +262,6 @@ func (oc *Collector) bootup(ctx context.Context) error { } } -// Info the plugin. -func (oc *Collector) Info() *bus.Info { - return &bus.Info{ - Name: "collector", - } -} - -// Close the plugin. -func (oc *Collector) Close(ctx context.Context) error { - slog.InfoContext(ctx, "Closing OTel Collector plugin") - - if !oc.stopped { - slog.InfoContext(ctx, "Shutting down OTel Collector", "state", oc.service.GetState()) - oc.service.Shutdown() - oc.cancel() - - settings := *oc.config.Client.Backoff - settings.MaxElapsedTime = maxTimeToWaitForShutdown - err := backoff.WaitUntil(ctx, &settings, func() error { - if oc.service.GetState() == otelcol.StateClosed { - return nil - } - - return errors.New("OTel Collector not in a closed state yet") - }) - - if err != nil { - slog.ErrorContext(ctx, "Failed to shutdown OTel Collector", "error", err, "state", oc.service.GetState()) - } else { - slog.InfoContext(ctx, "OTel Collector shutdown", "state", oc.service.GetState()) - oc.stopped = true - } - } - - return nil -} - -// Process an incoming Message Bus message in the plugin -func (oc *Collector) Process(ctx context.Context, msg *bus.Message) { - switch msg.Topic { - case bus.NginxConfigUpdateTopic: - oc.handleNginxConfigUpdate(ctx, msg) - case bus.ResourceUpdateTopic: - oc.handleResourceUpdate(ctx, msg) - default: - slog.DebugContext(ctx, "OTel collector plugin unknown topic", "topic", msg.Topic) - } -} - -// Subscriptions returns the list of topics the plugin is subscribed to -func (oc *Collector) Subscriptions() []string { - return []string{ - bus.ResourceUpdateTopic, - bus.NginxConfigUpdateTopic, - } -} - func (oc *Collector) handleNginxConfigUpdate(ctx context.Context, msg *bus.Message) { slog.DebugContext(ctx, "OTel collector plugin received nginx config update 
message") oc.mu.Lock() @@ -436,7 +436,7 @@ func (oc *Collector) checkForNewReceivers(ctx context.Context, nginxConfigContex reloadCollector = true } } else { - slog.Debug("NAP logs feature disabled", "enabled_features", oc.config.Features) + slog.DebugContext(ctx, "NAP logs feature disabled", "enabled_features", oc.config.Features) } return reloadCollector diff --git a/internal/collector/otel_collector_plugin_test.go b/internal/collector/otel_collector_plugin_test.go index a767c62cc..d625d13b7 100644 --- a/internal/collector/otel_collector_plugin_test.go +++ b/internal/collector/otel_collector_plugin_test.go @@ -8,7 +8,6 @@ import ( "bytes" "context" "errors" - "fmt" "path/filepath" "testing" @@ -252,7 +251,7 @@ func TestCollector_ProcessNginxConfigUpdateTopic(t *testing.T) { if len(test.receivers.NginxPlusReceivers) == 1 { apiDetails := config.APIDetails{ - URL: fmt.Sprintf("%s/api", nginxPlusMock.URL), + URL: nginxPlusMock.URL + "/api", Listen: "", Location: "", } @@ -270,7 +269,7 @@ func TestCollector_ProcessNginxConfigUpdateTopic(t *testing.T) { model.PlusAPI.Location = apiDetails.Location } else { apiDetails := config.APIDetails{ - URL: fmt.Sprintf("%s/stub_status", nginxPlusMock.URL), + URL: nginxPlusMock.URL + "/stub_status", Listen: "", Location: "", } diff --git a/internal/command/command_plugin.go b/internal/command/command_plugin.go index 32797cf47..7ace559ce 100644 --- a/internal/command/command_plugin.go +++ b/internal/command/command_plugin.go @@ -106,32 +106,43 @@ func (cp *CommandPlugin) Info() *bus.Info { func (cp *CommandPlugin) Process(ctx context.Context, msg *bus.Message) { slog.DebugContext(ctx, "Processing command") + ctxWithMetadata := cp.config.NewContextWithLabels(ctx) - if logger.ServerType(ctx) == "" { - ctx = context.WithValue( - ctx, + if logger.ServerType(ctxWithMetadata) == "" { + ctxWithMetadata = context.WithValue( + ctxWithMetadata, logger.ServerTypeContextKey, slog.Any(logger.ServerTypeKey, cp.commandServerType.String()), ) } - 
if logger.ServerType(ctx) == cp.commandServerType.String() { + if logger.ServerType(ctxWithMetadata) == cp.commandServerType.String() { switch msg.Topic { case bus.ConnectionResetTopic: - cp.processConnectionReset(ctx, msg) + cp.processConnectionReset(ctxWithMetadata, msg) case bus.ResourceUpdateTopic: - cp.processResourceUpdate(ctx, msg) + cp.processResourceUpdate(ctxWithMetadata, msg) case bus.InstanceHealthTopic: - cp.processInstanceHealth(ctx, msg) + cp.processInstanceHealth(ctxWithMetadata, msg) case bus.DataPlaneHealthResponseTopic: - cp.processDataPlaneHealth(ctx, msg) + cp.processDataPlaneHealth(ctxWithMetadata, msg) case bus.DataPlaneResponseTopic: - cp.processDataPlaneResponse(ctx, msg) + cp.processDataPlaneResponse(ctxWithMetadata, msg) default: - slog.DebugContext(ctx, "Command plugin received unknown topic", "topic", msg.Topic) + slog.DebugContext(ctxWithMetadata, "Command plugin received unknown topic", "topic", msg.Topic) } } } +func (cp *CommandPlugin) Subscriptions() []string { + return []string{ + bus.ConnectionResetTopic, + bus.ResourceUpdateTopic, + bus.InstanceHealthTopic, + bus.DataPlaneHealthResponseTopic, + bus.DataPlaneResponseTopic, + } +} + func (cp *CommandPlugin) processResourceUpdate(ctx context.Context, msg *bus.Message) { slog.DebugContext(ctx, "Command plugin received resource update message") if resource, ok := msg.Data.(*mpi.Resource); ok { @@ -233,16 +244,6 @@ func (cp *CommandPlugin) processConnectionReset(ctx context.Context, msg *bus.Me } } -func (cp *CommandPlugin) Subscriptions() []string { - return []string{ - bus.ConnectionResetTopic, - bus.ResourceUpdateTopic, - bus.InstanceHealthTopic, - bus.DataPlaneHealthResponseTopic, - bus.DataPlaneResponseTopic, - } -} - // nolint: revive, cyclop func (cp *CommandPlugin) monitorSubscribeChannel(ctx context.Context) { for { diff --git a/internal/command/command_service.go b/internal/command/command_service.go index 15b35604c..f0edc3615 100644 --- a/internal/command/command_service.go 
+++ b/internal/command/command_service.go @@ -330,7 +330,7 @@ func (cs *CommandService) sendResponseForQueuedConfigApplyRequests( indexOfConfigApplyRequest int, ) error { instanceID := response.GetInstanceId() - for i := 0; i < indexOfConfigApplyRequest; i++ { + for i := range indexOfConfigApplyRequest { newResponse := response newResponse.GetMessageMeta().MessageId = id.GenerateMessageID() @@ -464,7 +464,7 @@ func (cs *CommandService) handleSubscribeError(ctx context.Context, err error, e return nil } - slog.ErrorContext(ctx, fmt.Sprintf("Failed to %s", errorMsg), "error", err) + slog.ErrorContext(ctx, "Failed to"+errorMsg, "error", err) return err } diff --git a/internal/config/config.go b/internal/config/config.go index 39393bd47..4ebd75900 100644 --- a/internal/config/config.go +++ b/internal/config/config.go @@ -129,6 +129,7 @@ func ResolveConfig() (*Config, error) { } checkCollectorConfiguration(collector, config) + addLabelsAsOTelHeaders(collector, config.Labels) slog.Debug("Agent config", "config", config) slog.Info("Excluded files from being watched for file changes", "exclude_files", @@ -209,6 +210,22 @@ func defaultCollector(collector *Collector, config *Config) { } } +func addLabelsAsOTelHeaders(collector *Collector, labels map[string]any) { + slog.Debug("Adding labels as headers to collector", "labels", labels) + if collector.Extensions.HeadersSetter != nil { + for key, value := range labels { + valueString, ok := value.(string) + if ok { + collector.Extensions.HeadersSetter.Headers = append(collector.Extensions.HeadersSetter.Headers, Header{ + Action: "insert", + Key: key, + Value: valueString, + }) + } + } + } +} + func setVersion(version, commit string) { RootCommand.Version = version + "-" + commit viperInstance.SetDefault(VersionKey, version) @@ -602,7 +619,7 @@ func seekFileInPaths(fileName string, directories ...string) (string, error) { } } - return "", fmt.Errorf("a valid configuration has not been found in any of the search paths") + return 
"", errors.New("a valid configuration has not been found in any of the search paths") } func configFilePaths() []string { diff --git a/internal/config/config_test.go b/internal/config/config_test.go index f34fa9853..6f3ad0eed 100644 --- a/internal/config/config_test.go +++ b/internal/config/config_test.go @@ -9,6 +9,7 @@ import ( "errors" "os" "path" + "sort" "strings" "testing" "time" @@ -63,6 +64,10 @@ func TestResolveConfig(t *testing.T) { actual, err := ResolveConfig() require.NoError(t, err) + sort.Slice(actual.Collector.Extensions.HeadersSetter.Headers, func(i, j int) bool { + headers := actual.Collector.Extensions.HeadersSetter.Headers + return headers[i].Key < headers[j].Key + }) assert.Equal(t, createConfig(), actual) } @@ -1059,6 +1064,16 @@ func createConfig() *Config { Key: "key", Value: "value", }, + { + Action: "insert", + Key: "label1", + Value: "label 1", + }, + { + Action: "insert", + Key: "label2", + Value: "new-value", + }, }, }, }, diff --git a/internal/config/types.go b/internal/config/types.go index b0db71462..ae0558045 100644 --- a/internal/config/types.go +++ b/internal/config/types.go @@ -6,12 +6,15 @@ package config import ( + "context" "errors" "fmt" "path/filepath" "strings" "time" + "google.golang.org/grpc/metadata" + "github.com/google/uuid" ) @@ -432,6 +435,18 @@ func (c *Config) AreReceiversConfigured() bool { len(c.Collector.Receivers.TcplogReceivers) > 0 } +func (c *Config) NewContextWithLabels(ctx context.Context) context.Context { + md := metadata.Pairs() + for key, value := range c.Labels { + valueString, ok := value.(string) + if ok { + md.Set(key, valueString) + } + } + + return metadata.NewOutgoingContext(ctx, md) +} + func isAllowedDir(dir string, allowedDirs []string) bool { if !strings.HasSuffix(dir, "/") && filepath.Ext(dir) == "" { dir += "/" diff --git a/internal/datasource/cert/cert.go b/internal/datasource/cert/cert.go index 95dd257ef..f640c9f5a 100644 --- a/internal/datasource/cert/cert.go +++ 
b/internal/datasource/cert/cert.go @@ -8,7 +8,7 @@ import ( "crypto/tls" "crypto/x509" "encoding/pem" - "fmt" + "errors" "os" ) @@ -37,7 +37,7 @@ func LoadCertificate(certPath string) (*x509.Certificate, error) { block, _ := pem.Decode(fileContents) if block == nil || block.Type != "CERTIFICATE" { - return nil, fmt.Errorf("failed to decode PEM block containing the certificate") + return nil, errors.New("failed to decode PEM block containing the certificate") } cert, err := x509.ParseCertificate(block.Bytes) diff --git a/internal/datasource/config/nginx_config_parser.go b/internal/datasource/config/nginx_config_parser.go index 949deb781..ebdef5441 100644 --- a/internal/datasource/config/nginx_config_parser.go +++ b/internal/datasource/config/nginx_config_parser.go @@ -510,10 +510,10 @@ func (ncp *NginxConfigParser) apiCallback(ctx context.Context, parent, for _, url := range urls { if ncp.pingAPIEndpoint(ctx, url, apiType) { - slog.DebugContext(ctx, fmt.Sprintf("%s found", apiType), "url", url) + slog.DebugContext(ctx, apiType+" found", "url", url) return url } - slog.DebugContext(ctx, fmt.Sprintf("%s is not reachable", apiType), "url", url) + slog.DebugContext(ctx, apiType+" is not reachable", "url", url) } return &model.APIDetails{ @@ -548,7 +548,7 @@ func (ncp *NginxConfigParser) pingAPIEndpoint(ctx context.Context, statusAPIDeta } if resp.StatusCode != http.StatusOK { - slog.DebugContext(ctx, fmt.Sprintf("%s API responded with unexpected status code", apiType), "status_code", + slog.DebugContext(ctx, apiType+" API responded with unexpected status code", "status_code", resp.StatusCode, "expected", http.StatusOK) return false @@ -705,11 +705,12 @@ func (ncp *NginxConfigParser) parseListenDirective( } func (ncp *NginxConfigParser) parseListenHostAndPort(listenHost, listenPort string) (hostname, port string) { - if listenHost == "*" || listenHost == "" { + switch listenHost { + case "*", "": hostname = "127.0.0.1" - } else if listenHost == "::" || listenHost == 
"::1" { + case "::", "::1": hostname = "[::1]" - } else { + default: hostname = listenHost } port = listenPort diff --git a/internal/datasource/config/nginx_config_parser_benchmark_test.go b/internal/datasource/config/nginx_config_parser_benchmark_test.go index e61fb2380..bfd3e2651 100644 --- a/internal/datasource/config/nginx_config_parser_benchmark_test.go +++ b/internal/datasource/config/nginx_config_parser_benchmark_test.go @@ -8,7 +8,6 @@ package config import ( "context" "fmt" - "io" "log/slog" "path/filepath" "testing" @@ -29,7 +28,7 @@ var configFilePaths = []string{ func BenchmarkNginxConfigParser_Parse(b *testing.B) { // Discard log messages - slog.SetDefault(slog.New(slog.NewTextHandler(io.Discard, &slog.HandlerOptions{}))) + slog.SetDefault(slog.New(slog.DiscardHandler)) ctx := context.Background() agentConfig := types.AgentConfig() @@ -46,7 +45,7 @@ func BenchmarkNginxConfigParser_Parse(b *testing.B) { bb.ResetTimer() - for i := 0; i < bb.N; i++ { + for range bb.N { _, err := nginxConfigParser.Parse( ctx, &mpi.Instance{ @@ -67,7 +66,7 @@ func BenchmarkNginxConfigParser_Parse(b *testing.B) { // These tests don't exercise the traversal very well, they are more to track the growth of configs in size func BenchmarkNginxConfigParserGeneratedConfig_Parse(b *testing.B) { - slog.SetDefault(slog.New(slog.NewTextHandler(io.Discard, &slog.HandlerOptions{}))) + slog.SetDefault(slog.New(slog.DiscardHandler)) ctx := context.Background() agentConfig := types.AgentConfig() @@ -107,7 +106,7 @@ func BenchmarkNginxConfigParserGeneratedConfig_Parse(b *testing.B) { bb.ResetTimer() - for i := 0; i < bb.N; i++ { + for range bb.N { _, parseErr := nginxConfigParser.Parse( ctx, &mpi.Instance{ diff --git a/internal/datasource/config/nginx_config_parser_test.go b/internal/datasource/config/nginx_config_parser_test.go index 348d69de7..18e85e447 100644 --- a/internal/datasource/config/nginx_config_parser_test.go +++ b/internal/datasource/config/nginx_config_parser_test.go @@ -554,7 
+554,7 @@ func TestNginxConfigParser_Parse(t *testing.T) { assert.ElementsMatch(t, test.expectedConfigContext.ErrorLogs, result.ErrorLogs) assert.Equal(t, test.expectedConfigContext.StubStatus, result.StubStatus) assert.Equal(t, test.expectedConfigContext.InstanceID, result.InstanceID) - assert.Equal(t, len(test.expectedConfigContext.Files), len(result.Files)) + assert.Len(t, result.Files, len(test.expectedConfigContext.Files)) }) } } @@ -1377,7 +1377,7 @@ func TestNginxConfigParser_checkDuplicate(t *testing.T) { } func protoListEqual(protoListA, protoListB []*mpi.File) bool { - for i := 0; i < len(protoListA); i++ { + for i := range protoListA { res := proto.Equal(protoListA[i], protoListB[i]) if !res { return false diff --git a/internal/file/file_manager_service.go b/internal/file/file_manager_service.go index b8076fdfb..8953aaf5a 100644 --- a/internal/file/file_manager_service.go +++ b/internal/file/file_manager_service.go @@ -88,6 +88,7 @@ type ( ) type FileManagerService struct { + manifestLock *sync.RWMutex agentConfig *config.Config fileOperator fileOperator fileServiceOperator fileServiceOperatorInterface @@ -102,16 +103,19 @@ type FileManagerService struct { filesMutex sync.RWMutex } -func NewFileManagerService(fileServiceClient mpi.FileServiceClient, agentConfig *config.Config) *FileManagerService { +func NewFileManagerService(fileServiceClient mpi.FileServiceClient, agentConfig *config.Config, + manifestLock *sync.RWMutex, +) *FileManagerService { return &FileManagerService{ agentConfig: agentConfig, - fileOperator: NewFileOperator(), - fileServiceOperator: NewFileServiceOperator(agentConfig, fileServiceClient), + fileOperator: NewFileOperator(manifestLock), + fileServiceOperator: NewFileServiceOperator(agentConfig, fileServiceClient, manifestLock), fileActions: make(map[string]*model.FileCache), rollbackFileContents: make(map[string][]byte), currentFilesOnDisk: make(map[string]*mpi.File), previousManifestFiles: make(map[string]*model.ManifestFile), 
manifestFilePath: agentConfig.ManifestDir + "/manifest.json", + manifestLock: manifestLock, } } @@ -129,7 +133,7 @@ func (fms *FileManagerService) ConfigApply(ctx context.Context, fileOverview := configApplyRequest.GetOverview() if fileOverview == nil { - return model.Error, fmt.Errorf("fileOverview is nil") + return model.Error, errors.New("fileOverview is nil") } allowedErr := fms.checkAllowedDirectory(fileOverview.GetFiles()) @@ -217,50 +221,6 @@ func (fms *FileManagerService) Rollback(ctx context.Context, instanceID string) return nil } -func (fms *FileManagerService) executeFileActions(ctx context.Context) error { - for _, fileAction := range fms.fileActions { - switch fileAction.Action { - case model.Delete: - slog.Debug("File action, deleting file", "file", fileAction.File.GetFileMeta().GetName()) - if err := os.Remove(fileAction.File.GetFileMeta().GetName()); err != nil && !os.IsNotExist(err) { - return fmt.Errorf("error deleting file: %s error: %w", - fileAction.File.GetFileMeta().GetName(), err) - } - - continue - case model.Add, model.Update: - slog.Debug("File action, add or update file", "file", fileAction.File.GetFileMeta().GetName()) - updateErr := fms.fileUpdate(ctx, fileAction.File) - if updateErr != nil { - return updateErr - } - case model.Unchanged: - slog.DebugContext(ctx, "File unchanged") - } - } - - return nil -} - -func (fms *FileManagerService) fileUpdate(ctx context.Context, file *mpi.File) error { - if file.GetFileMeta().GetSize() <= int64(fms.agentConfig.Client.Grpc.MaxFileSize) { - return fms.fileServiceOperator.File(ctx, file, fms.fileActions) - } - - return fms.fileServiceOperator.ChunkedFile(ctx, file) -} - -func (fms *FileManagerService) checkAllowedDirectory(checkFiles []*mpi.File) error { - for _, file := range checkFiles { - allowed := fms.agentConfig.IsDirectoryAllowed(file.GetFileMeta().GetName()) - if !allowed { - return fmt.Errorf("file not in allowed directories %s", file.GetFileMeta().GetName()) - } - } - - return nil -} - 
func (fms *FileManagerService) ConfigUpdate(ctx context.Context, nginxConfigContext *model.NginxConfigContext, ) { @@ -423,9 +383,13 @@ func (fms *FileManagerService) UpdateCurrentFilesOnDisk( // seems to be a control flag, avoid control coupling // nolint: revive func (fms *FileManagerService) UpdateManifestFile(currentFiles map[string]*mpi.File, referenced bool) (err error) { + slog.Debug("Updating manifest file", "current_files", currentFiles, "referenced", referenced) currentManifestFiles, _, readError := fms.manifestFile() fms.previousManifestFiles = currentManifestFiles if readError != nil && !errors.Is(readError, os.ErrNotExist) { + slog.Debug("Error reading manifest file", "current_manifest_files", + currentManifestFiles, "updated_files", currentFiles, "referenced", referenced) + return fmt.Errorf("unable to read manifest file: %w", readError) } @@ -457,6 +421,8 @@ func (fms *FileManagerService) manifestFile() (map[string]*model.ManifestFile, m return nil, nil, err } + fms.manifestLock.Lock() + defer fms.manifestLock.Unlock() file, err := os.ReadFile(fms.manifestFilePath) if err != nil { return nil, nil, fmt.Errorf("failed to read manifest file: %w", err) @@ -466,6 +432,10 @@ func (fms *FileManagerService) manifestFile() (map[string]*model.ManifestFile, m err = json.Unmarshal(file, &manifestFiles) if err != nil { + if len(file) == 0 { + return nil, nil, fmt.Errorf("manifest file is empty: %w", err) + } + return nil, nil, fmt.Errorf("failed to parse manifest file: %w", err) } @@ -474,6 +444,50 @@ func (fms *FileManagerService) manifestFile() (map[string]*model.ManifestFile, m return manifestFiles, fileMap, nil } +func (fms *FileManagerService) executeFileActions(ctx context.Context) error { + for _, fileAction := range fms.fileActions { + switch fileAction.Action { + case model.Delete: + slog.DebugContext(ctx, "File action, deleting file", "file", fileAction.File.GetFileMeta().GetName()) + if err := os.Remove(fileAction.File.GetFileMeta().GetName()); err != 
nil && !os.IsNotExist(err) { + return fmt.Errorf("error deleting file: %s error: %w", + fileAction.File.GetFileMeta().GetName(), err) + } + + continue + case model.Add, model.Update: + slog.DebugContext(ctx, "File action, add or update file", "file", fileAction.File.GetFileMeta().GetName()) + updateErr := fms.fileUpdate(ctx, fileAction.File) + if updateErr != nil { + return updateErr + } + case model.Unchanged: + slog.DebugContext(ctx, "File unchanged") + } + } + + return nil +} + +func (fms *FileManagerService) fileUpdate(ctx context.Context, file *mpi.File) error { + if file.GetFileMeta().GetSize() <= int64(fms.agentConfig.Client.Grpc.MaxFileSize) { + return fms.fileServiceOperator.File(ctx, file, fms.fileActions) + } + + return fms.fileServiceOperator.ChunkedFile(ctx, file) +} + +func (fms *FileManagerService) checkAllowedDirectory(checkFiles []*mpi.File) error { + for _, file := range checkFiles { + allowed := fms.agentConfig.IsDirectoryAllowed(file.GetFileMeta().GetName()) + if !allowed { + return fmt.Errorf("file not in allowed directories %s", file.GetFileMeta().GetName()) + } + } + + return nil +} + func (fms *FileManagerService) convertToManifestFileMap( currentFiles map[string]*mpi.File, referenced bool, diff --git a/internal/file/file_manager_service_test.go b/internal/file/file_manager_service_test.go index 4cff68071..01a0f7cda 100644 --- a/internal/file/file_manager_service_test.go +++ b/internal/file/file_manager_service_test.go @@ -11,6 +11,7 @@ import ( "fmt" "os" "path/filepath" + "sync" "testing" "github.com/nginx/agent/v3/internal/model" @@ -54,7 +55,7 @@ func TestFileManagerService_ConfigApply_Add(t *testing.T) { agentConfig := types.AgentConfig() agentConfig.AllowedDirectories = []string{tempDir} - fileManagerService := NewFileManagerService(fakeFileServiceClient, agentConfig) + fileManagerService := NewFileManagerService(fakeFileServiceClient, agentConfig, &sync.RWMutex{}) fileManagerService.agentConfig.ManifestDir = manifestDirPath 
fileManagerService.manifestFilePath = manifestFilePath @@ -91,7 +92,7 @@ func TestFileManagerService_ConfigApply_Add_LargeFile(t *testing.T) { fileName: filePath, } - for i := 0; i < len(fileContent); i++ { + for i := range fileContent { fakeServerStreamingClient.chunks[uint32(i)] = []byte{fileContent[i]} } @@ -101,7 +102,7 @@ func TestFileManagerService_ConfigApply_Add_LargeFile(t *testing.T) { fakeFileServiceClient.GetFileStreamReturns(fakeServerStreamingClient, nil) agentConfig := types.AgentConfig() agentConfig.AllowedDirectories = []string{tempDir} - fileManagerService := NewFileManagerService(fakeFileServiceClient, agentConfig) + fileManagerService := NewFileManagerService(fakeFileServiceClient, agentConfig, &sync.RWMutex{}) fileManagerService.agentConfig.ManifestDir = manifestDirPath fileManagerService.manifestFilePath = manifestFilePath @@ -160,7 +161,7 @@ func TestFileManagerService_ConfigApply_Update(t *testing.T) { agentConfig := types.AgentConfig() agentConfig.AllowedDirectories = []string{tempDir} - fileManagerService := NewFileManagerService(fakeFileServiceClient, agentConfig) + fileManagerService := NewFileManagerService(fakeFileServiceClient, agentConfig, &sync.RWMutex{}) fileManagerService.agentConfig.ManifestDir = manifestDirPath fileManagerService.manifestFilePath = manifestFilePath err := fileManagerService.UpdateCurrentFilesOnDisk(ctx, filesOnDisk, false) @@ -209,7 +210,7 @@ func TestFileManagerService_ConfigApply_Delete(t *testing.T) { agentConfig := types.AgentConfig() agentConfig.AllowedDirectories = []string{tempDir} - fileManagerService := NewFileManagerService(fakeFileServiceClient, agentConfig) + fileManagerService := NewFileManagerService(fakeFileServiceClient, agentConfig, &sync.RWMutex{}) fileManagerService.agentConfig.ManifestDir = manifestDirPath fileManagerService.manifestFilePath = manifestFilePath err := fileManagerService.UpdateCurrentFilesOnDisk(ctx, filesOnDisk, false) @@ -247,7 +248,7 @@ func 
TestFileManagerService_ConfigApply_Delete(t *testing.T) { func TestFileManagerService_checkAllowedDirectory(t *testing.T) { fakeFileServiceClient := &v1fakes.FakeFileServiceClient{} - fileManagerService := NewFileManagerService(fakeFileServiceClient, types.AgentConfig()) + fileManagerService := NewFileManagerService(fakeFileServiceClient, types.AgentConfig(), &sync.RWMutex{}) allowedFiles := []*mpi.File{ { @@ -281,7 +282,7 @@ func TestFileManagerService_checkAllowedDirectory(t *testing.T) { func TestFileManagerService_ClearCache(t *testing.T) { fakeFileServiceClient := &v1fakes.FakeFileServiceClient{} - fileManagerService := NewFileManagerService(fakeFileServiceClient, types.AgentConfig()) + fileManagerService := NewFileManagerService(fakeFileServiceClient, types.AgentConfig(), &sync.RWMutex{}) filesCache := map[string]*model.FileCache{ "file/path/test.conf": { @@ -394,7 +395,7 @@ func TestFileManagerService_Rollback(t *testing.T) { instanceID := protos.NginxOssInstance([]string{}).GetInstanceMeta().GetInstanceId() fakeFileServiceClient := &v1fakes.FakeFileServiceClient{} - fileManagerService := NewFileManagerService(fakeFileServiceClient, types.AgentConfig()) + fileManagerService := NewFileManagerService(fakeFileServiceClient, types.AgentConfig(), &sync.RWMutex{}) fileManagerService.rollbackFileContents = fileContentCache fileManagerService.fileActions = filesCache fileManagerService.agentConfig.ManifestDir = manifestDirPath @@ -576,7 +577,7 @@ func TestFileManagerService_DetermineFileActions(t *testing.T) { manifestFilePath := manifestFile.Name() fakeFileServiceClient := &v1fakes.FakeFileServiceClient{} - fileManagerService := NewFileManagerService(fakeFileServiceClient, types.AgentConfig()) + fileManagerService := NewFileManagerService(fakeFileServiceClient, types.AgentConfig(), &sync.RWMutex{}) fileManagerService.agentConfig.ManifestDir = manifestDirPath fileManagerService.manifestFilePath = manifestFilePath @@ -597,7 +598,7 @@ func 
TestFileManagerService_DetermineFileActions(t *testing.T) { func CreateTestManifestFile(t testing.TB, tempDir string, currentFiles map[string]*mpi.File) *os.File { t.Helper() fakeFileServiceClient := &v1fakes.FakeFileServiceClient{} - fileManagerService := NewFileManagerService(fakeFileServiceClient, types.AgentConfig()) + fileManagerService := NewFileManagerService(fakeFileServiceClient, types.AgentConfig(), &sync.RWMutex{}) manifestFiles := fileManagerService.convertToManifestFileMap(currentFiles, true) manifestJSON, err := json.MarshalIndent(manifestFiles, "", " ") require.NoError(t, err) @@ -685,7 +686,7 @@ func TestFileManagerService_fileActions(t *testing.T) { Contents: newFileContent, }, }, nil) - fileManagerService := NewFileManagerService(fakeFileServiceClient, types.AgentConfig()) + fileManagerService := NewFileManagerService(fakeFileServiceClient, types.AgentConfig(), &sync.RWMutex{}) fileManagerService.fileActions = filesCache @@ -754,7 +755,7 @@ rQHX6DP4w6IwZY8JB8LS if test.certContent == "" { _, certBytes = helpers.GenerateSelfSignedCert(t) certContents := helpers.Cert{ - Name: fmt.Sprintf("%s.pem", test.certName), + Name: test.certName + ".pem", Type: "CERTIFICATE", Contents: certBytes, } diff --git a/internal/file/file_operator.go b/internal/file/file_operator.go index 88539ce3a..ee65925c8 100644 --- a/internal/file/file_operator.go +++ b/internal/file/file_operator.go @@ -14,6 +14,7 @@ import ( "log/slog" "os" "path" + "sync" "github.com/nginx/agent/v3/internal/model" @@ -24,14 +25,18 @@ import ( mpi "github.com/nginx/agent/v3/api/grpc/mpi/v1" ) -type FileOperator struct{} +type FileOperator struct { + manifestLock *sync.RWMutex +} var _ fileOperator = (*FileOperator)(nil) // FileOperator only purpose is to write files, -func NewFileOperator() *FileOperator { - return &FileOperator{} +func NewFileOperator(manifestLock *sync.RWMutex) *FileOperator { + return &FileOperator{ + manifestLock: manifestLock, + } } func (fo *FileOperator) Write(ctx 
context.Context, fileContent []byte, file *mpi.FileMeta) error { @@ -98,7 +103,7 @@ func (fo *FileOperator) WriteChunkedFile( } slog.DebugContext(ctx, "Writing chunked file", "file", file.GetFileMeta().GetName()) - for i := uint32(0); i < header.GetChunks(); i++ { + for range header.GetChunks() { chunk, recvError := stream.Recv() if recvError != nil { return recvError @@ -152,6 +157,8 @@ func (fo *FileOperator) WriteManifestFile(updatedFiles map[string]*model.Manifes return fmt.Errorf("unable to marshal manifest file json: %w", err) } + fo.manifestLock.Lock() + defer fo.manifestLock.Unlock() // 0755 allows read/execute for all, write for owner if err = os.MkdirAll(manifestDir, dirPerm); err != nil { return fmt.Errorf("unable to create directory %s: %w", manifestDir, err) diff --git a/internal/file/file_operator_test.go b/internal/file/file_operator_test.go index 182c87025..78ea9a193 100644 --- a/internal/file/file_operator_test.go +++ b/internal/file/file_operator_test.go @@ -9,6 +9,7 @@ import ( "context" "os" "path/filepath" + "sync" "testing" "github.com/nginx/agent/v3/pkg/files" @@ -28,7 +29,7 @@ func TestFileOperator_Write(t *testing.T) { fileContent, err := os.ReadFile("../../test/config/nginx/nginx.conf") require.NoError(t, err) defer helpers.RemoveFileWithErrorCheck(t, filePath) - fileOp := NewFileOperator() + fileOp := NewFileOperator(&sync.RWMutex{}) fileMeta := protos.FileMeta(filePath, files.GenerateHash(fileContent)) diff --git a/internal/file/file_plugin.go b/internal/file/file_plugin.go index 330c02335..d94796d2f 100644 --- a/internal/file/file_plugin.go +++ b/internal/file/file_plugin.go @@ -8,6 +8,7 @@ package file import ( "context" "log/slog" + "sync" "github.com/nginx/agent/v3/pkg/files" "github.com/nginx/agent/v3/pkg/id" @@ -27,6 +28,7 @@ var _ bus.Plugin = (*FilePlugin)(nil) // the file plugin does not care about the instance type type FilePlugin struct { + manifestLock *sync.RWMutex messagePipe bus.MessagePipeInterface config *config.Config 
conn grpc.GrpcConnectionInterface @@ -35,12 +37,13 @@ type FilePlugin struct { } func NewFilePlugin(agentConfig *config.Config, grpcConnection grpc.GrpcConnectionInterface, - serverType model.ServerType, + serverType model.ServerType, manifestLock *sync.RWMutex, ) *FilePlugin { return &FilePlugin{ - config: agentConfig, - conn: grpcConnection, - serverType: serverType, + config: agentConfig, + conn: grpcConnection, + serverType: serverType, + manifestLock: manifestLock, } } @@ -52,7 +55,7 @@ func (fp *FilePlugin) Init(ctx context.Context, messagePipe bus.MessagePipeInter slog.DebugContext(ctx, "Starting file plugin") fp.messagePipe = messagePipe - fp.fileManagerService = NewFileManagerService(fp.conn.FileServiceClient(), fp.config) + fp.fileManagerService = NewFileManagerService(fp.conn.FileServiceClient(), fp.config, fp.manifestLock) return nil } @@ -80,34 +83,36 @@ func (fp *FilePlugin) Info() *bus.Info { // nolint: cyclop, revive func (fp *FilePlugin) Process(ctx context.Context, msg *bus.Message) { + ctxWithMetadata := fp.config.NewContextWithLabels(ctx) + if logger.ServerType(ctx) == "" { - ctx = context.WithValue( - ctx, + ctxWithMetadata = context.WithValue( + ctxWithMetadata, logger.ServerTypeContextKey, slog.Any(logger.ServerTypeKey, fp.serverType.String()), ) } - if logger.ServerType(ctx) == fp.serverType.String() { + if logger.ServerType(ctxWithMetadata) == fp.serverType.String() { switch msg.Topic { case bus.ConnectionResetTopic: - fp.handleConnectionReset(ctx, msg) + fp.handleConnectionReset(ctxWithMetadata, msg) case bus.ConnectionCreatedTopic: - slog.DebugContext(ctx, "File plugin received connection created message") + slog.DebugContext(ctxWithMetadata, "File plugin received connection created message") fp.fileManagerService.SetIsConnected(true) case bus.NginxConfigUpdateTopic: - fp.handleNginxConfigUpdate(ctx, msg) + fp.handleNginxConfigUpdate(ctxWithMetadata, msg) case bus.ConfigUploadRequestTopic: - fp.handleConfigUploadRequest(ctx, msg) + 
fp.handleConfigUploadRequest(ctxWithMetadata, msg) case bus.ConfigApplyRequestTopic: - fp.handleConfigApplyRequest(ctx, msg) + fp.handleConfigApplyRequest(ctxWithMetadata, msg) case bus.ConfigApplyCompleteTopic: - fp.handleConfigApplyComplete(ctx, msg) + fp.handleConfigApplyComplete(ctxWithMetadata, msg) case bus.ConfigApplySuccessfulTopic: - fp.handleConfigApplySuccess(ctx, msg) + fp.handleConfigApplySuccess(ctxWithMetadata, msg) case bus.ConfigApplyFailedTopic: - fp.handleConfigApplyFailedRequest(ctx, msg) + fp.handleConfigApplyFailedRequest(ctxWithMetadata, msg) default: - slog.DebugContext(ctx, "File plugin received unknown topic", "topic", msg.Topic) + slog.DebugContext(ctxWithMetadata, "File plugin received unknown topic", "topic", msg.Topic) } } } @@ -145,7 +150,7 @@ func (fp *FilePlugin) handleConnectionReset(ctx context.Context, msg *bus.Messag fp.conn = newConnection reconnect = fp.fileManagerService.IsConnected() - fp.fileManagerService = NewFileManagerService(fp.conn.FileServiceClient(), fp.config) + fp.fileManagerService = NewFileManagerService(fp.conn.FileServiceClient(), fp.config, fp.manifestLock) fp.fileManagerService.SetIsConnected(reconnect) slog.DebugContext(ctx, "File manager service client reset successfully") diff --git a/internal/file/file_plugin_test.go b/internal/file/file_plugin_test.go index f1cb08403..23403a71f 100644 --- a/internal/file/file_plugin_test.go +++ b/internal/file/file_plugin_test.go @@ -7,8 +7,9 @@ package file import ( "context" - "fmt" + "errors" "os" + "sync" "testing" "time" @@ -31,7 +32,8 @@ import ( ) func TestFilePlugin_Info(t *testing.T) { - filePlugin := NewFilePlugin(types.AgentConfig(), &grpcfakes.FakeGrpcConnectionInterface{}, model.Command) + filePlugin := NewFilePlugin(types.AgentConfig(), &grpcfakes.FakeGrpcConnectionInterface{}, + model.Command, &sync.RWMutex{}) assert.Equal(t, "file", filePlugin.Info().Name) } @@ -39,14 +41,15 @@ func TestFilePlugin_Close(t *testing.T) { ctx := context.Background() 
fakeGrpcConnection := &grpcfakes.FakeGrpcConnectionInterface{} - filePlugin := NewFilePlugin(types.AgentConfig(), fakeGrpcConnection, model.Command) + filePlugin := NewFilePlugin(types.AgentConfig(), fakeGrpcConnection, model.Command, &sync.RWMutex{}) filePlugin.Close(ctx) assert.Equal(t, 1, fakeGrpcConnection.CloseCallCount()) } func TestFilePlugin_Subscriptions(t *testing.T) { - filePlugin := NewFilePlugin(types.AgentConfig(), &grpcfakes.FakeGrpcConnectionInterface{}, model.Command) + filePlugin := NewFilePlugin(types.AgentConfig(), &grpcfakes.FakeGrpcConnectionInterface{}, + model.Command, &sync.RWMutex{}) assert.Equal( t, []string{ @@ -62,7 +65,8 @@ func TestFilePlugin_Subscriptions(t *testing.T) { filePlugin.Subscriptions(), ) - readOnlyFilePlugin := NewFilePlugin(types.AgentConfig(), &grpcfakes.FakeGrpcConnectionInterface{}, model.Auxiliary) + readOnlyFilePlugin := NewFilePlugin(types.AgentConfig(), &grpcfakes.FakeGrpcConnectionInterface{}, + model.Auxiliary, &sync.RWMutex{}) assert.Equal(t, []string{ bus.ConnectionResetTopic, bus.ConnectionCreatedTopic, @@ -93,7 +97,7 @@ func TestFilePlugin_Process_NginxConfigUpdateTopic(t *testing.T) { fakeGrpcConnection.FileServiceClientReturns(fakeFileServiceClient) messagePipe := busfakes.NewFakeMessagePipe() - filePlugin := NewFilePlugin(types.AgentConfig(), fakeGrpcConnection, model.Command) + filePlugin := NewFilePlugin(types.AgentConfig(), fakeGrpcConnection, model.Command, &sync.RWMutex{}) err := filePlugin.Init(ctx, messagePipe) require.NoError(t, err) @@ -112,7 +116,7 @@ func TestFilePlugin_Process_ConfigApplyRequestTopic(t *testing.T) { ctx := context.Background() tempDir := t.TempDir() - filePath := fmt.Sprintf("%s/nginx.conf", tempDir) + filePath := tempDir + "/nginx.conf" fileContent := []byte("location /test {\n return 200 \"Test location\\n\";\n}") fileHash := files.GenerateHash(fileContent) @@ -139,19 +143,19 @@ func TestFilePlugin_Process_ConfigApplyRequestTopic(t *testing.T) { }, { name: "Test 2 - Fail, 
Rollback", - configApplyReturnsErr: fmt.Errorf("something went wrong"), + configApplyReturnsErr: errors.New("something went wrong"), configApplyStatus: model.RollbackRequired, message: message, }, { name: "Test 3 - Fail, No Rollback", - configApplyReturnsErr: fmt.Errorf("something went wrong"), + configApplyReturnsErr: errors.New("something went wrong"), configApplyStatus: model.Error, message: message, }, { name: "Test 4 - Fail to cast payload", - configApplyReturnsErr: fmt.Errorf("something went wrong"), + configApplyReturnsErr: errors.New("something went wrong"), configApplyStatus: model.Error, message: nil, }, @@ -168,7 +172,7 @@ func TestFilePlugin_Process_ConfigApplyRequestTopic(t *testing.T) { fakeFileManagerService := &filefakes.FakeFileManagerServiceInterface{} fakeFileManagerService.ConfigApplyReturns(test.configApplyStatus, test.configApplyReturnsErr) messagePipe := busfakes.NewFakeMessagePipe() - filePlugin := NewFilePlugin(agentConfig, fakeGrpcConnection, model.Command) + filePlugin := NewFilePlugin(agentConfig, fakeGrpcConnection, model.Command, &sync.RWMutex{}) err := filePlugin.Init(ctx, messagePipe) filePlugin.fileManagerService = fakeFileManagerService require.NoError(t, err) @@ -266,7 +270,7 @@ func TestFilePlugin_Process_ConfigUploadRequestTopic(t *testing.T) { fakeGrpcConnection.FileServiceClientReturns(fakeFileServiceClient) messagePipe := busfakes.NewFakeMessagePipe() - filePlugin := NewFilePlugin(types.AgentConfig(), fakeGrpcConnection, model.Command) + filePlugin := NewFilePlugin(types.AgentConfig(), fakeGrpcConnection, model.Command, &sync.RWMutex{}) err := filePlugin.Init(ctx, messagePipe) require.NoError(t, err) @@ -321,7 +325,7 @@ func TestFilePlugin_Process_ConfigUploadRequestTopic_Failure(t *testing.T) { fakeGrpcConnection.FileServiceClientReturns(fakeFileServiceClient) messagePipe := busfakes.NewFakeMessagePipe() - filePlugin := NewFilePlugin(types.AgentConfig(), fakeGrpcConnection, model.Command) + filePlugin := 
NewFilePlugin(types.AgentConfig(), fakeGrpcConnection, model.Command, &sync.RWMutex{}) err := filePlugin.Init(ctx, messagePipe) require.NoError(t, err) @@ -367,13 +371,13 @@ func TestFilePlugin_Process_ConfigApplyFailedTopic(t *testing.T) { }, { name: "Test 2 - Rollback Fail", - rollbackReturns: fmt.Errorf("something went wrong"), + rollbackReturns: errors.New("something went wrong"), instanceID: instanceID, }, { name: "Test 3 - Fail to cast payload", - rollbackReturns: fmt.Errorf("something went wrong"), + rollbackReturns: errors.New("something went wrong"), instanceID: "", }, } @@ -389,7 +393,7 @@ func TestFilePlugin_Process_ConfigApplyFailedTopic(t *testing.T) { messagePipe := busfakes.NewFakeMessagePipe() agentConfig := types.AgentConfig() - filePlugin := NewFilePlugin(agentConfig, fakeGrpcConnection, model.Command) + filePlugin := NewFilePlugin(agentConfig, fakeGrpcConnection, model.Command, &sync.RWMutex{}) err := filePlugin.Init(ctx, messagePipe) require.NoError(t, err) @@ -398,7 +402,7 @@ func TestFilePlugin_Process_ConfigApplyFailedTopic(t *testing.T) { data := &model.ConfigApplyMessage{ CorrelationID: "dfsbhj6-bc92-30c1-a9c9-85591422068e", InstanceID: test.instanceID, - Error: fmt.Errorf("something went wrong with config apply"), + Error: errors.New("something went wrong with config apply"), } filePlugin.Process(ctx, &bus.Message{Topic: bus.ConfigApplyFailedTopic, Data: data}) @@ -436,7 +440,7 @@ func TestFilePlugin_Process_ConfigApplyRollbackCompleteTopic(t *testing.T) { messagePipe := busfakes.NewFakeMessagePipe() agentConfig := types.AgentConfig() fakeGrpcConnection := &grpcfakes.FakeGrpcConnectionInterface{} - filePlugin := NewFilePlugin(agentConfig, fakeGrpcConnection, model.Command) + filePlugin := NewFilePlugin(agentConfig, fakeGrpcConnection, model.Command, &sync.RWMutex{}) err := filePlugin.Init(ctx, messagePipe) require.NoError(t, err) @@ -481,7 +485,7 @@ func TestFilePlugin_Process_ConfigApplyCompleteTopic(t *testing.T) { messagePipe := 
busfakes.NewFakeMessagePipe() agentConfig := types.AgentConfig() fakeGrpcConnection := &grpcfakes.FakeGrpcConnectionInterface{} - filePlugin := NewFilePlugin(agentConfig, fakeGrpcConnection, model.Command) + filePlugin := NewFilePlugin(agentConfig, fakeGrpcConnection, model.Command, &sync.RWMutex{}) err := filePlugin.Init(ctx, messagePipe) require.NoError(t, err) diff --git a/internal/file/file_service_operator.go b/internal/file/file_service_operator.go index 6bf835fa0..a37951b29 100644 --- a/internal/file/file_service_operator.go +++ b/internal/file/file_service_operator.go @@ -15,6 +15,7 @@ import ( "math" "os" "slices" + "sync" "sync/atomic" "github.com/cenkalti/backoff/v4" @@ -41,14 +42,16 @@ type FileServiceOperator struct { var _ fileServiceOperatorInterface = (*FileServiceOperator)(nil) -func NewFileServiceOperator(agentConfig *config.Config, fileServiceClient mpi.FileServiceClient) *FileServiceOperator { +func NewFileServiceOperator(agentConfig *config.Config, fileServiceClient mpi.FileServiceClient, + manifestLock *sync.RWMutex, +) *FileServiceOperator { isConnected := &atomic.Bool{} isConnected.Store(false) return &FileServiceOperator{ fileServiceClient: fileServiceClient, agentConfig: agentConfig, - fileOperator: NewFileOperator(), + fileOperator: NewFileOperator(manifestLock), isConnected: isConnected, } } @@ -97,20 +100,6 @@ func (fso *FileServiceOperator) File(ctx context.Context, file *mpi.File, return fso.validateFileHash(file.GetFileMeta().GetName(), fileActions) } -func (fso *FileServiceOperator) validateFileHash(filePath string, fileActions map[string]*model.FileCache) error { - content, err := os.ReadFile(filePath) - if err != nil { - return err - } - fileHash := files.GenerateHash(content) - - if fileHash != fileActions[filePath].File.GetFileMeta().GetHash() { - return fmt.Errorf("error writing file, file hash does not match for file %s", filePath) - } - - return nil -} - func (fso *FileServiceOperator) UpdateOverview( ctx context.Context, 
instanceID string, @@ -183,7 +172,7 @@ func (fso *FileServiceOperator) UpdateOverview( slog.DebugContext(newCtx, "UpdateOverview response", "response", response) if response.GetOverview() == nil { - slog.Debug("UpdateOverview response is empty") + slog.DebugContext(ctx, "UpdateOverview response is empty") return nil } delta := files.ConvertToMapOfFiles(response.GetOverview().GetFiles()) @@ -195,25 +184,37 @@ func (fso *FileServiceOperator) UpdateOverview( return err } -func (fso *FileServiceOperator) updateFiles( - ctx context.Context, - delta map[string]*mpi.File, - instanceID string, - iteration int, -) error { - diffFiles := slices.Collect(maps.Values(delta)) +func (fso *FileServiceOperator) ChunkedFile(ctx context.Context, file *mpi.File) error { + slog.DebugContext(ctx, "Getting chunked file", "file", file.GetFileMeta().GetName()) - for _, file := range diffFiles { - updateErr := fso.UpdateFile(ctx, instanceID, file) - if updateErr != nil { - return updateErr - } + stream, err := fso.fileServiceClient.GetFileStream(ctx, &mpi.GetFileRequest{ + MessageMeta: &mpi.MessageMeta{ + MessageId: id.GenerateMessageID(), + CorrelationId: logger.CorrelationID(ctx), + Timestamp: timestamppb.Now(), + }, + FileMeta: file.GetFileMeta(), + }) + if err != nil { + return fmt.Errorf("error getting file stream for %s: %w", file.GetFileMeta().GetName(), err) } - iteration++ - slog.Info("Updating file overview after file updates", "attempt_number", iteration) + // Get header chunk first + headerChunk, recvHeaderChunkError := stream.Recv() + if recvHeaderChunkError != nil { + return recvHeaderChunkError + } - return fso.UpdateOverview(ctx, instanceID, diffFiles, iteration) + slog.DebugContext(ctx, "File header chunk received", "header_chunk", headerChunk) + + header := headerChunk.GetHeader() + + writeChunkedFileError := fso.fileOperator.WriteChunkedFile(ctx, file, header, stream) + if writeChunkedFileError != nil { + return writeChunkedFileError + } + + return nil } func (fso 
*FileServiceOperator) UpdateFile( @@ -235,6 +236,41 @@ func (fso *FileServiceOperator) UpdateFile( return fso.sendUpdateFileStream(ctx, fileToUpdate, fso.agentConfig.Client.Grpc.FileChunkSize) } +func (fso *FileServiceOperator) validateFileHash(filePath string, fileActions map[string]*model.FileCache) error { + content, err := os.ReadFile(filePath) + if err != nil { + return err + } + fileHash := files.GenerateHash(content) + + if fileHash != fileActions[filePath].File.GetFileMeta().GetHash() { + return fmt.Errorf("error writing file, file hash does not match for file %s", filePath) + } + + return nil +} + +func (fso *FileServiceOperator) updateFiles( + ctx context.Context, + delta map[string]*mpi.File, + instanceID string, + iteration int, +) error { + diffFiles := slices.Collect(maps.Values(delta)) + + for _, file := range diffFiles { + updateErr := fso.UpdateFile(ctx, instanceID, file) + if updateErr != nil { + return updateErr + } + } + + iteration++ + slog.InfoContext(ctx, "Updating file overview after file updates", "attempt_number", iteration) + + return fso.UpdateOverview(ctx, instanceID, diffFiles, iteration) +} + func (fso *FileServiceOperator) sendUpdateFileRequest( ctx context.Context, fileToUpdate *mpi.File, @@ -304,7 +340,7 @@ func (fso *FileServiceOperator) sendUpdateFileStream( chunkSize uint32, ) error { if chunkSize == 0 { - return fmt.Errorf("file chunk size must be greater than zero") + return errors.New("file chunk size must be greater than zero") } updateFileStreamClient, err := fso.fileServiceClient.UpdateFileStream(ctx) @@ -465,39 +501,6 @@ func (fso *FileServiceOperator) sendFileUpdateStreamChunk( return backoff.Retry(sendUpdateFileChunk, backoffHelpers.Context(backOffCtx, fso.agentConfig.Client.Backoff)) } -func (fso *FileServiceOperator) ChunkedFile(ctx context.Context, file *mpi.File) error { - slog.DebugContext(ctx, "Getting chunked file", "file", file.GetFileMeta().GetName()) - - stream, err := fso.fileServiceClient.GetFileStream(ctx, 
&mpi.GetFileRequest{ - MessageMeta: &mpi.MessageMeta{ - MessageId: id.GenerateMessageID(), - CorrelationId: logger.CorrelationID(ctx), - Timestamp: timestamppb.Now(), - }, - FileMeta: file.GetFileMeta(), - }) - if err != nil { - return fmt.Errorf("error getting file stream for %s: %w", file.GetFileMeta().GetName(), err) - } - - // Get header chunk first - headerChunk, recvHeaderChunkError := stream.Recv() - if recvHeaderChunkError != nil { - return recvHeaderChunkError - } - - slog.DebugContext(ctx, "File header chunk received", "header_chunk", headerChunk) - - header := headerChunk.GetHeader() - - writeChunkedFileError := fso.fileOperator.WriteChunkedFile(ctx, file, header, stream) - if writeChunkedFileError != nil { - return writeChunkedFileError - } - - return nil -} - func (fso *FileServiceOperator) setupIdentifiers(ctx context.Context, iteration int) (context.Context, string) { correlationID := logger.CorrelationID(ctx) var requestCorrelationID slog.Attr diff --git a/internal/file/file_service_operator_test.go b/internal/file/file_service_operator_test.go index 632749fbb..c2f9bf22a 100644 --- a/internal/file/file_service_operator_test.go +++ b/internal/file/file_service_operator_test.go @@ -9,6 +9,7 @@ import ( "context" "os" "path/filepath" + "sync" "sync/atomic" "testing" @@ -45,7 +46,7 @@ func TestFileServiceOperator_UpdateOverview(t *testing.T) { fakeFileServiceClient.UpdateFileReturns(&mpi.UpdateFileResponse{}, nil) - fileServiceOperator := NewFileServiceOperator(types.AgentConfig(), fakeFileServiceClient) + fileServiceOperator := NewFileServiceOperator(types.AgentConfig(), fakeFileServiceClient, &sync.RWMutex{}) fileServiceOperator.SetIsConnected(true) err := fileServiceOperator.UpdateOverview(ctx, "123", []*mpi.File{ @@ -75,7 +76,7 @@ func TestFileServiceOperator_UpdateOverview_MaxIterations(t *testing.T) { fakeFileServiceClient := &v1fakes.FakeFileServiceClient{} // do 5 iterations - for i := 0; i <= 5; i++ { + for i := range 6 { 
fakeFileServiceClient.UpdateOverviewReturnsOnCall(i, &mpi.UpdateOverviewResponse{ Overview: overview, }, nil) @@ -83,7 +84,7 @@ func TestFileServiceOperator_UpdateOverview_MaxIterations(t *testing.T) { fakeFileServiceClient.UpdateFileReturns(&mpi.UpdateFileResponse{}, nil) - fileServiceOperator := NewFileServiceOperator(types.AgentConfig(), fakeFileServiceClient) + fileServiceOperator := NewFileServiceOperator(types.AgentConfig(), fakeFileServiceClient, &sync.RWMutex{}) fileServiceOperator.SetIsConnected(true) err := fileServiceOperator.UpdateOverview(ctx, "123", []*mpi.File{ @@ -126,7 +127,7 @@ func TestFileManagerService_UpdateFile(t *testing.T) { } fakeFileServiceClient := &v1fakes.FakeFileServiceClient{} - fileServiceOperator := NewFileServiceOperator(types.AgentConfig(), fakeFileServiceClient) + fileServiceOperator := NewFileServiceOperator(types.AgentConfig(), fakeFileServiceClient, &sync.RWMutex{}) fileServiceOperator.SetIsConnected(true) err := fileServiceOperator.UpdateFile(ctx, "123", &mpi.File{FileMeta: fileMeta}) @@ -150,7 +151,7 @@ func TestFileManagerService_UpdateFile_LargeFile(t *testing.T) { fakeFileServiceClient := &v1fakes.FakeFileServiceClient{} fakeClientStreamingClient := &FakeClientStreamingClient{sendCount: atomic.Int32{}} fakeFileServiceClient.UpdateFileStreamReturns(fakeClientStreamingClient, nil) - fileServiceOperator := NewFileServiceOperator(types.AgentConfig(), fakeFileServiceClient) + fileServiceOperator := NewFileServiceOperator(types.AgentConfig(), fakeFileServiceClient, &sync.RWMutex{}) fileServiceOperator.SetIsConnected(true) err := fileServiceOperator.UpdateFile(ctx, "123", &mpi.File{FileMeta: fileMeta}) diff --git a/internal/grpc/certificates.go b/internal/grpc/certificates.go index a66c7bf24..aabc34c01 100644 --- a/internal/grpc/certificates.go +++ b/internal/grpc/certificates.go @@ -8,6 +8,7 @@ package grpc import ( "crypto/tls" "crypto/x509" + "errors" "fmt" "os" ) @@ -50,7 +51,7 @@ func appendCertKeyPair(tlsConfig 
*tls.Config, certFile, keyFile string) error { return nil } if certFile == "" || keyFile == "" { - return fmt.Errorf("cert and key must both be provided") + return errors.New("cert and key must both be provided") } certificate, err := tls.LoadX509KeyPair(certFile, keyFile) diff --git a/internal/grpc/grpc.go b/internal/grpc/grpc.go index a36c146df..fe0feaf62 100644 --- a/internal/grpc/grpc.go +++ b/internal/grpc/grpc.go @@ -12,6 +12,7 @@ import ( "fmt" "log/slog" "net" + "strconv" "strings" "sync" @@ -82,7 +83,7 @@ func NewGrpcConnection(ctx context.Context, agentConfig *config.Config, serverAddr := net.JoinHostPort( commandConfig.Server.Host, - fmt.Sprint(commandConfig.Server.Port), + strconv.Itoa(commandConfig.Server.Port), ) slog.InfoContext(ctx, "Dialing grpc server", "server_addr", serverAddr) diff --git a/internal/logger/logger.go b/internal/logger/logger.go index 1fcdbe216..4cf10c869 100644 --- a/internal/logger/logger.go +++ b/internal/logger/logger.go @@ -154,9 +154,9 @@ func CorrelationIDAttr(ctx context.Context) slog.Attr { value, ok := ctx.Value(CorrelationIDContextKey).(slog.Attr) if !ok { correlationID := GenerateCorrelationID() - slog.Debug( + slog.DebugContext( + ctx, "Correlation ID not found in context, generating new correlation ID", - "correlation_id", correlationID) return GenerateCorrelationID() diff --git a/internal/plugin/plugin_manager.go b/internal/plugin/plugin_manager.go index 30bcfc637..3f2c3869a 100644 --- a/internal/plugin/plugin_manager.go +++ b/internal/plugin/plugin_manager.go @@ -8,6 +8,7 @@ package plugin import ( "context" "log/slog" + "sync" "github.com/nginx/agent/v3/internal/model" @@ -27,9 +28,11 @@ import ( func LoadPlugins(ctx context.Context, agentConfig *config.Config) []bus.Plugin { plugins := make([]bus.Plugin, 0) + manifestLock := &sync.RWMutex{} + plugins = addResourcePlugin(plugins, agentConfig) - plugins = addCommandAndFilePlugins(ctx, plugins, agentConfig) - plugins = addAuxiliaryCommandAndFilePlugins(ctx, plugins, 
agentConfig) + plugins = addCommandAndFilePlugins(ctx, plugins, agentConfig, manifestLock) + plugins = addAuxiliaryCommandAndFilePlugins(ctx, plugins, agentConfig, manifestLock) plugins = addCollectorPlugin(ctx, agentConfig, plugins) plugins = addWatcherPlugin(plugins, agentConfig) @@ -43,7 +46,9 @@ func addResourcePlugin(plugins []bus.Plugin, agentConfig *config.Config) []bus.P return plugins } -func addCommandAndFilePlugins(ctx context.Context, plugins []bus.Plugin, agentConfig *config.Config) []bus.Plugin { +func addCommandAndFilePlugins(ctx context.Context, plugins []bus.Plugin, agentConfig *config.Config, + manifestLock *sync.RWMutex, +) []bus.Plugin { if agentConfig.IsCommandGrpcClientConfigured() { grpcConnection, err := grpc.NewGrpcConnection(ctx, agentConfig, agentConfig.Command) if err != nil { @@ -51,7 +56,7 @@ func addCommandAndFilePlugins(ctx context.Context, plugins []bus.Plugin, agentCo } else { commandPlugin := command.NewCommandPlugin(agentConfig, grpcConnection, model.Command) plugins = append(plugins, commandPlugin) - filePlugin := file.NewFilePlugin(agentConfig, grpcConnection, model.Command) + filePlugin := file.NewFilePlugin(agentConfig, grpcConnection, model.Command, manifestLock) plugins = append(plugins, filePlugin) } } else { @@ -63,7 +68,7 @@ func addCommandAndFilePlugins(ctx context.Context, plugins []bus.Plugin, agentCo } func addAuxiliaryCommandAndFilePlugins(ctx context.Context, plugins []bus.Plugin, - agentConfig *config.Config, + agentConfig *config.Config, manifestLock *sync.RWMutex, ) []bus.Plugin { if agentConfig.IsAuxiliaryCommandGrpcClientConfigured() { auxGRPCConnection, err := grpc.NewGrpcConnection(ctx, agentConfig, agentConfig.AuxiliaryCommand) @@ -72,7 +77,7 @@ func addAuxiliaryCommandAndFilePlugins(ctx context.Context, plugins []bus.Plugin } else { auxCommandPlugin := command.NewCommandPlugin(agentConfig, auxGRPCConnection, model.Auxiliary) plugins = append(plugins, auxCommandPlugin) - readFilePlugin := 
file.NewFilePlugin(agentConfig, auxGRPCConnection, model.Auxiliary) + readFilePlugin := file.NewFilePlugin(agentConfig, auxGRPCConnection, model.Auxiliary, manifestLock) plugins = append(plugins, readFilePlugin) } } else { diff --git a/internal/plugin/plugin_manager_test.go b/internal/plugin/plugin_manager_test.go index 53fe3663b..172cb1542 100644 --- a/internal/plugin/plugin_manager_test.go +++ b/internal/plugin/plugin_manager_test.go @@ -147,7 +147,7 @@ func TestLoadPlugins(t *testing.T) { t.Run(test.name, func(tt *testing.T) { t.Logf("running test %s", test.name) result := LoadPlugins(ctx, test.input) - assert.Equal(tt, len(test.expected), len(result)) + assert.Len(tt, result, len(test.expected)) for i, expectedPlugin := range test.expected { assert.IsType(tt, expectedPlugin, result[i]) } diff --git a/internal/resource/nginx_instance_operator.go b/internal/resource/nginx_instance_operator.go index 70fb3af0e..fa515c081 100644 --- a/internal/resource/nginx_instance_operator.go +++ b/internal/resource/nginx_instance_operator.go @@ -52,20 +52,6 @@ func (i *NginxInstanceOperator) Validate(ctx context.Context, instance *mpi.Inst return nil } -func (i *NginxInstanceOperator) validateConfigCheckResponse(out []byte) error { - if bytes.Contains(out, []byte("[emerg]")) || - bytes.Contains(out, []byte("[alert]")) || - bytes.Contains(out, []byte("[crit]")) { - return fmt.Errorf("error running nginx -t -c:\n%s", out) - } - - if i.treatWarningsAsErrors && bytes.Contains(out, []byte("[warn]")) { - return fmt.Errorf("error running nginx -t -c:\n%s", out) - } - - return nil -} - func (i *NginxInstanceOperator) Reload(ctx context.Context, instance *mpi.Instance) error { var errorsFound error slog.InfoContext(ctx, "Reloading NGINX PID", "pid", @@ -87,12 +73,12 @@ func (i *NginxInstanceOperator) Reload(ctx context.Context, instance *mpi.Instan numberOfExpectedMessages := len(errorLogs) - for i := 0; i < numberOfExpectedMessages; i++ { + for range numberOfExpectedMessages { logErr := 
<-logErrorChannel slog.InfoContext(ctx, "Message received in logErrorChannel", "error", logErr) if logErr != nil { errorsFound = errors.Join(errorsFound, logErr) - slog.Info("Errors Found", "", errorsFound) + slog.InfoContext(ctx, "Errors Found", "", errorsFound) } } @@ -105,6 +91,20 @@ func (i *NginxInstanceOperator) Reload(ctx context.Context, instance *mpi.Instan return nil } +func (i *NginxInstanceOperator) validateConfigCheckResponse(out []byte) error { + if bytes.Contains(out, []byte("[emerg]")) || + bytes.Contains(out, []byte("[alert]")) || + bytes.Contains(out, []byte("[crit]")) { + return fmt.Errorf("error running nginx -t -c:\n%s", out) + } + + if i.treatWarningsAsErrors && bytes.Contains(out, []byte("[warn]")) { + return fmt.Errorf("error running nginx -t -c:\n%s", out) + } + + return nil +} + func (i *NginxInstanceOperator) errorLogs(instance *mpi.Instance) (errorLogs []string) { if instance.GetInstanceMeta().GetInstanceType() == mpi.InstanceMeta_INSTANCE_TYPE_NGINX_PLUS { errorLogs = instance.GetInstanceRuntime().GetNginxPlusRuntimeInfo().GetErrorLogs() diff --git a/internal/resource/nginx_instance_operator_test.go b/internal/resource/nginx_instance_operator_test.go index 259c3f9ad..186aa05bb 100644 --- a/internal/resource/nginx_instance_operator_test.go +++ b/internal/resource/nginx_instance_operator_test.go @@ -75,7 +75,7 @@ func TestInstanceOperator_Validate(t *testing.T) { name: "Test 3: Validate Config failed", out: bytes.NewBufferString("nginx [emerg]"), err: nil, - expected: fmt.Errorf("error running nginx -t -c:\nnginx [emerg]"), + expected: errors.New("error running nginx -t -c:\nnginx [emerg]"), }, } diff --git a/internal/resource/resource_plugin.go b/internal/resource/resource_plugin.go index 940a4f247..32ec56606 100644 --- a/internal/resource/resource_plugin.go +++ b/internal/resource/resource_plugin.go @@ -228,7 +228,8 @@ func (r *Resource) handleWriteConfigSuccessful(ctx context.Context, msg *bus.Mes configContext, err := 
r.resourceService.ApplyConfig(ctx, data.InstanceID) if err != nil { data.Error = err - slog.Error("errors found during config apply, sending error status, rolling back config", "err", err) + slog.ErrorContext(ctx, "errors found during config apply, "+ + "sending error status, rolling back config", "err", err) dpResponse := response.CreateDataPlaneResponse(data.CorrelationID, mpi.CommandResponse_COMMAND_STATUS_ERROR, "Config apply failed, rolling back config", data.InstanceID, err.Error()) r.messagePipe.Process(ctx, &bus.Message{Topic: bus.DataPlaneResponseTopic, Data: dpResponse}) @@ -259,7 +260,7 @@ func (r *Resource) handleRollbackWrite(ctx context.Context, msg *bus.Message) { } _, err := r.resourceService.ApplyConfig(ctx, data.InstanceID) if err != nil { - slog.Error("errors found during rollback, sending failure status", "err", err) + slog.ErrorContext(ctx, "errors found during rollback, sending failure status", "err", err) rollbackResponse := response.CreateDataPlaneResponse(data.CorrelationID, mpi.CommandResponse_COMMAND_STATUS_ERROR, "Rollback failed", data.InstanceID, err.Error()) diff --git a/internal/resource/resource_plugin_test.go b/internal/resource/resource_plugin_test.go index 9af07e9ca..cecb17bbb 100644 --- a/internal/resource/resource_plugin_test.go +++ b/internal/resource/resource_plugin_test.go @@ -10,7 +10,6 @@ import ( "context" "encoding/json" "errors" - "fmt" "sort" "testing" @@ -838,7 +837,7 @@ func TestResource_Process_Rollback(t *testing.T) { Data: &model.ConfigApplyMessage{ CorrelationID: "dfsbhj6-bc92-30c1-a9c9-85591422068e", InstanceID: protos.NginxOssInstance([]string{}).GetInstanceMeta().GetInstanceId(), - Error: fmt.Errorf("something went wrong with config apply"), + Error: errors.New("something went wrong with config apply"), }, }, rollbackErr: nil, @@ -851,7 +850,7 @@ func TestResource_Process_Rollback(t *testing.T) { Data: &model.ConfigApplyMessage{ CorrelationID: "", InstanceID: 
protos.NginxOssInstance([]string{}).GetInstanceMeta().GetInstanceId(), - Error: fmt.Errorf("something went wrong with config apply"), + Error: errors.New("something went wrong with config apply"), }, }, rollbackErr: errors.New("error reloading"), @@ -879,7 +878,7 @@ func TestResource_Process_Rollback(t *testing.T) { return messagePipe.Messages()[i].Topic < messagePipe.Messages()[j].Topic }) - assert.Equal(tt, len(test.topic), len(messagePipe.Messages())) + assert.Len(tt, messagePipe.Messages(), len(test.topic)) assert.Equal(t, test.topic[0], messagePipe.Messages()[0].Topic) diff --git a/internal/resource/resource_service.go b/internal/resource/resource_service.go index feb16263a..ed41c48a2 100644 --- a/internal/resource/resource_service.go +++ b/internal/resource/resource_service.go @@ -227,7 +227,7 @@ func (r *ResourceService) GetHTTPUpstreamServers(ctx context.Context, instance * servers, getServersErr := plusClient.GetHTTPServers(ctx, upstream) - slog.Warn("Error returned from NGINX Plus client, GetHTTPUpstreamServers", "err", getServersErr) + slog.WarnContext(ctx, "Error returned from NGINX Plus client, GetHTTPUpstreamServers", "err", getServersErr) return servers, createPlusAPIError(getServersErr) } @@ -242,7 +242,7 @@ func (r *ResourceService) GetUpstreams(ctx context.Context, instance *mpi.Instan servers, getUpstreamsErr := plusClient.GetUpstreams(ctx) - slog.Warn("Error returned from NGINX Plus client, GetUpstreams", "err", getUpstreamsErr) + slog.WarnContext(ctx, "Error returned from NGINX Plus client, GetUpstreams", "err", getUpstreamsErr) return servers, createPlusAPIError(getUpstreamsErr) } @@ -257,7 +257,7 @@ func (r *ResourceService) GetStreamUpstreams(ctx context.Context, instance *mpi. 
streamUpstreams, getServersErr := plusClient.GetStreamUpstreams(ctx) - slog.Warn("Error returned from NGINX Plus client, GetStreamUpstreams", "err", getServersErr) + slog.WarnContext(ctx, "Error returned from NGINX Plus client, GetStreamUpstreams", "err", getServersErr) return streamUpstreams, createPlusAPIError(getServersErr) } @@ -277,7 +277,7 @@ func (r *ResourceService) UpdateStreamServers(ctx context.Context, instance *mpi added, updated, deleted, updateError := plusClient.UpdateStreamServers(ctx, upstream, servers) - slog.Warn("Error returned from NGINX Plus client, UpdateStreamServers", "err", updateError) + slog.WarnContext(ctx, "Error returned from NGINX Plus client, UpdateStreamServers", "err", updateError) return added, updated, deleted, createPlusAPIError(updateError) } @@ -298,7 +298,7 @@ func (r *ResourceService) UpdateHTTPUpstreamServers(ctx context.Context, instanc added, updated, deleted, updateError := plusClient.UpdateHTTPServers(ctx, upstream, servers) if updateError != nil { - slog.Warn("Error returned from NGINX Plus client, UpdateHTTPUpstreamServers", "err", updateError) + slog.WarnContext(ctx, "Error returned from NGINX Plus client, UpdateHTTPUpstreamServers", "err", updateError) } return added, updated, deleted, createPlusAPIError(updateError) diff --git a/internal/resource/resource_service_test.go b/internal/resource/resource_service_test.go index b60af6d23..0b82f1b18 100644 --- a/internal/resource/resource_service_test.go +++ b/internal/resource/resource_service_test.go @@ -306,23 +306,23 @@ func TestResourceService_ApplyConfig(t *testing.T) { { name: "Test 2: Failed reload", instanceID: protos.NginxOssInstance([]string{}).GetInstanceMeta().GetInstanceId(), - reloadErr: fmt.Errorf("something went wrong"), + reloadErr: errors.New("something went wrong"), validateErr: nil, - expected: fmt.Errorf("failed to reload NGINX %w", fmt.Errorf("something went wrong")), + expected: fmt.Errorf("failed to reload NGINX %w", errors.New("something went 
wrong")), }, { name: "Test 3: Failed validate", instanceID: protos.NginxOssInstance([]string{}).GetInstanceMeta().GetInstanceId(), reloadErr: nil, - validateErr: fmt.Errorf("something went wrong"), - expected: fmt.Errorf("failed validating config %w", fmt.Errorf("something went wrong")), + validateErr: errors.New("something went wrong"), + expected: fmt.Errorf("failed validating config %w", errors.New("something went wrong")), }, { name: "Test 4: Unknown instance ID", instanceID: "unknown", reloadErr: nil, validateErr: nil, - expected: fmt.Errorf("instance unknown not found"), + expected: errors.New("instance unknown not found"), }, } diff --git a/internal/watcher/credentials/credential_watcher_service_test.go b/internal/watcher/credentials/credential_watcher_service_test.go index 31ce8ae98..2d33ad253 100644 --- a/internal/watcher/credentials/credential_watcher_service_test.go +++ b/internal/watcher/credentials/credential_watcher_service_test.go @@ -7,7 +7,7 @@ package credentials import ( "context" - "fmt" + "errors" "os" "path" "testing" @@ -58,7 +58,7 @@ func TestCredentialWatcherService_Watch(t *testing.T) { } func() { - cws.watcher.Errors <- fmt.Errorf("watch error") + cws.watcher.Errors <- errors.New("watch error") }() } diff --git a/internal/watcher/file/file_watcher_service.go b/internal/watcher/file/file_watcher_service.go index 9b95fe7c7..dff86b117 100644 --- a/internal/watcher/file/file_watcher_service.go +++ b/internal/watcher/file/file_watcher_service.go @@ -195,7 +195,7 @@ func (fws *FileWatcherService) checkForUpdates(ctx context.Context, ch chan<- Fi // Check if directories no longer need to be watched fws.removeWatchers(ctx) - if fws.filesChanged.Load() { + if fws.filesChanged.Load() && fws.enabled.Load() { newCtx := context.WithValue( ctx, logger.CorrelationIDContextKey, diff --git a/internal/watcher/health/health_watcher_service_test.go b/internal/watcher/health/health_watcher_service_test.go index fb3fa97f0..b2ed9b78c 100644 --- 
a/internal/watcher/health/health_watcher_service_test.go +++ b/internal/watcher/health/health_watcher_service_test.go @@ -7,6 +7,7 @@ package health import ( "context" + "errors" "fmt" "reflect" "testing" @@ -105,7 +106,7 @@ func TestHealthWatcherService_health(t *testing.T) { fakePlusHealthOp.HealthReturns(protos.UnhealthyInstanceHealth(), nil) fakeUnspecifiedHealthOp := healthfakes.FakeHealthWatcherOperator{} - fakeUnspecifiedHealthOp.HealthReturns(nil, fmt.Errorf("unable to determine health")) + fakeUnspecifiedHealthOp.HealthReturns(nil, errors.New("unable to determine health")) watchers[plusInstance.GetInstanceMeta().GetInstanceId()] = &fakePlusHealthOp watchers[ossInstance.GetInstanceMeta().GetInstanceId()] = &fakeOSSHealthOp diff --git a/internal/watcher/health/nginx_health_watcher_operator.go b/internal/watcher/health/nginx_health_watcher_operator.go index 45afe31e7..0fdd75618 100644 --- a/internal/watcher/health/nginx_health_watcher_operator.go +++ b/internal/watcher/health/nginx_health_watcher_operator.go @@ -51,7 +51,7 @@ func (nhw *NginxHealthWatcher) Health(ctx context.Context, instance *mpi.Instanc } if len(instance.GetInstanceRuntime().GetInstanceChildren()) == 0 { - health.Description = fmt.Sprintf("%s, instance does not have enough children", health.GetDescription()) + health.Description = health.GetDescription() + ", instance does not have enough children" health.InstanceHealthStatus = mpi.InstanceHealth_INSTANCE_HEALTH_STATUS_DEGRADED } diff --git a/internal/watcher/instance/instance_watcher_service.go b/internal/watcher/instance/instance_watcher_service.go index d384a4f22..1bc50c496 100644 --- a/internal/watcher/instance/instance_watcher_service.go +++ b/internal/watcher/instance/instance_watcher_service.go @@ -115,7 +115,7 @@ func (iw *InstanceWatcherService) Watch( if iw.enabled.Load() { iw.checkForUpdates(ctx) } else { - slog.Debug("Skipping check for instance updates, instance watcher is disabled") + slog.DebugContext(ctx, "Skipping check for 
instance updates, instance watcher is disabled") } } } diff --git a/internal/watcher/instance/nginx-app-protect-instance-watcher.go b/internal/watcher/instance/nginx-app-protect-instance-watcher.go index 2f20efdcc..b9ad448e6 100644 --- a/internal/watcher/instance/nginx-app-protect-instance-watcher.go +++ b/internal/watcher/instance/nginx-app-protect-instance-watcher.go @@ -136,16 +136,16 @@ func (w *NginxAppProtectInstanceWatcher) addWatcher(ctx context.Context, version } func (w *NginxAppProtectInstanceWatcher) readVersionFile(ctx context.Context, versionFile string) { - switch { - case versionFile == versionFilePath: + switch versionFile { + case versionFilePath: w.version = w.readFile(ctx, versionFilePath) - case versionFile == releaseFilePath: + case releaseFilePath: w.release = w.readFile(ctx, releaseFilePath) - case versionFile == threatCampaignVersionFilePath: + case threatCampaignVersionFilePath: w.threatCampaignVersion = w.readFile(ctx, threatCampaignVersionFilePath) - case versionFile == enforcerEngineVersionFilePath: + case enforcerEngineVersionFilePath: w.enforcerEngineVersion = w.readFile(ctx, enforcerEngineVersionFilePath) - case versionFile == attackSignatureVersionFilePath: + case attackSignatureVersionFilePath: w.attackSignatureVersion = w.readFile(ctx, attackSignatureVersionFilePath) } } diff --git a/internal/watcher/instance/nginx_process_parser_test.go b/internal/watcher/instance/nginx_process_parser_test.go index fe0c29d81..889807960 100644 --- a/internal/watcher/instance/nginx_process_parser_test.go +++ b/internal/watcher/instance/nginx_process_parser_test.go @@ -8,6 +8,7 @@ package instance import ( "bytes" "context" + "errors" "fmt" "path/filepath" "sort" @@ -124,11 +125,11 @@ func TestNginxProcessParser_Parse(t *testing.T) { }{ { name: "Test 1: NGINX open source", - nginxVersionCommandOutput: fmt.Sprintf(`nginx version: nginx/1.25.3 + nginxVersionCommandOutput: `nginx version: nginx/1.25.3 built by clang 14.0.0 (clang-1400.0.29.202) built 
with OpenSSL 1.1.1s 1 Nov 2022 (running with OpenSSL 1.1.1t 7 Feb 2023) TLS SNI support enabled - configure arguments: %s`, ossArgs), + configure arguments: ` + ossArgs, expected: map[string]*mpi.Instance{ protos.NginxOssInstance([]string{}).GetInstanceMeta().GetInstanceId(): protos.NginxOssInstance( []string{expectedModules}), @@ -136,12 +137,12 @@ func TestNginxProcessParser_Parse(t *testing.T) { }, { name: "Test 2: NGINX plus", - nginxVersionCommandOutput: fmt.Sprintf(` + nginxVersionCommandOutput: ` nginx version: nginx/1.25.3 (nginx-plus-r31-p1) built by gcc 9.4.0 (Ubuntu 9.4.0-1ubuntu1~20.04.2) built with OpenSSL 1.1.1f 31 Mar 2020 TLS SNI support enabled - configure arguments: %s`, plusArgs), + configure arguments: ` + plusArgs, expected: map[string]*mpi.Instance{ protos.NginxPlusInstance([]string{}).GetInstanceMeta().GetInstanceId(): protos.NginxPlusInstance( []string{expectedModules}), @@ -149,11 +150,11 @@ func TestNginxProcessParser_Parse(t *testing.T) { }, { name: "Test 3: No Modules", - nginxVersionCommandOutput: fmt.Sprintf(`nginx version: nginx/1.25.3 + nginxVersionCommandOutput: `nginx version: nginx/1.25.3 built by clang 14.0.0 (clang-1400.0.29.202) built with OpenSSL 1.1.1s 1 Nov 2022 (running with OpenSSL 1.1.1t 7 Feb 2023) TLS SNI support enabled - configure arguments: %s`, noModuleArgs), + configure arguments: ` + noModuleArgs, expected: map[string]*mpi.Instance{ protos.NginxOssInstance([]string{}).GetInstanceMeta().GetInstanceId(): protos. 
NginxOssInstance(nil), @@ -185,7 +186,7 @@ func TestNginxProcessParser_Parse(t *testing.T) { } } - assert.Equal(tt, len(test.expected), len(result)) + assert.Len(tt, result, len(test.expected)) }) } } @@ -196,11 +197,11 @@ func TestNginxProcessParser_Parse_Processes(t *testing.T) { configArgs := fmt.Sprintf(ossConfigArgs, modulePath) - nginxVersionCommandOutput := fmt.Sprintf(`nginx version: nginx/1.25.3 + nginxVersionCommandOutput := `nginx version: nginx/1.25.3 built by clang 14.0.0 (clang-1400.0.29.202) built with OpenSSL 1.1.1s 1 Nov 2022 (running with OpenSSL 1.1.1t 7 Feb 2023) TLS SNI support enabled - configure arguments: %s`, configArgs) + configure arguments: ` + configArgs process1 := protos.NginxOssInstance(nil) instancesTest1 := map[string]*mpi.Instance{ @@ -391,7 +392,7 @@ func TestNginxProcessParser_Parse_Processes(t *testing.T) { assert.True(tt, proto.Equal(test.expected[id], instance)) } - assert.Equal(tt, len(test.expected), len(result)) + assert.Len(tt, result, len(test.expected)) }) } } @@ -420,12 +421,12 @@ func TestGetInfo(t *testing.T) { }{ { name: "Test 1: NGINX open source", - nginxVersionCommandOutput: fmt.Sprintf(` + nginxVersionCommandOutput: ` nginx version: nginx/1.25.3 built by clang 14.0.3 (clang-1403.0.22.14.1) built with OpenSSL 3.1.3 19 Sep 2023 (running with OpenSSL 3.2.0 23 Nov 2023) TLS SNI support enabled - configure arguments: %s`, ossArgs), + configure arguments: ` + ossArgs, process: &nginxprocess.Process{ PID: 1123, Exe: exePath, @@ -488,12 +489,12 @@ func TestGetInfo(t *testing.T) { }, { name: "Test 2: NGINX plus", - nginxVersionCommandOutput: fmt.Sprintf(` + nginxVersionCommandOutput: ` nginx version: nginx/1.25.3 (nginx-plus-r31-p1) built by gcc 9.4.0 (Ubuntu 9.4.0-1ubuntu1~20.04.2) built with OpenSSL 1.1.1f 31 Mar 2020 TLS SNI support enabled - configure arguments: %s`, plusArgs), + configure arguments: ` + plusArgs, process: &nginxprocess.Process{ PID: 3141, Exe: exePath, @@ -592,7 +593,7 @@ func 
TestNginxProcessParser_GetExe(t *testing.T) { { name: "Test 1: Default exe if error executing command -v nginx", commandOutput: []byte{}, - commandError: fmt.Errorf("command error"), + commandError: errors.New("command error"), expected: "/usr/bin/nginx", }, { @@ -622,8 +623,8 @@ func TestGetConfigPathFromCommand(t *testing.T) { assert.Equal(t, "/tmp/nginx.conf", result) result = confPathFromCommand("nginx: master process nginx -c") - assert.Equal(t, "", result) + assert.Empty(t, result) result = confPathFromCommand("") - assert.Equal(t, "", result) + assert.Empty(t, result) } diff --git a/pkg/files/file_helpers_benchmark_test.go b/pkg/files/file_helpers_benchmark_test.go index e7f0c0938..2ffcbac65 100644 --- a/pkg/files/file_helpers_benchmark_test.go +++ b/pkg/files/file_helpers_benchmark_test.go @@ -26,7 +26,7 @@ func BenchmarkGenerateConfigVersion(b *testing.B) { }, } - for i := 0; i < b.N; i++ { + for range b.N { files := []*mpi.File{ file1, file2, diff --git a/pkg/files/file_stream.go b/pkg/files/file_stream.go index 636582486..9f774adcd 100644 --- a/pkg/files/file_stream.go +++ b/pkg/files/file_stream.go @@ -40,7 +40,7 @@ func SendChunkedFile( // since "send" is synchronous and data in buffer re-marshaled, we shouldn't // need to reallocate for each chunk. buf := make([]byte, chunkSize) - for i := 0; i < chunkCount; i++ { + for i := range chunkCount { // using ReadFull here, since the Read may get partial fills depends on impl, due to Read being // at most once op per call. But we want to fill our buffer every loop, for the defined chunk size. 
// Set size of the buffer, so we don't get the error from io.ReadFull, if the data fits just right @@ -104,7 +104,7 @@ func recvContents( totalSize int, ) error { // receive and write all content chunks - for i := 0; i < chunkCount; i++ { + for i := range chunkCount { chunk, err := src.Recv() if err != nil { return fmt.Errorf("unable to receive chunk id %d: %w", i, err) diff --git a/pkg/files/file_stream_test.go b/pkg/files/file_stream_test.go index c4fe1bc1d..2c9e19dff 100644 --- a/pkg/files/file_stream_test.go +++ b/pkg/files/file_stream_test.go @@ -7,7 +7,7 @@ package files_test import ( "bytes" - "fmt" + "errors" "io" "math/rand" "testing" @@ -126,7 +126,7 @@ func TestSendChunkedFile(t *testing.T) { // we send up to the Size in the file meta content: randBytes(2300), clientFunc: func(cl *v1fakes.FakeFileService_GetFileStreamServer) { - cl.SendReturns(fmt.Errorf("error")) + cl.SendReturns(errors.New("error")) }, expectedErrString: "unable to send header chunk", }, @@ -146,7 +146,7 @@ func TestSendChunkedFile(t *testing.T) { clientFunc: func(cl *v1fakes.FakeFileService_GetFileStreamServer) { cl.SendCalls(func(chunk *v1.FileDataChunk) error { if cl.SendCallCount() > 1 { - return fmt.Errorf("foo") + return errors.New("foo") } return nil @@ -191,7 +191,7 @@ func TestSendChunkedFile(t *testing.T) { chunks := test.header.Header.GetChunks() assert.EqualValues(t, chunks+1, cl.SendCallCount()) - for i := 0; i < int(chunks+1); i++ { + for i := range int(chunks + 1) { arg := cl.SendArgsForCall(i) switch i { case 0: @@ -214,12 +214,12 @@ func TestSendChunkedFile(t *testing.T) { type badWriter struct{} func (b badWriter) Write(p []byte) (n int, err error) { - return 0, fmt.Errorf("error") + return 0, errors.New("error") } -// nolint: revive,govet,maintidx +//nolint:revive,govet,maintidx func TestRecvChunkedFile(t *testing.T) { - recvErr := fmt.Errorf("recv error") + recvErr := errors.New("recv error") type recvReturn struct { chunk *v1.FileDataChunk err error diff --git 
a/pkg/id/uuid.go b/pkg/id/uuid.go index 043b7a534..35140a7a5 100644 --- a/pkg/id/uuid.go +++ b/pkg/id/uuid.go @@ -7,6 +7,7 @@ package id import ( "crypto/sha256" + "encoding/hex" "fmt" "github.com/google/uuid" @@ -28,10 +29,16 @@ import ( // // A string representation of the generated UUID. func Generate(format string, a ...interface{}) string { + // need to set a default to avoid non-constant format string in call + f := "" + if format != "" { + f = format + } + h := sha256.New() - s := fmt.Sprintf(format, a...) + s := fmt.Sprintf(f, a...) _, _ = h.Write([]byte(s)) - id := fmt.Sprintf("%x", h.Sum(nil)) + id := hex.EncodeToString(h.Sum(nil)) return uuid.NewMD5(uuid.Nil, []byte(id)).String() } diff --git a/pkg/nginxprocess/process_test.go b/pkg/nginxprocess/process_test.go index d7c3c44f5..5af69b832 100644 --- a/pkg/nginxprocess/process_test.go +++ b/pkg/nginxprocess/process_test.go @@ -216,14 +216,14 @@ func BenchmarkList(b *testing.B) { b.Skipf("skipping to prevent CI flake") ctx := context.Background() b.Run("base", func(b *testing.B) { - for i := 0; i < b.N; i++ { + for range b.N { _, err := nginxprocess.List(ctx) require.NoError(b, err) } }) b.Run("WithStatus", func(b *testing.B) { - for i := 0; i < b.N; i++ { + for range b.N { _, err := nginxprocess.List(ctx, nginxprocess.WithStatus(true)) require.NoError(b, err) } diff --git a/pkg/tls/self_signed_cert.go b/pkg/tls/self_signed_cert.go index a7220549a..4410afd37 100644 --- a/pkg/tls/self_signed_cert.go +++ b/pkg/tls/self_signed_cert.go @@ -114,7 +114,8 @@ func GenerateCA(now time.Time, caCertPath string) (*x509.Certificate, *ecdsa.Pri // GenerateServerCerts creates a server CA, Cert and Key and writes them to specified destinations. // Hostnames are a list of subject alternative names. // If cert files are already present, does nothing, returns true. 
-// nolint: revive +// +//nolint:revive func GenerateServerCerts(hostnames []string, caPath, certPath, keyPath string) (existingCert bool, err error) { // Check for and return existing cert if it already exists existingCert, existingCertErr := DoesCertAlreadyExist(certPath) @@ -195,7 +196,7 @@ func DoesCertAlreadyExist(certPath string) (bool, error) { if _, certErr := os.Stat(certPath); certErr == nil { certBytes, certReadErr := os.ReadFile(certPath) if certReadErr != nil { - return false, fmt.Errorf("error reading existing certificate file") + return false, errors.New("error reading existing certificate file") } certPEM, _ := pem.Decode(certBytes) if certPEM == nil { diff --git a/pkg/tls/self_signed_cert_test.go b/pkg/tls/self_signed_cert_test.go index 64911d77a..bbcf0bdd1 100644 --- a/pkg/tls/self_signed_cert_test.go +++ b/pkg/tls/self_signed_cert_test.go @@ -17,7 +17,7 @@ import ( "github.com/stretchr/testify/require" ) -// nolint: revive,gocognit +//nolint:revive,gocognit func TestGenerateSelfSignedCert(t *testing.T) { // Setup temp file paths caPath := "/tmp/test_ca.pem" diff --git a/scripts/packages/packager/Dockerfile b/scripts/packages/packager/Dockerfile index 51be5e23a..8353d7578 100644 --- a/scripts/packages/packager/Dockerfile +++ b/scripts/packages/packager/Dockerfile @@ -1,6 +1,6 @@ ARG package_type -FROM docker.io/golang:1.23-bullseye AS base +FROM docker.io/golang:1.24-bullseye AS base ARG PKG_VER="1.17.5" ARG PKG_DIR="/tmp/pkg" diff --git a/test/config/agent/nginx-agent-with-auxiliary-command.conf b/test/config/agent/nginx-agent-with-auxiliary-command.conf new file mode 100644 index 000000000..759e8b8fd --- /dev/null +++ b/test/config/agent/nginx-agent-with-auxiliary-command.conf @@ -0,0 +1,26 @@ +# +# /etc/nginx-agent/nginx-agent.conf +# +# Configuration file for NGINX Agent. 
+# + +log: + level: debug + +command: + server: + host: managementPlane + port: 9092 + type: grpc + +auxiliary_command: + server: + host: managementPlaneAuxiliary + port: 9095 + type: grpc + + +allowed_directories: + - /etc/nginx + - /usr/local/etc/nginx + - /usr/share/nginx/modules diff --git a/test/helpers/config_utils.go b/test/helpers/config_utils.go index 3620e36c0..f4e1c5850 100644 --- a/test/helpers/config_utils.go +++ b/test/helpers/config_utils.go @@ -75,7 +75,7 @@ func GenerateConfig(t testing.TB, outputFile string, targetSize int64) (fs.FileI server, serverErr := generateRandomString(serverLength) require.NoError(t, serverErr) - serverName := fmt.Sprintf("%s.com", server) + serverName := server + ".com" proxy, proxyErr := generateRandomString(proxyLength) require.NoError(t, proxyErr) diff --git a/test/helpers/go_utils.go b/test/helpers/go_utils.go index 5e161e482..ade1349e3 100644 --- a/test/helpers/go_utils.go +++ b/test/helpers/go_utils.go @@ -86,7 +86,7 @@ func generatePattern(n int) (string, error) { pattern.WriteString(currDir) pattern.WriteRune(os.PathSeparator) - for i := 0; i < n; i++ { + for range n { pattern.WriteString("..") pattern.WriteRune(os.PathSeparator) } diff --git a/test/helpers/go_utils_test.go b/test/helpers/go_utils_test.go index 2caab7090..fa5fc1f40 100644 --- a/test/helpers/go_utils_test.go +++ b/test/helpers/go_utils_test.go @@ -13,7 +13,7 @@ import ( ) func TestGoVersion(t *testing.T) { - expected := "1.23.0" + expected := "1.24.0" actual, err := GoVersion(t, 2) diff --git a/test/helpers/network_utils.go b/test/helpers/network_utils.go index c086ed78c..f5a1c38b0 100644 --- a/test/helpers/network_utils.go +++ b/test/helpers/network_utils.go @@ -6,6 +6,7 @@ package helpers import ( "crypto/rand" + "errors" "fmt" "math/big" "net" @@ -21,7 +22,7 @@ func RandomPort(t *testing.T) (int, error) { const maxPort = 65535 // try up to 10 times to get a random port - for i := 0; i < 10; i++ { + for range 10 { maxValue := &big.Int{} 
maxValue.SetInt64(maxPort - minPort + 1) @@ -37,7 +38,7 @@ func RandomPort(t *testing.T) (int, error) { } } - return 0, fmt.Errorf("could not find an available port after multiple attempts") + return 0, errors.New("could not find an available port after multiple attempts") } // isPortAvailable checks if a port is available by attempting to bind to it diff --git a/test/helpers/nginx_plus.go b/test/helpers/nginx_plus.go index 62be2956e..f51a8c420 100644 --- a/test/helpers/nginx_plus.go +++ b/test/helpers/nginx_plus.go @@ -20,7 +20,7 @@ const ( serverID = 1234 ) -// nolint: gocyclo,revive,cyclop,maintidx +//nolint:gocyclo,revive,cyclop,maintidx func NewMockNGINXPlusAPIServer(t *testing.T) *httptest.Server { t.Helper() return httptest.NewServer(http.HandlerFunc(func(rw http.ResponseWriter, req *http.Request) { diff --git a/test/helpers/os_utils_test.go b/test/helpers/os_utils_test.go index b7d601580..a3b8d3688 100644 --- a/test/helpers/os_utils_test.go +++ b/test/helpers/os_utils_test.go @@ -11,7 +11,7 @@ import ( "github.com/stretchr/testify/assert" ) -// nolint: stylecheck +//nolint:staticcheck func TestRemoveASCIIControlSignals(t *testing.T) { tests := []struct { name string diff --git a/test/helpers/test_containers_utils.go b/test/helpers/test_containers_utils.go index 60f4c3000..baee47d40 100644 --- a/test/helpers/test_containers_utils.go +++ b/test/helpers/test_containers_utils.go @@ -263,6 +263,39 @@ func StartMockManagementPlaneGrpcContainer( return container } +func StartAuxiliaryMockManagementPlaneGrpcContainer(ctx context.Context, tb testing.TB, + containerNetwork *testcontainers.DockerNetwork, +) testcontainers.Container { + tb.Helper() + req := testcontainers.ContainerRequest{ + FromDockerfile: testcontainers.FromDockerfile{ + Context: "../../../", + Dockerfile: "./test/integration/auxiliarycommandserver/Dockerfile", + KeepImage: false, + PrintBuildLog: true, + }, + ExposedPorts: []string{"9095/tcp", "9096/tcp"}, + Networks: []string{ + 
containerNetwork.Name, + }, + NetworkAliases: map[string][]string{ + containerNetwork.Name: { + "managementPlaneAuxiliary", + }, + }, + WaitingFor: wait.ForLog("Starting mock management plane gRPC server"), + } + + container, err := testcontainers.GenericContainer(ctx, testcontainers.GenericContainerRequest{ + ContainerRequest: req, + Started: true, + }) + + require.NoError(tb, err) + + return container +} + func ToPtr[T any](value T) *T { return &value } @@ -274,6 +307,7 @@ func LogAndTerminateContainers( mockManagementPlaneContainer testcontainers.Container, agentContainer testcontainers.Container, expectNoErrorsInLogs bool, + auxiliaryMockManagementPlaneContainer testcontainers.Container, ) { tb.Helper() @@ -285,6 +319,8 @@ func LogAndTerminateContainers( require.NoError(tb, err) logs := string(buf) + assert.NotContains(tb, logs, "manifest file is empty", + "Error reading manifest file found in agent log") tb.Log(logs) if expectNoErrorsInLogs { assert.NotContains(tb, logs, "level=ERROR", "agent log file contains logs at error level") @@ -307,4 +343,19 @@ func LogAndTerminateContainers( err = mockManagementPlaneContainer.Terminate(ctx) require.NoError(tb, err) } + + if auxiliaryMockManagementPlaneContainer != nil { + tb.Log("======================== Logging Auxiliary Mock Management Container Logs ========================") + logReader, err = auxiliaryMockManagementPlaneContainer.Logs(ctx) + require.NoError(tb, err) + + buf, err = io.ReadAll(logReader) + require.NoError(tb, err) + logs = string(buf) + + tb.Log(logs) + + err = auxiliaryMockManagementPlaneContainer.Terminate(ctx) + require.NoError(tb, err) + } } diff --git a/test/integration/auxiliarycommandserver/Dockerfile b/test/integration/auxiliarycommandserver/Dockerfile new file mode 100644 index 000000000..a4904e7ba --- /dev/null +++ b/test/integration/auxiliarycommandserver/Dockerfile @@ -0,0 +1,8 @@ +FROM debian:buster-slim + +WORKDIR /mock-management-plane-grpc +COPY ./build/mock-management-plane-grpc ./ 
+ +RUN mkdir config/ + +CMD ["/mock-management-plane-grpc/server", "--grpcAddress", "0.0.0.0:9095", "--apiAddress", "0.0.0.0:9096", "--configDirectory", "/mock-management-plane-grpc/config", "--logLevel", "DEBUG"] diff --git a/test/integration/auxiliarycommandserver/connection_test.go b/test/integration/auxiliarycommandserver/connection_test.go new file mode 100644 index 000000000..4b2204daa --- /dev/null +++ b/test/integration/auxiliarycommandserver/connection_test.go @@ -0,0 +1,203 @@ +// Copyright (c) F5, Inc. +// +// This source code is licensed under the Apache License, Version 2.0 license found in the +// LICENSE file in the root directory of this source tree. + +package auxiliarycommandserver + +import ( + "context" + "fmt" + "net" + "net/http" + "os" + "sort" + "testing" + "time" + + "github.com/go-resty/resty/v2" + + mpi "github.com/nginx/agent/v3/api/grpc/mpi/v1" + "github.com/nginx/agent/v3/test/integration/utils" + "github.com/stretchr/testify/suite" +) + +type AuxiliaryTestSuite struct { + suite.Suite + teardownTest func(tb testing.TB) + instanceID string +} + +func (s *AuxiliaryTestSuite) SetupSuite() { + t := s.T() + // Expect errors in logs should be false for reconnection tests + // For now for these tests we will skip checking the logs for errors + s.teardownTest = utils.SetupConnectionTest(t, false, false, true, + "../../config/agent/nginx-agent-with-auxiliary-command.conf") +} + +func (s *AuxiliaryTestSuite) TearDownSuite() { + s.teardownTest(s.T()) +} + +func TestSuite(t *testing.T) { + suite.Run(t, new(AuxiliaryTestSuite)) +} + +func (s *AuxiliaryTestSuite) TestAuxiliary_Test1_Connection() { + s.instanceID = utils.VerifyConnection(s.T(), 2, utils.MockManagementPlaneAPIAddress) + s.False(s.T().Failed()) + utils.VerifyUpdateDataPlaneHealth(s.T(), utils.MockManagementPlaneAPIAddress) + + utils.VerifyConnection(s.T(), 2, utils.AuxiliaryMockManagementPlaneAPIAddress) + s.False(s.T().Failed()) + utils.VerifyUpdateDataPlaneHealth(s.T(),
utils.AuxiliaryMockManagementPlaneAPIAddress) + + commandResponses := utils.ManagementPlaneResponses(s.T(), 1, utils.MockManagementPlaneAPIAddress) + s.Equal(mpi.CommandResponse_COMMAND_STATUS_OK, commandResponses[0].GetCommandResponse().GetStatus()) + s.Equal("Successfully updated all files", commandResponses[0].GetCommandResponse().GetMessage()) +} + +func (s *AuxiliaryTestSuite) TestAuxiliary_Test2_Reconnection() { + ctx := context.Background() + timeout := 15 * time.Second + + originalID := utils.VerifyConnection(s.T(), 2, utils.AuxiliaryMockManagementPlaneAPIAddress) + stopErr := utils.AuxiliaryMockManagementPlaneGrpcContainer.Stop(context.Background(), &timeout) + + s.Require().NoError(stopErr) + + utils.VerifyConnection(s.T(), 2, utils.MockManagementPlaneAPIAddress) + s.False(s.T().Failed()) + + startErr := utils.AuxiliaryMockManagementPlaneGrpcContainer.Start(ctx) + s.Require().NoError(startErr) + + ipAddress, err := utils.AuxiliaryMockManagementPlaneGrpcContainer.Host(ctx) + s.Require().NoError(err) + ports, err := utils.AuxiliaryMockManagementPlaneGrpcContainer.Ports(ctx) + s.Require().NoError(err) + utils.AuxiliaryMockManagementPlaneAPIAddress = net.JoinHostPort(ipAddress, ports["9096/tcp"][0].HostPort) + + currentID := utils.VerifyConnection(s.T(), 2, utils.AuxiliaryMockManagementPlaneAPIAddress) + s.Equal(originalID, currentID) +} + +func (s *AuxiliaryTestSuite) TestAuxiliary_Test3_DataplaneHealthRequest() { + utils.ClearManagementPlaneResponses(s.T(), utils.MockManagementPlaneAPIAddress) + utils.ClearManagementPlaneResponses(s.T(), utils.AuxiliaryMockManagementPlaneAPIAddress) + + request := `{ + "message_meta": { + "message_id": "5d0fa83e-351c-4009-90cd-1f2acce2d184", + "correlation_id": "79794c1c-8e91-47c1-a92c-b9a0c3f1a263", + "timestamp": "2023-01-15T01:30:15.01Z" + }, + "health_request": {} + }` + + client := resty.New() + client.SetRetryCount(utils.RetryCount).SetRetryWaitTime(utils.RetryWaitTime).SetRetryMaxWaitTime( + utils.RetryMaxWaitTime) + 
+ url := fmt.Sprintf("http://%s/api/v1/requests", utils.MockManagementPlaneAPIAddress) + resp, err := client.R().EnableTrace().SetBody(request).Post(url) + + s.Require().NoError(err) + s.Equal(http.StatusOK, resp.StatusCode()) + + // Check command server has 1 ManagementPlaneResponse as it has sent the request + commandResponses := utils.ManagementPlaneResponses(s.T(), 1, utils.MockManagementPlaneAPIAddress) + s.Equal(mpi.CommandResponse_COMMAND_STATUS_OK, commandResponses[0].GetCommandResponse().GetStatus()) + s.Equal("Successfully sent health status update", commandResponses[0].GetCommandResponse().GetMessage()) + s.False(s.T().Failed()) + + // Check auxiliary server still has 0 ManagementPlaneResponses as it didn't send the request + utils.ManagementPlaneResponses(s.T(), 0, utils.AuxiliaryMockManagementPlaneAPIAddress) + s.False(s.T().Failed()) +} + +func (s *AuxiliaryTestSuite) TestAuxiliary_Test4_FileWatcher() { + // Clear any previous responses from previous tests + utils.ClearManagementPlaneResponses(s.T(), utils.MockManagementPlaneAPIAddress) + utils.ClearManagementPlaneResponses(s.T(), utils.AuxiliaryMockManagementPlaneAPIAddress) + ctx := context.Background() + + err := utils.Container.CopyFileToContainer( + ctx, + "../../config/nginx/nginx-with-server-block-access-log.conf", + "/etc/nginx/nginx.conf", + 0o666, + ) + s.Require().NoError(err) + + // Check command server has 1 ManagementPlaneResponse from updating a file on disk + commandResponses := utils.ManagementPlaneResponses(s.T(), 1, utils.MockManagementPlaneAPIAddress) + s.Equal(mpi.CommandResponse_COMMAND_STATUS_OK, commandResponses[0].GetCommandResponse().GetStatus()) + s.Equal("Successfully updated all files", commandResponses[0].GetCommandResponse().GetMessage()) + + // Check auxiliary server has 1 ManagementPlaneResponse from updating a file on disk + auxResponses := utils.ManagementPlaneResponses(s.T(), 1, utils.AuxiliaryMockManagementPlaneAPIAddress) +
s.Equal(mpi.CommandResponse_COMMAND_STATUS_OK, auxResponses[0].GetCommandResponse().GetStatus()) + s.Equal("Successfully updated all files", auxResponses[0].GetCommandResponse().GetMessage()) +} + +func (s *AuxiliaryTestSuite) TestAuxiliary_Test5_ConfigApply() { + utils.ClearManagementPlaneResponses(s.T(), utils.MockManagementPlaneAPIAddress) + utils.ClearManagementPlaneResponses(s.T(), utils.AuxiliaryMockManagementPlaneAPIAddress) + + ctx := context.Background() + + newConfigFile := "../../config/nginx/nginx-with-test-location.conf" + + if os.Getenv("IMAGE_PATH") == "/nginx-plus/agent" { + newConfigFile = "../../config/nginx/nginx-plus-with-test-location.conf" + } + + err := utils.MockManagementPlaneGrpcContainer.CopyFileToContainer( + ctx, + newConfigFile, + fmt.Sprintf("/mock-management-plane-grpc/config/%s/etc/nginx/nginx.conf", s.instanceID), + 0o666, + ) + + s.Require().NoError(err) + + utils.PerformConfigApply(s.T(), s.instanceID, utils.MockManagementPlaneAPIAddress) + + commandResponses := utils.ManagementPlaneResponses(s.T(), 2, utils.MockManagementPlaneAPIAddress) + + sort.Slice(commandResponses, func(i, j int) bool { + return commandResponses[i].GetCommandResponse().GetMessage() < + commandResponses[j].GetCommandResponse().GetMessage() + }) + + s.Equal(mpi.CommandResponse_COMMAND_STATUS_OK, commandResponses[0].GetCommandResponse().GetStatus()) + s.Equal("Config apply successful", commandResponses[0].GetCommandResponse().GetMessage()) + s.Equal(mpi.CommandResponse_COMMAND_STATUS_OK, commandResponses[1].GetCommandResponse().GetStatus()) + s.Equal("Successfully updated all files", commandResponses[1].GetCommandResponse().GetMessage()) + + auxResponses := utils.ManagementPlaneResponses(s.T(), 1, utils.AuxiliaryMockManagementPlaneAPIAddress) + s.Equal(mpi.CommandResponse_COMMAND_STATUS_OK, auxResponses[0].GetCommandResponse().GetStatus()) + s.Equal("Successfully updated all files", auxResponses[0].GetCommandResponse().GetMessage()) + + // Check the config 
version is the same in both command and auxiliary servers + commandOverview := utils.CurrentFileOverview(s.T(), s.instanceID, utils.MockManagementPlaneAPIAddress) + auxOverview := utils.CurrentFileOverview(s.T(), s.instanceID, utils.AuxiliaryMockManagementPlaneAPIAddress) + s.Equal(commandOverview.GetConfigVersion(), auxOverview.GetConfigVersion()) +} + +func (s *AuxiliaryTestSuite) TestAuxiliary_Test6_ConfigApplyInvalid() { + utils.ClearManagementPlaneResponses(s.T(), utils.MockManagementPlaneAPIAddress) + utils.ClearManagementPlaneResponses(s.T(), utils.AuxiliaryMockManagementPlaneAPIAddress) + + utils.PerformConfigApply(s.T(), s.instanceID, utils.AuxiliaryMockManagementPlaneAPIAddress) + + commandResponses := utils.ManagementPlaneResponses(s.T(), 1, + utils.AuxiliaryMockManagementPlaneAPIAddress) + s.Equal(mpi.CommandResponse_COMMAND_STATUS_FAILURE, + commandResponses[0].GetCommandResponse().GetStatus()) + s.Equal("Config apply failed", commandResponses[0].GetCommandResponse().GetMessage()) + s.Equal("Unable to process request. 
Management plane is configured as read only.", + commandResponses[0].GetCommandResponse().GetError()) +} diff --git a/test/integration/installuninstall/install_uninstall_test.go b/test/integration/installuninstall/install_uninstall_test.go index 4432999e0..14b60a0f1 100644 --- a/test/integration/installuninstall/install_uninstall_test.go +++ b/test/integration/installuninstall/install_uninstall_test.go @@ -66,6 +66,7 @@ func installUninstallSetup(tb testing.TB, expectNoErrorsInLogs bool) (testcontai nil, testContainer, expectNoErrorsInLogs, + nil, ) } } @@ -154,7 +155,7 @@ func verifyAgentVersion(ctx context.Context, tb testing.TB, testContainer testco replacer := strings.NewReplacer("nginx-agent-", "v", "SNAPSHOT-", "") packageVersion := replacer.Replace(os.Getenv("PACKAGE_NAME")) - expectedVersionOutput := fmt.Sprintf("nginx-agent version %s", packageVersion) + expectedVersionOutput := "nginx-agent version " + packageVersion exitCode, cmdOut, err := testContainer.Exec(ctx, []string{"nginx-agent", "--version"}) require.NoError(tb, err) diff --git a/test/integration/managementplane/config_apply_test.go b/test/integration/managementplane/config_apply_test.go index a221ceb51..6fcb48096 100644 --- a/test/integration/managementplane/config_apply_test.go +++ b/test/integration/managementplane/config_apply_test.go @@ -26,20 +26,20 @@ const ( func TestGrpc_ConfigApply(t *testing.T) { ctx := context.Background() - teardownTest := utils.SetupConnectionTest(t, false, false, + teardownTest := utils.SetupConnectionTest(t, false, false, false, "../../config/agent/nginx-config-with-grpc-client.conf") defer teardownTest(t) - nginxInstanceID := utils.VerifyConnection(t, 2) + nginxInstanceID := utils.VerifyConnection(t, 2, utils.MockManagementPlaneAPIAddress) - responses := utils.ManagementPlaneResponses(t, 1) + responses := utils.ManagementPlaneResponses(t, 1, utils.MockManagementPlaneAPIAddress) assert.Equal(t, mpi.CommandResponse_COMMAND_STATUS_OK, 
responses[0].GetCommandResponse().GetStatus()) assert.Equal(t, "Successfully updated all files", responses[0].GetCommandResponse().GetMessage()) t.Run("Test 1: No config changes", func(t *testing.T) { - utils.ClearManagementPlaneResponses(t) - utils.PerformConfigApply(t, nginxInstanceID) - responses = utils.ManagementPlaneResponses(t, 1) + utils.ClearManagementPlaneResponses(t, utils.MockManagementPlaneAPIAddress) + utils.PerformConfigApply(t, nginxInstanceID, utils.MockManagementPlaneAPIAddress) + responses = utils.ManagementPlaneResponses(t, 1, utils.MockManagementPlaneAPIAddress) t.Logf("Config apply responses: %v", responses) assert.Equal(t, mpi.CommandResponse_COMMAND_STATUS_OK, responses[0].GetCommandResponse().GetStatus()) @@ -47,7 +47,7 @@ func TestGrpc_ConfigApply(t *testing.T) { }) t.Run("Test 2: Valid config", func(t *testing.T) { - utils.ClearManagementPlaneResponses(t) + utils.ClearManagementPlaneResponses(t, utils.MockManagementPlaneAPIAddress) newConfigFile := "../../config/nginx/nginx-with-test-location.conf" if os.Getenv("IMAGE_PATH") == "/nginx-plus/agent" { @@ -62,9 +62,9 @@ func TestGrpc_ConfigApply(t *testing.T) { ) require.NoError(t, err) - utils.PerformConfigApply(t, nginxInstanceID) + utils.PerformConfigApply(t, nginxInstanceID, utils.MockManagementPlaneAPIAddress) - responses = utils.ManagementPlaneResponses(t, 2) + responses = utils.ManagementPlaneResponses(t, 2, utils.MockManagementPlaneAPIAddress) t.Logf("Config apply responses: %v", responses) sort.Slice(responses, func(i, j int) bool { @@ -78,7 +78,7 @@ func TestGrpc_ConfigApply(t *testing.T) { }) t.Run("Test 3: Invalid config", func(t *testing.T) { - utils.ClearManagementPlaneResponses(t) + utils.ClearManagementPlaneResponses(t, utils.MockManagementPlaneAPIAddress) err := utils.MockManagementPlaneGrpcContainer.CopyFileToContainer( ctx, "../../config/nginx/invalid-nginx.conf", @@ -87,9 +87,9 @@ func TestGrpc_ConfigApply(t *testing.T) { ) require.NoError(t, err) - 
utils.PerformConfigApply(t, nginxInstanceID) + utils.PerformConfigApply(t, nginxInstanceID, utils.MockManagementPlaneAPIAddress) - responses = utils.ManagementPlaneResponses(t, 2) + responses = utils.ManagementPlaneResponses(t, 2, utils.MockManagementPlaneAPIAddress) t.Logf("Config apply responses: %v", responses) assert.Equal(t, mpi.CommandResponse_COMMAND_STATUS_ERROR, responses[0].GetCommandResponse().GetStatus()) @@ -101,10 +101,10 @@ func TestGrpc_ConfigApply(t *testing.T) { }) t.Run("Test 4: File not in allowed directory", func(t *testing.T) { - utils.ClearManagementPlaneResponses(t) + utils.ClearManagementPlaneResponses(t, utils.MockManagementPlaneAPIAddress) utils.PerformInvalidConfigApply(t, nginxInstanceID) - responses = utils.ManagementPlaneResponses(t, 1) + responses = utils.ManagementPlaneResponses(t, 1, utils.MockManagementPlaneAPIAddress) t.Logf("Config apply responses: %v", responses) assert.Equal(t, mpi.CommandResponse_COMMAND_STATUS_FAILURE, responses[0].GetCommandResponse().GetStatus()) @@ -119,17 +119,17 @@ func TestGrpc_ConfigApply(t *testing.T) { func TestGrpc_ConfigApply_Chunking(t *testing.T) { ctx := context.Background() - teardownTest := utils.SetupConnectionTest(t, false, false, + teardownTest := utils.SetupConnectionTest(t, false, false, false, "../../config/agent/nginx-config-with-max-file-size.conf") defer teardownTest(t) - nginxInstanceID := utils.VerifyConnection(t, 2) + nginxInstanceID := utils.VerifyConnection(t, 2, utils.MockManagementPlaneAPIAddress) - responses := utils.ManagementPlaneResponses(t, 1) + responses := utils.ManagementPlaneResponses(t, 1, utils.MockManagementPlaneAPIAddress) assert.Equal(t, mpi.CommandResponse_COMMAND_STATUS_OK, responses[0].GetCommandResponse().GetStatus()) assert.Equal(t, "Successfully updated all files", responses[0].GetCommandResponse().GetMessage()) - utils.ClearManagementPlaneResponses(t) + utils.ClearManagementPlaneResponses(t, utils.MockManagementPlaneAPIAddress) newConfigFile := 
"../../config/nginx/nginx-1mb-file.conf" @@ -141,9 +141,9 @@ func TestGrpc_ConfigApply_Chunking(t *testing.T) { ) require.NoError(t, err) - utils.PerformConfigApply(t, nginxInstanceID) + utils.PerformConfigApply(t, nginxInstanceID, utils.MockManagementPlaneAPIAddress) - responses = utils.ManagementPlaneResponses(t, 2) + responses = utils.ManagementPlaneResponses(t, 2, utils.MockManagementPlaneAPIAddress) t.Logf("Config apply responses: %v", responses) sort.Slice(responses, func(i, j int) bool { diff --git a/test/integration/managementplane/config_upload_test.go b/test/integration/managementplane/config_upload_test.go index 3841f7d1a..35d57a628 100644 --- a/test/integration/managementplane/config_upload_test.go +++ b/test/integration/managementplane/config_upload_test.go @@ -19,14 +19,14 @@ import ( ) func TestGrpc_ConfigUpload(t *testing.T) { - teardownTest := utils.SetupConnectionTest(t, true, false, + teardownTest := utils.SetupConnectionTest(t, true, false, false, "../../config/agent/nginx-config-with-grpc-client.conf") defer teardownTest(t) - nginxInstanceID := utils.VerifyConnection(t, 2) + nginxInstanceID := utils.VerifyConnection(t, 2, utils.MockManagementPlaneAPIAddress) assert.False(t, t.Failed()) - responses := utils.ManagementPlaneResponses(t, 1) + responses := utils.ManagementPlaneResponses(t, 1, utils.MockManagementPlaneAPIAddress) assert.Equal(t, mpi.CommandResponse_COMMAND_STATUS_OK, responses[0].GetCommandResponse().GetStatus()) assert.Equal(t, "Successfully updated all files", responses[0].GetCommandResponse().GetMessage()) @@ -58,7 +58,7 @@ func TestGrpc_ConfigUpload(t *testing.T) { require.NoError(t, err) assert.Equal(t, http.StatusOK, resp.StatusCode()) - responses = utils.ManagementPlaneResponses(t, 2) + responses = utils.ManagementPlaneResponses(t, 2, utils.MockManagementPlaneAPIAddress) assert.Equal(t, mpi.CommandResponse_COMMAND_STATUS_OK, responses[0].GetCommandResponse().GetStatus()) assert.Equal(t, "Successfully updated all files", 
responses[0].GetCommandResponse().GetMessage()) diff --git a/test/integration/managementplane/file_watcher_test.go b/test/integration/managementplane/file_watcher_test.go index e7d13004e..5d6bb58f8 100644 --- a/test/integration/managementplane/file_watcher_test.go +++ b/test/integration/managementplane/file_watcher_test.go @@ -18,11 +18,11 @@ import ( func TestGrpc_FileWatcher(t *testing.T) { ctx := context.Background() - teardownTest := utils.SetupConnectionTest(t, true, false, + teardownTest := utils.SetupConnectionTest(t, true, false, false, "../../config/agent/nginx-config-with-grpc-client.conf") defer teardownTest(t) - utils.VerifyConnection(t, 2) + utils.VerifyConnection(t, 2, utils.MockManagementPlaneAPIAddress) assert.False(t, t.Failed()) t.Run("Test 1: update nginx config file on data plane", func(t *testing.T) { @@ -34,13 +34,13 @@ func TestGrpc_FileWatcher(t *testing.T) { ) require.NoError(t, err) - responses := utils.ManagementPlaneResponses(t, 2) + responses := utils.ManagementPlaneResponses(t, 2, utils.MockManagementPlaneAPIAddress) assert.Equal(t, mpi.CommandResponse_COMMAND_STATUS_OK, responses[0].GetCommandResponse().GetStatus()) assert.Equal(t, "Successfully updated all files", responses[0].GetCommandResponse().GetMessage()) assert.Equal(t, mpi.CommandResponse_COMMAND_STATUS_OK, responses[1].GetCommandResponse().GetStatus()) assert.Equal(t, "Successfully updated all files", responses[1].GetCommandResponse().GetMessage()) - utils.VerifyUpdateDataPlaneStatus(t) + utils.VerifyUpdateDataPlaneStatus(t, utils.MockManagementPlaneAPIAddress) }) t.Run("Test 2: create new nginx config file", func(t *testing.T) { @@ -52,11 +52,11 @@ func TestGrpc_FileWatcher(t *testing.T) { ) require.NoError(t, err) - responses := utils.ManagementPlaneResponses(t, 3) + responses := utils.ManagementPlaneResponses(t, 3, utils.MockManagementPlaneAPIAddress) assert.Equal(t, mpi.CommandResponse_COMMAND_STATUS_OK, responses[2].GetCommandResponse().GetStatus()) assert.Equal(t, 
"Successfully updated all files", responses[2].GetCommandResponse().GetMessage()) - utils.VerifyUpdateDataPlaneStatus(t) + utils.VerifyUpdateDataPlaneStatus(t, utils.MockManagementPlaneAPIAddress) }) t.Run("Test 3: delete nginx config file", func(t *testing.T) { @@ -66,10 +66,10 @@ func TestGrpc_FileWatcher(t *testing.T) { ) require.NoError(t, err) - responses := utils.ManagementPlaneResponses(t, 4) + responses := utils.ManagementPlaneResponses(t, 4, utils.MockManagementPlaneAPIAddress) assert.Equal(t, mpi.CommandResponse_COMMAND_STATUS_OK, responses[3].GetCommandResponse().GetStatus()) assert.Equal(t, "Successfully updated all files", responses[3].GetCommandResponse().GetMessage()) - utils.VerifyUpdateDataPlaneStatus(t) + utils.VerifyUpdateDataPlaneStatus(t, utils.MockManagementPlaneAPIAddress) }) } diff --git a/test/integration/managementplane/grpc_management_plane_api_test.go b/test/integration/managementplane/grpc_management_plane_api_test.go index 85bc9b7a7..7bee15f28 100644 --- a/test/integration/managementplane/grpc_management_plane_api_test.go +++ b/test/integration/managementplane/grpc_management_plane_api_test.go @@ -23,13 +23,13 @@ import ( func TestGrpc_Reconnection(t *testing.T) { ctx := context.Background() - teardownTest := utils.SetupConnectionTest(t, false, false, + teardownTest := utils.SetupConnectionTest(t, false, false, false, "../../config/agent/nginx-config-with-grpc-client.conf") defer teardownTest(t) timeout := 15 * time.Second - originalID := utils.VerifyConnection(t, 2) + originalID := utils.VerifyConnection(t, 2, utils.MockManagementPlaneAPIAddress) stopErr := utils.MockManagementPlaneGrpcContainer.Stop(ctx, &timeout) @@ -44,29 +44,29 @@ func TestGrpc_Reconnection(t *testing.T) { require.NoError(t, err) utils.MockManagementPlaneAPIAddress = net.JoinHostPort(ipAddress, ports["9093/tcp"][0].HostPort) - currentID := utils.VerifyConnection(t, 2) + currentID := utils.VerifyConnection(t, 2, utils.MockManagementPlaneAPIAddress) assert.Equal(t, 
originalID, currentID) } // Verify that the agent sends a connection request and an update data plane status request func TestGrpc_StartUp(t *testing.T) { - teardownTest := utils.SetupConnectionTest(t, true, false, + teardownTest := utils.SetupConnectionTest(t, true, false, false, "../../config/agent/nginx-config-with-grpc-client.conf") defer teardownTest(t) - utils.VerifyConnection(t, 2) + utils.VerifyConnection(t, 2, utils.MockManagementPlaneAPIAddress) assert.False(t, t.Failed()) - utils.VerifyUpdateDataPlaneHealth(t) + utils.VerifyUpdateDataPlaneHealth(t, utils.MockManagementPlaneAPIAddress) } func TestGrpc_DataplaneHealthRequest(t *testing.T) { - teardownTest := utils.SetupConnectionTest(t, true, false, + teardownTest := utils.SetupConnectionTest(t, true, false, false, "../../config/agent/nginx-config-with-grpc-client.conf") defer teardownTest(t) - utils.VerifyConnection(t, 2) + utils.VerifyConnection(t, 2, utils.MockManagementPlaneAPIAddress) - responses := utils.ManagementPlaneResponses(t, 1) + responses := utils.ManagementPlaneResponses(t, 1, utils.MockManagementPlaneAPIAddress) assert.Equal(t, mpi.CommandResponse_COMMAND_STATUS_OK, responses[0].GetCommandResponse().GetStatus()) assert.Equal(t, "Successfully updated all files", responses[0].GetCommandResponse().GetMessage()) @@ -91,7 +91,7 @@ func TestGrpc_DataplaneHealthRequest(t *testing.T) { require.NoError(t, err) assert.Equal(t, http.StatusOK, resp.StatusCode()) - responses = utils.ManagementPlaneResponses(t, 2) + responses = utils.ManagementPlaneResponses(t, 2, utils.MockManagementPlaneAPIAddress) assert.Equal(t, mpi.CommandResponse_COMMAND_STATUS_OK, responses[1].GetCommandResponse().GetStatus()) assert.Equal(t, "Successfully sent health status update", responses[1].GetCommandResponse().GetMessage()) diff --git a/test/integration/nginxless/nginx_less_mpi_connection_test.go b/test/integration/nginxless/nginx_less_mpi_connection_test.go index 7e626521c..c979fadca 100644 --- 
a/test/integration/nginxless/nginx_less_mpi_connection_test.go +++ b/test/integration/nginxless/nginx_less_mpi_connection_test.go @@ -15,10 +15,10 @@ import ( // Verify that the agent sends a connection request to Management Plane even when Nginx is not present func TestNginxLessGrpc_Connection(t *testing.T) { - teardownTest := utils.SetupConnectionTest(t, true, true, + teardownTest := utils.SetupConnectionTest(t, true, true, false, "../../config/agent/nginx-config-with-grpc-client.conf") defer teardownTest(t) - utils.VerifyConnection(t, 1) + utils.VerifyConnection(t, 1, utils.MockManagementPlaneAPIAddress) assert.False(t, t.Failed()) } diff --git a/test/integration/utils/config_apply_utils.go b/test/integration/utils/config_apply_utils.go index ac86e3227..bc0c5acb5 100644 --- a/test/integration/utils/config_apply_utils.go +++ b/test/integration/utils/config_apply_utils.go @@ -11,6 +11,9 @@ import ( "testing" "time" + mpi "github.com/nginx/agent/v3/api/grpc/mpi/v1" + "google.golang.org/protobuf/encoding/protojson" + "github.com/go-resty/resty/v2" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" @@ -22,19 +25,46 @@ const ( RetryMaxWaitTime = 6 * time.Second ) -var MockManagementPlaneAPIAddress string +var ( + MockManagementPlaneAPIAddress string + AuxiliaryMockManagementPlaneAPIAddress string +) -func PerformConfigApply(t *testing.T, nginxInstanceID string) { +func PerformConfigApply(t *testing.T, nginxInstanceID, mockManagementPlaneAPIAddress string) { t.Helper() client := resty.New() client.SetRetryCount(RetryCount).SetRetryWaitTime(RetryWaitTime).SetRetryMaxWaitTime(RetryMaxWaitTime) - url := fmt.Sprintf("http://%s/api/v1/instance/%s/config/apply", MockManagementPlaneAPIAddress, nginxInstanceID) + url := fmt.Sprintf("http://%s/api/v1/instance/%s/config/apply", mockManagementPlaneAPIAddress, nginxInstanceID) resp, err := client.R().EnableTrace().Post(url) + t.Logf("Config ApplyResponse: %s", resp.String()) + require.NoError(t, err) + 
assert.Equal(t, http.StatusOK, resp.StatusCode()) +} + +func CurrentFileOverview(t *testing.T, nginxInstanceID, mockManagementPlaneAPIAddress string) *mpi.FileOverview { + t.Helper() + + client := resty.New() + client.SetRetryCount(RetryCount).SetRetryWaitTime(RetryWaitTime).SetRetryMaxWaitTime(RetryMaxWaitTime) + + url := fmt.Sprintf("http://%s/api/v1/instance/%s/config", mockManagementPlaneAPIAddress, nginxInstanceID) + resp, err := client.R().EnableTrace().Get(url) + require.NoError(t, err) assert.Equal(t, http.StatusOK, resp.StatusCode()) + + responseData := resp.Body() + + overview := mpi.GetOverviewResponse{} + + pb := protojson.UnmarshalOptions{DiscardUnknown: true} + unmarshalErr := pb.Unmarshal(responseData, &overview) + require.NoError(t, unmarshalErr) + + return overview.GetOverview() } func PerformInvalidConfigApply(t *testing.T, nginxInstanceID string) { diff --git a/test/integration/utils/grpc_management_plane_utils.go b/test/integration/utils/grpc_management_plane_utils.go index cd2e9d894..b79ef5de2 100644 --- a/test/integration/utils/grpc_management_plane_utils.go +++ b/test/integration/utils/grpc_management_plane_utils.go @@ -31,9 +31,11 @@ import ( ) var ( - Container testcontainers.Container - MockManagementPlaneGrpcContainer testcontainers.Container - MockManagementPlaneGrpcAddress string + Container testcontainers.Container + MockManagementPlaneGrpcContainer testcontainers.Container + AuxiliaryMockManagementPlaneGrpcContainer testcontainers.Container + MockManagementPlaneGrpcAddress string + AuxiliaryMockManagementPlaneGrpcAddress string ) const ( @@ -60,12 +62,14 @@ type ( } ) -func SetupConnectionTest(tb testing.TB, expectNoErrorsInLogs, nginxless bool, agentConfig string) func(tb testing.TB) { +func SetupConnectionTest(tb testing.TB, expectNoErrorsInLogs, nginxless, auxiliaryServer bool, + agentConfig string, +) func(tb testing.TB) { tb.Helper() ctx := context.Background() if os.Getenv("TEST_ENV") == "Container" { - 
setupContainerEnvironment(ctx, tb, nginxless, agentConfig) + setupContainerEnvironment(ctx, tb, nginxless, auxiliaryServer, agentConfig) } else { setupLocalEnvironment(tb) } @@ -80,18 +84,25 @@ func SetupConnectionTest(tb testing.TB, expectNoErrorsInLogs, nginxless bool, ag MockManagementPlaneGrpcContainer, Container, expectNoErrorsInLogs, + AuxiliaryMockManagementPlaneGrpcContainer, ) } } } // setupContainerEnvironment sets up the container environment for testing. -func setupContainerEnvironment(ctx context.Context, tb testing.TB, nginxless bool, agentConfig string) { +// nolint: revive +func setupContainerEnvironment(ctx context.Context, tb testing.TB, nginxless, auxiliaryServer bool, + agentConfig string, +) { tb.Helper() tb.Log("Running tests in a container environment") containerNetwork := createContainerNetwork(ctx, tb) setupMockManagementPlaneGrpc(ctx, tb, containerNetwork) + if auxiliaryServer { + setupAuxiliaryMockManagementPlaneGrpc(ctx, tb, containerNetwork) + } params := &helpers.Parameters{ NginxAgentConfigPath: agentConfig, @@ -134,6 +145,24 @@ func setupMockManagementPlaneGrpc(ctx context.Context, tb testing.TB, containerN tb.Logf("Mock management API server running on %s", MockManagementPlaneAPIAddress) } +func setupAuxiliaryMockManagementPlaneGrpc(ctx context.Context, tb testing.TB, + containerNetwork *testcontainers.DockerNetwork, +) { + tb.Helper() + AuxiliaryMockManagementPlaneGrpcContainer = helpers.StartAuxiliaryMockManagementPlaneGrpcContainer(ctx, + tb, containerNetwork) + AuxiliaryMockManagementPlaneGrpcAddress = "managementPlaneAuxiliary:9095" + tb.Logf("Auxiliary mock management gRPC server running on %s", AuxiliaryMockManagementPlaneGrpcAddress) + + ipAddress, err := AuxiliaryMockManagementPlaneGrpcContainer.Host(ctx) + require.NoError(tb, err) + ports, err := AuxiliaryMockManagementPlaneGrpcContainer.Ports(ctx) + require.NoError(tb, err) + + AuxiliaryMockManagementPlaneAPIAddress = net.JoinHostPort(ipAddress, 
ports["9096/tcp"][0].HostPort) + tb.Logf("Auxiliary mock management API server running on %s", AuxiliaryMockManagementPlaneAPIAddress) +} + // setupNginxContainer configures and starts the NGINX container. func setupNginxContainer( ctx context.Context, @@ -186,7 +215,9 @@ func setupLocalEnvironment(tb testing.TB) { }(tb) } -func ManagementPlaneResponses(t *testing.T, numberOfExpectedResponses int) []*mpi.DataPlaneResponse { +func ManagementPlaneResponses(t *testing.T, numberOfExpectedResponses int, + mockManagementPlaneAPIAddress string, +) []*mpi.DataPlaneResponse { t.Helper() client := resty.New() @@ -204,7 +235,7 @@ func ManagementPlaneResponses(t *testing.T, numberOfExpectedResponses int) []*mp }, ) - url := fmt.Sprintf("http://%s/api/v1/responses", MockManagementPlaneAPIAddress) + url := fmt.Sprintf("http://%s/api/v1/responses", mockManagementPlaneAPIAddress) resp, err := client.R().EnableTrace().Get(url) require.NoError(t, err) @@ -227,19 +258,19 @@ func ManagementPlaneResponses(t *testing.T, numberOfExpectedResponses int) []*mp return response } -func ClearManagementPlaneResponses(t *testing.T) { +func ClearManagementPlaneResponses(t *testing.T, mockManagementPlaneAPIAddress string) { t.Helper() client := resty.New() - url := fmt.Sprintf("http://%s/api/v1/responses", MockManagementPlaneAPIAddress) + url := fmt.Sprintf("http://%s/api/v1/responses", mockManagementPlaneAPIAddress) resp, err := client.R().EnableTrace().Delete(url) require.NoError(t, err) assert.Equal(t, http.StatusOK, resp.StatusCode()) } -func VerifyConnection(t *testing.T, instancesLength int) string { +func VerifyConnection(t *testing.T, instancesLength int, mockManagementPlaneAPIAddress string) string { t.Helper() client := resty.New() @@ -255,7 +286,7 @@ func VerifyConnection(t *testing.T, instancesLength int) string { return r.StatusCode() == http.StatusNotFound || unmarshalErr != nil }, ) - url := fmt.Sprintf("http://%s/api/v1/connection", MockManagementPlaneAPIAddress) + url := 
fmt.Sprintf("http://%s/api/v1/connection", mockManagementPlaneAPIAddress) t.Logf("Connecting to %s", url) resp, err := client.R().EnableTrace().Get(url) @@ -332,7 +363,7 @@ func VerifyConnection(t *testing.T, instancesLength int) string { return nginxInstanceID } -func VerifyUpdateDataPlaneHealth(t *testing.T) { +func VerifyUpdateDataPlaneHealth(t *testing.T, mockManagementPlaneAPIAddress string) { t.Helper() client := resty.New() @@ -346,7 +377,7 @@ func VerifyUpdateDataPlaneHealth(t *testing.T) { }, ) - url := fmt.Sprintf("http://%s/api/v1/health", MockManagementPlaneAPIAddress) + url := fmt.Sprintf("http://%s/api/v1/health", mockManagementPlaneAPIAddress) resp, err := client.R().EnableTrace().Get(url) @@ -393,14 +424,14 @@ func VerifyUpdateDataPlaneHealth(t *testing.T) { assert.Equal(t, mpi.InstanceHealth_INSTANCE_HEALTH_STATUS_HEALTHY, healths[0].GetInstanceHealthStatus()) } -func VerifyUpdateDataPlaneStatus(t *testing.T) { +func VerifyUpdateDataPlaneStatus(t *testing.T, mockManagementPlaneAPIAddress string) { t.Helper() client := resty.New() client.SetRetryCount(statusRetryCount).SetRetryWaitTime(retryWait).SetRetryMaxWaitTime(retryMaxWait) - url := fmt.Sprintf("http://%s/api/v1/status", MockManagementPlaneAPIAddress) + url := fmt.Sprintf("http://%s/api/v1/status", mockManagementPlaneAPIAddress) resp, err := client.R().EnableTrace().Get(url) diff --git a/test/load/otel_collector_plugin_load_test.go b/test/load/otel_collector_plugin_load_test.go index c3797d2e9..fa4db0b2b 100644 --- a/test/load/otel_collector_plugin_load_test.go +++ b/test/load/otel_collector_plugin_load_test.go @@ -23,7 +23,7 @@ func TestMetric10kDPS(t *testing.T) { binary := parseBinary(os.Getenv("PACKAGE_NAME")) - otelTestBedCollector, err := filepath.Abs(fmt.Sprintf("../../%s", binary)) + otelTestBedCollector, err := filepath.Abs("../../" + binary) require.NoError(t, err) t.Logf("Absolute path is %s", otelTestBedCollector) diff --git a/test/mock/collector/mock-collector/auth/auth.go 
b/test/mock/collector/mock-collector/auth/auth.go index 550e4000b..02531138a 100644 --- a/test/mock/collector/mock-collector/auth/auth.go +++ b/test/mock/collector/mock-collector/auth/auth.go @@ -33,7 +33,7 @@ type Option func(*HeadersCheck) // Ensure that the authenticator implements the auth.Server interface. var _ auth.Server = (*HeadersCheck)(nil) -// nolint: ireturn +//nolint:ireturn func NewFactory() extension.Factory { return extension.NewFactory( aType, @@ -56,7 +56,7 @@ func (a *HeadersCheck) Authenticate(ctx context.Context, headers map[string][]st return ctx, nil } -// nolint: ireturn +//nolint:ireturn func CreateAuthExtensionFunc( _ context.Context, setting extension.Settings, diff --git a/test/mock/collector/mock-collector/auth/config.go b/test/mock/collector/mock-collector/auth/config.go index a58401884..c6e087899 100644 --- a/test/mock/collector/mock-collector/auth/config.go +++ b/test/mock/collector/mock-collector/auth/config.go @@ -17,7 +17,7 @@ type Config struct { AuthenticatorID component.ID `mapstructure:",squash"` } -// nolint: ireturn +//nolint:ireturn func CreateDefaultConfig() component.Config { return &Config{ AuthenticatorID: HeadersCheckID, diff --git a/test/mock/collector/nginx-agent.conf b/test/mock/collector/nginx-agent.conf index 3194f4c77..dd870c390 100644 --- a/test/mock/collector/nginx-agent.conf +++ b/test/mock/collector/nginx-agent.conf @@ -26,6 +26,10 @@ allowed_directories: - /usr/local/etc/nginx - /usr/share/nginx/modules - /var/run/nginx + +labels: + product-type: mock-product + product-version: v1.0.0 client: http: diff --git a/test/mock/grpc/mock_management_command_service.go b/test/mock/grpc/mock_management_command_service.go index 3cae35f43..ff0d753af 100644 --- a/test/mock/grpc/mock_management_command_service.go +++ b/test/mock/grpc/mock_management_command_service.go @@ -109,12 +109,12 @@ func (cs *CommandService) CreateConnection( } func (cs *CommandService) UpdateDataPlaneStatus( - _ context.Context, + ctx 
context.Context, request *mpi.UpdateDataPlaneStatusRequest) ( *mpi.UpdateDataPlaneStatusResponse, error, ) { - slog.Debug("Update data plane status request", "request", request) + slog.DebugContext(ctx, "Update data plane status request", "request", request) if request == nil { return nil, errors.New("empty update data plane status request") @@ -128,12 +128,12 @@ func (cs *CommandService) UpdateDataPlaneStatus( } func (cs *CommandService) UpdateDataPlaneHealth( - _ context.Context, + ctx context.Context, request *mpi.UpdateDataPlaneHealthRequest) ( *mpi.UpdateDataPlaneHealthResponse, error, ) { - slog.Debug("Update data plane health request", "request", request) + slog.DebugContext(ctx, "Update data plane health request", "request", request) if request == nil { return nil, errors.New("empty update dataplane health request") @@ -181,9 +181,7 @@ func (cs *CommandService) handleConfigUploadRequest( instanceID := upload.ConfigUploadRequest.GetOverview().GetConfigVersion().GetInstanceId() overviewFiles := upload.ConfigUploadRequest.GetOverview().GetFiles() - if cs.instanceFiles[instanceID] == nil { - cs.instanceFiles[instanceID] = overviewFiles - } else { + if cs.instanceFiles[instanceID] != nil { filesToDelete := cs.checkForDeletedFiles(instanceID, overviewFiles) for _, fileToDelete := range filesToDelete { err := os.Remove(fileToDelete) @@ -192,6 +190,7 @@ func (cs *CommandService) handleConfigUploadRequest( } } } + cs.instanceFiles[instanceID] = overviewFiles } func (cs *CommandService) checkForDeletedFiles(instanceID string, overviewFiles []*mpi.File) []string { @@ -246,6 +245,7 @@ func (cs *CommandService) createServer(logger *slog.Logger) { cs.addHealthEndpoint() cs.addResponseAndRequestEndpoints() cs.addConfigApplyEndpoint() + cs.addConfigEndpoint() } func (cs *CommandService) addConnectionEndpoint() { @@ -386,6 +386,30 @@ func (cs *CommandService) addConfigApplyEndpoint() { }) } +func (cs *CommandService) addConfigEndpoint() { + 
cs.server.GET("/api/v1/instance/:instanceID/config", func(c *gin.Context) { + instanceID := c.Param("instanceID") + var data map[string]interface{} + + response := &mpi.GetOverviewResponse{ + Overview: &mpi.FileOverview{ + ConfigVersion: &mpi.ConfigVersion{ + InstanceId: instanceID, + Version: files.GenerateConfigVersion(cs.instanceFiles[instanceID]), + }, + Files: cs.instanceFiles[instanceID], + }, + } + + if err := json.Unmarshal([]byte(protojson.Format(response)), &data); err != nil { + slog.Error("Failed to return connection", "error", err) + c.JSON(http.StatusInternalServerError, nil) + } + + c.JSON(http.StatusOK, data) + }) +} + func (cs *CommandService) findInstanceConfigFiles(instanceID string) (configFiles []*mpi.File, err error) { instanceDirectory := filepath.Join(cs.configDirectory, instanceID) diff --git a/test/mock/grpc/mock_management_file_service.go b/test/mock/grpc/mock_management_file_service.go index d57e4e0d4..9ee302aae 100644 --- a/test/mock/grpc/mock_management_file_service.go +++ b/test/mock/grpc/mock_management_file_service.go @@ -16,12 +16,13 @@ import ( "path/filepath" "strconv" + "github.com/nginx/agent/v3/pkg/files" + "github.com/cenkalti/backoff/v4" backoffHelpers "github.com/nginx/agent/v3/internal/backoff" "github.com/nginx/agent/v3/internal/config" internalgrpc "github.com/nginx/agent/v3/internal/grpc" "github.com/nginx/agent/v3/internal/logger" - "github.com/nginx/agent/v3/pkg/files" "google.golang.org/grpc" "github.com/nginx/agent/v3/api/grpc/mpi/v1" @@ -54,15 +55,15 @@ func NewFileService(configDirectory string, requestChan chan *v1.ManagementPlane } func (mgs *FileService) GetOverview( - _ context.Context, + ctx context.Context, request *v1.GetOverviewRequest, ) (*v1.GetOverviewResponse, error) { configVersion := request.GetConfigVersion() - slog.Info("Getting overview", "config_version", configVersion) + slog.InfoContext(ctx, "Getting overview", "config_version", configVersion) if _, ok := 
mgs.instanceFiles[request.GetConfigVersion().GetInstanceId()]; !ok { - slog.Error("Config version not found", "config_version", configVersion) + slog.ErrorContext(ctx, "Config version not found", "config_version", configVersion) return nil, status.Errorf(codes.NotFound, "Config version not found") } @@ -76,7 +77,7 @@ func (mgs *FileService) GetOverview( // nolint: unparam func (mgs *FileService) UpdateOverview( - _ context.Context, + ctx context.Context, request *v1.UpdateOverviewRequest, ) (*v1.UpdateOverviewResponse, error) { overview := request.GetOverview() @@ -85,7 +86,7 @@ func (mgs *FileService) UpdateOverview( if errMarshaledJSON != nil { return nil, fmt.Errorf("failed to marshal struct back to JSON: %w", errMarshaledJSON) } - slog.Info("Updating overview JSON", "overview", marshaledJSON) + slog.InfoContext(ctx, "Updating overview JSON", "overview", marshaledJSON) mgs.instanceFiles[overview.GetConfigVersion().GetInstanceId()] = overview.GetFiles() @@ -107,24 +108,24 @@ func (mgs *FileService) UpdateOverview( } func (mgs *FileService) GetFile( - _ context.Context, + ctx context.Context, request *v1.GetFileRequest, ) (*v1.GetFileResponse, error) { fileName := request.GetFileMeta().GetName() fileHash := request.GetFileMeta().GetHash() - slog.Info("Getting file", "name", fileName, "hash", fileHash) + slog.InfoContext(ctx, "Getting file", "name", fileName, "hash", fileHash) fullFilePath := mgs.findFile(request.GetFileMeta()) if fullFilePath == "" { - slog.Error("File not found", "file_name", fileName) + slog.ErrorContext(ctx, "File not found", "file_name", fileName) return nil, status.Errorf(codes.NotFound, "File not found") } bytes, err := os.ReadFile(fullFilePath) if err != nil { - slog.Error("Failed to get file contents", "full_file_path", fullFilePath, "error", err) + slog.ErrorContext(ctx, "Failed to get file contents", "full_file_path", fullFilePath, "error", err) return nil, status.Errorf(codes.Internal, "Failed to get file contents") } @@ -162,6 +163,101 
@@ func (mgs *FileService) GetFileStream(request *v1.GetFileRequest, streamingServer) } +func (mgs *FileService) UpdateFile( + ctx context.Context, + request *v1.UpdateFileRequest, +) (*v1.UpdateFileResponse, error) { + fileContents := request.GetContents().GetContents() + fileMeta := request.GetFile().GetFileMeta() + fileName := fileMeta.GetName() + fileHash := fileMeta.GetHash() + filePermissions := fileMeta.GetPermissions() + + slog.InfoContext(ctx, "Updating file", "name", fileName, "hash", fileHash) + + fullFilePath := mgs.findFile(request.GetFile().GetFileMeta()) + + if _, err := os.Stat(fullFilePath); os.IsNotExist(err) { + statErr := os.MkdirAll(filepath.Dir(fullFilePath), os.ModePerm) + if statErr != nil { + slog.InfoContext(ctx, "Failed to create/update file", "full_file_path", fullFilePath, "error", statErr) + return nil, status.Errorf(codes.Internal, "Failed to create/update file") + } + } + + err := os.WriteFile(fullFilePath, fileContents, fileMode(filePermissions)) + if err != nil { + slog.InfoContext(ctx, "Failed to create/update file", "full_file_path", fullFilePath, "error", err) + return nil, status.Errorf(codes.Internal, "Failed to create/update file") + } + + return &v1.UpdateFileResponse{ + FileMeta: fileMeta, + }, nil +} + +func (mgs *FileService) UpdateFileStream(streamingServer grpc.ClientStreamingServer[v1.FileDataChunk, + v1.UpdateFileResponse], +) error { + slog.Info("Updating file, stream") + + headerChunk, headerChunkErr := streamingServer.Recv() + if headerChunkErr != nil { + return headerChunkErr + } + + slog.Debug("File header chunk received", "header_chunk", headerChunk) + + header := headerChunk.GetHeader() + writeChunkedFileError := mgs.WriteChunkFile(header.GetFileMeta(), header, streamingServer) + if writeChunkedFileError != nil { + return writeChunkedFileError + } + + return nil +} + +func (mgs *FileService) WriteChunkFile(fileMeta *v1.FileMeta, header *v1.FileDataChunkHeader, + stream 
grpc.ClientStreamingServer[v1.FileDataChunk, v1.UpdateFileResponse], +) error { + fileName := mgs.findFile(fileMeta) + filePermissions := files.FileMode(fileMeta.GetPermissions()) + fullFilePath := mgs.findFile(fileMeta) + + if err := mgs.createDirectories(fullFilePath, filePermissions); err != nil { + return err + } + + fileToWrite, createError := os.Create(fullFilePath) + defer func() { + closeError := fileToWrite.Close() + if closeError != nil { + slog.Warn("Failed to close file", + "file", fileMeta.GetName(), + "error", closeError, + ) + } + }() + + if createError != nil { + return createError + } + slog.Debug("Writing chunked file", "file", fileName) + for range header.GetChunks() { + chunk, recvError := stream.Recv() + if recvError != nil { + return recvError + } + + _, chunkWriteError := fileToWrite.Write(chunk.GetContent().GetData()) + if chunkWriteError != nil { + return fmt.Errorf("error writing chunk to file %s: %w", fileMeta.GetName(), chunkWriteError) + } + } + + return nil +} + func (mgs *FileService) sendGetFileStreamChunks(ctx context.Context, fullFilePath, filePath string, chunkSize uint32, streamingServer grpc.ServerStreamingServer[v1.FileDataChunk], ) error { @@ -271,7 +367,7 @@ func (mgs *FileService) sendGetFileStreamChunk(ctx context.Context, chunk v1.Fil }) validatedError := internalgrpc.ValidateGrpcError(err) if validatedError != nil { - slog.Error("Failed to send get file stream chunk", "error", validatedError) + slog.ErrorContext(ctx, "Failed to send get file stream chunk", "error", validatedError) return validatedError } @@ -312,101 +408,6 @@ func readChunk( return chunk, err } -func (mgs *FileService) UpdateFile( - _ context.Context, - request *v1.UpdateFileRequest, -) (*v1.UpdateFileResponse, error) { - fileContents := request.GetContents().GetContents() - fileMeta := request.GetFile().GetFileMeta() - fileName := fileMeta.GetName() - fileHash := fileMeta.GetHash() - filePermissions := fileMeta.GetPermissions() - - slog.Info("Updating 
file", "name", fileName, "hash", fileHash) - - fullFilePath := mgs.findFile(request.GetFile().GetFileMeta()) - - if _, err := os.Stat(fullFilePath); os.IsNotExist(err) { - statErr := os.MkdirAll(filepath.Dir(fullFilePath), os.ModePerm) - if statErr != nil { - slog.Info("Failed to create/update file", "full_file_path", fullFilePath, "error", statErr) - return nil, status.Errorf(codes.Internal, "Failed to create/update file") - } - } - - err := os.WriteFile(fullFilePath, fileContents, fileMode(filePermissions)) - if err != nil { - slog.Info("Failed to create/update file", "full_file_path", fullFilePath, "error", err) - return nil, status.Errorf(codes.Internal, "Failed to create/update file") - } - - return &v1.UpdateFileResponse{ - FileMeta: fileMeta, - }, nil -} - -func (mgs *FileService) UpdateFileStream(streamingServer grpc.ClientStreamingServer[v1.FileDataChunk, - v1.UpdateFileResponse], -) error { - slog.Info("Updating file, stream") - - headerChunk, headerChunkErr := streamingServer.Recv() - if headerChunkErr != nil { - return headerChunkErr - } - - slog.Debug("File header chunk received", "header_chunk", headerChunk) - - header := headerChunk.GetHeader() - writeChunkedFileError := mgs.WriteChunkFile(header.GetFileMeta(), header, streamingServer) - if writeChunkedFileError != nil { - return writeChunkedFileError - } - - return nil -} - -func (mgs *FileService) WriteChunkFile(fileMeta *v1.FileMeta, header *v1.FileDataChunkHeader, - stream grpc.ClientStreamingServer[v1.FileDataChunk, v1.UpdateFileResponse], -) error { - fileName := mgs.findFile(fileMeta) - filePermissions := files.FileMode(fileMeta.GetPermissions()) - fullFilePath := mgs.findFile(fileMeta) - - if err := mgs.createDirectories(fullFilePath, filePermissions); err != nil { - return err - } - - fileToWrite, createError := os.Create(fullFilePath) - defer func() { - closeError := fileToWrite.Close() - if closeError != nil { - slog.Warn("Failed to close file", - "file", fileMeta.GetName(), - "error", 
closeError, - ) - } - }() - - if createError != nil { - return createError - } - slog.Debug("Writing chunked file", "file", fileName) - for i := uint32(0); i < header.GetChunks(); i++ { - chunk, recvError := stream.Recv() - if recvError != nil { - return recvError - } - - _, chunkWriteError := fileToWrite.Write(chunk.GetContent().GetData()) - if chunkWriteError != nil { - return fmt.Errorf("error writing chunk to file %s: %w", fileMeta.GetName(), chunkWriteError) - } - } - - return nil -} - func (mgs *FileService) createDirectories(fullFilePath string, filePermissions os.FileMode) error { if _, err := os.Stat(fullFilePath); os.IsNotExist(err) { statErr := os.MkdirAll(filepath.Dir(fullFilePath), filePermissions) diff --git a/test/mock/grpc/mock_management_server.go b/test/mock/grpc/mock_management_server.go index ad2b371a1..aff397395 100644 --- a/test/mock/grpc/mock_management_server.go +++ b/test/mock/grpc/mock_management_server.go @@ -52,6 +52,9 @@ var ( Time: keepAliveTime, Timeout: keepAliveTimeout, } + + errMissingMetadata = status.Errorf(codes.InvalidArgument, "missing metadata") + errInvalidToken = status.Errorf(codes.Unauthenticated, "invalid token") ) type MockManagementServer struct { @@ -146,6 +149,7 @@ func serverOptions(agentConfig *config.Config) []grpc.ServerOption { opts = append(opts, grpc.ChainUnaryInterceptor( grpcvalidator.UnaryServerInterceptor(), protovalidateInterceptor.UnaryServerInterceptor(validator), + logHeaders, ), ) } else { @@ -153,6 +157,7 @@ func serverOptions(agentConfig *config.Config) []grpc.ServerOption { grpcvalidator.UnaryServerInterceptor(), protovalidateInterceptor.UnaryServerInterceptor(validator), ensureValidToken, + logHeaders, ), ) } @@ -242,10 +247,6 @@ func reportHealth(healthcheck *health.Server, agentConfig *config.Config) { } func ensureValidToken(ctx context.Context, req any, _ *grpc.UnaryServerInfo, handler grpc.UnaryHandler) (any, error) { - var ( - errMissingMetadata = status.Errorf(codes.InvalidArgument, 
"missing metadata") - errInvalidToken = status.Errorf(codes.Unauthenticated, "invalid token") - ) md, ok := metadata.FromIncomingContext(ctx) if !ok { return nil, errMissingMetadata @@ -270,3 +271,14 @@ func valid(authorization []string) bool { // for a token matching an arbitrary string. return token == "1234" } + +func logHeaders(ctx context.Context, req any, _ *grpc.UnaryServerInfo, handler grpc.UnaryHandler) (any, error) { + md, ok := metadata.FromIncomingContext(ctx) + if !ok { + return nil, errMissingMetadata + } + + slog.InfoContext(ctx, "Request headers", "headers", md) + + return handler(ctx, req) +}