Skip to content
Open
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
6 changes: 2 additions & 4 deletions .buildkite/steps/check-code-committed.sh
Original file line number Diff line number Diff line change
Expand Up @@ -42,16 +42,14 @@ if ! lint_out="$(golangci-lint run --color=always)" ; then
echo "golangci-lint found the following issues:"
echo ""
echo "${lint_out}"
buildkite-agent annotate --style=warning <<EOF
buildkite-agent annotate --style=error <<EOF
golangci-lint found the following issues:

\`\`\`term
${lint_out}
\`\`\`
EOF
# While we're cleaning up things found by golangci-lint, don't fail if it
# finds things.
exit 0
exit 1
fi

echo +++ Everything is clean and tidy! 🎉
20 changes: 10 additions & 10 deletions agent/agent_worker.go
Original file line number Diff line number Diff line change
Expand Up @@ -260,7 +260,7 @@ func (a *AgentWorker) Start(ctx context.Context, idleMon *idleMonitor) (startErr
// If the job acquisition was rejected, we can exit with an error
// so that supervisor knows that the job was not acquired due to the job being rejected.
if errors.Is(err, core.ErrJobAcquisitionRejected) {
return fmt.Errorf("Failed to acquire job %q: %w", a.agentConfiguration.AcquireJob, err)
return fmt.Errorf("failed to acquire job %q: %w", a.agentConfiguration.AcquireJob, err)
}

// If the job itself exited with nonzero code, then we want to exit
Expand Down Expand Up @@ -652,9 +652,9 @@ func (a *AgentWorker) Ping(ctx context.Context) (job *api.Job, action string, er
// If a ping fails, we don't really care, because it'll
// ping again after the interval.
if a.stats.lastPing.IsZero() {
return nil, action, fmt.Errorf("Failed to ping: %w (No successful ping yet)", pingErr)
return nil, action, fmt.Errorf("failed to ping: %w (no successful ping yet)", pingErr)
} else {
return nil, action, fmt.Errorf("Failed to ping: %w (Last successful was %v ago)", pingErr, time.Since(a.stats.lastPing))
return nil, action, fmt.Errorf("failed to ping: %w (last successful was %v ago)", pingErr, time.Since(a.stats.lastPing))
}
}

Expand Down Expand Up @@ -748,7 +748,7 @@ func (a *AgentWorker) AcceptAndRunJob(ctx context.Context, job *api.Job, idleMon

// If `accepted` is nil, then the job was never accepted
if accepted == nil {
return fmt.Errorf("Failed to accept job: %w", err)
return fmt.Errorf("failed to accept job: %w", err)
}

// If we're disconnecting-after-job, signal back to Buildkite that we're not
Expand Down Expand Up @@ -791,17 +791,17 @@ func (a *AgentWorker) RunJob(ctx context.Context, acceptResponse *api.Job, ignor
KubernetesExec: a.agentConfiguration.KubernetesExec,
})
if err != nil {
return fmt.Errorf("Failed to initialize job: %w", err)
return fmt.Errorf("failed to initialize job: %w", err)
}
if !a.jobRunner.CompareAndSwap(nil, jr) {
return fmt.Errorf("Agent worker already has a job running")
return fmt.Errorf("agent worker already has a job running")
}
// No more job, no more runner.
defer a.jobRunner.Store(nil)

// Start running the job
if err := jr.Run(ctx, ignoreAgentInDispatches); err != nil {
return fmt.Errorf("Failed to run job: %w", err)
return fmt.Errorf("failed to run job: %w", err)
}

return nil
Expand All @@ -821,12 +821,12 @@ func (a *AgentWorker) healthHandler() http.HandlerFunc {

if a.stats.lastHeartbeatError != nil {
w.WriteHeader(http.StatusInternalServerError)
fmt.Fprintf(w, "ERROR: last heartbeat failed: %v. last successful was %v ago", a.stats.lastHeartbeatError, time.Since(a.stats.lastHeartbeat))
fmt.Fprintf(w, "ERROR: last heartbeat failed: %v. last successful was %v ago", a.stats.lastHeartbeatError, time.Since(a.stats.lastHeartbeat)) //nolint:errcheck // status page writer; errors are non-actionable
} else {
if a.stats.lastHeartbeat.IsZero() {
fmt.Fprintf(w, "OK: no heartbeat yet")
fmt.Fprintf(w, "OK: no heartbeat yet") //nolint:errcheck // status page writer; errors are non-actionable
} else {
fmt.Fprintf(w, "OK: last heartbeat successful %v ago", time.Since(a.stats.lastHeartbeat))
fmt.Fprintf(w, "OK: last heartbeat successful %v ago", time.Since(a.stats.lastHeartbeat)) //nolint:errcheck // status page writer; errors are non-actionable
}
}
}
Expand Down
7 changes: 2 additions & 5 deletions agent/agent_worker_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -41,7 +41,7 @@ func TestDisconnect(t *testing.T) {
switch req.URL.Path {
case "/disconnect":
rw.WriteHeader(http.StatusOK)
fmt.Fprintf(rw, `{"id": "fakeuuid", "connection_state": "disconnected"}`)
fmt.Fprintf(rw, `{"id": "fakeuuid", "connection_state": "disconnected"}`) //nolint:errcheck // test handler
default:
t.Errorf("Unknown endpoint %s %s", req.Method, req.URL.Path)
http.Error(rw, "Not found", http.StatusNotFound)
Expand Down Expand Up @@ -203,7 +203,7 @@ func TestAcquireAndRunJobWaiting(t *testing.T) {

rw.Header().Set("Retry-After", fmt.Sprintf("%f", delay))
rw.WriteHeader(http.StatusLocked)
fmt.Fprintf(rw, `{"message": "Job waitinguuid is not yet eligible to be assigned"}`)
fmt.Fprintf(rw, `{"message": "Job waitinguuid is not yet eligible to be assigned"}`) //nolint:errcheck // test handler
default:
http.Error(rw, "Not found", http.StatusNotFound)
}
Expand Down Expand Up @@ -1011,9 +1011,6 @@ func TestAgentWorker_UnrecoverableErrorInPing(t *testing.T) {
server := NewFakeAPIServer()
defer server.Close()

const headerKey = "Buildkite-Hello"
const headerValue = "world"

agent := server.AddAgent(agentSessionToken)
agent.PingHandler = func(req *http.Request) (api.Ping, error) {
// Invalidate the token to trigger an unrecoverable error on
Expand Down
4 changes: 2 additions & 2 deletions agent/gcp_meta_data_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -20,9 +20,9 @@ func TestGCPMetaDataGetPaths(t *testing.T) {

switch path := r.URL.EscapedPath(); path {
case "/computeMetadata/v1/value":
fmt.Fprintf(w, "I could live on only burritos for the rest of my life")
fmt.Fprintf(w, "I could live on only burritos for the rest of my life") //nolint:errcheck // test handler
case "/computeMetadata/v1/nested/paths/work":
fmt.Fprintf(w, "Velociraptors are terrifying")
fmt.Fprintf(w, "Velociraptors are terrifying") //nolint:errcheck // test handler
default:
// NB: Do not use t.Fatal/Fatalf/FailNow from outside the test
// runner goroutine. See https://pkg.go.dev/testing#T.FailNow
Expand Down
2 changes: 1 addition & 1 deletion agent/integration/job_runner_integration_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -394,7 +394,7 @@ func TestChunksIntervalSeconds_ControlsUploadTiming(t *testing.T) {
mb.Expect().Once().AndCallFunc(func(c *bintest.Call) {
start := time.Now()
for time.Since(start) < 4*time.Second {
fmt.Fprintf(c.Stdout, "Log output at %v\n", time.Now())
fmt.Fprintf(c.Stdout, "Log output at %v\n", time.Now()) //nolint:errcheck // test helper; write error is non-actionable
time.Sleep(100 * time.Millisecond)
}
c.Exit(0)
Expand Down
17 changes: 0 additions & 17 deletions agent/integration/job_verification_integration_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -347,23 +347,6 @@ var (
},
Token: "bkaj_job-token",
}

jobWithMismatchedSecrets = api.Job{
> Inline review comment (Contributor): "This is unused? Should it be used?"
ChunksMaxSizeBytes: 1024,
ID: defaultJobID,
Step: pipeline.CommandStep{
Command: "echo hello world",
Secrets: pipeline.Secrets{
pipeline.Secret{Key: "DATABASE_URL", EnvironmentVariable: "DATABASE_URL"},
},
},
Env: map[string]string{
"BUILDKITE_COMMAND": "echo hello world",
"BUILDKITE_REPO": defaultRepositoryURL,
"DEPLOY": "0",
},
Token: "bkaj_job-token",
}
)

func TestJobVerification(t *testing.T) {
Expand Down
2 changes: 1 addition & 1 deletion agent/integration/test_helpers.go
Original file line number Diff line number Diff line change
Expand Up @@ -143,7 +143,7 @@ type route struct {
func (t *testAgentEndpoint) getJobsHandler() http.HandlerFunc {
return func(rw http.ResponseWriter, req *http.Request) {
rw.WriteHeader(http.StatusOK)
fmt.Fprintf(rw, `{"state":"running"}`)
fmt.Fprintf(rw, `{"state":"running"}`) //nolint:errcheck // test handler
}
}

Expand Down
4 changes: 2 additions & 2 deletions agent/job_runner.go
Original file line number Diff line number Diff line change
Expand Up @@ -906,8 +906,8 @@ func createJobEnvFiles(l logger.Logger, jobID string, kubernetesExec bool) (shel

jsonFile, err = os.CreateTemp(tempDir, fmt.Sprintf("job-env-json-%s", jobID))
if err != nil {
shellFile.Close()
os.Remove(shellFile.Name())
shellFile.Close() //nolint:errcheck // cleaning up on error path; Close error is secondary
os.Remove(shellFile.Name()) //nolint:errcheck // best-effort cleanup on error path
return nil, nil, err
}
l.Debug("[JobRunner] Created env file (JSON format): %s", jsonFile.Name())
Expand Down
2 changes: 1 addition & 1 deletion agent/log_streamer.go
Original file line number Diff line number Diff line change
Expand Up @@ -85,7 +85,7 @@ func NewLogStreamer(
// Start spins up a number of log streamer workers.
func (ls *LogStreamer) Start(ctx context.Context) error {
if ls.conf.MaxChunkSizeBytes <= 0 {
return errors.New("Maximum chunk size must be more than 0. No logs will be sent.")
return errors.New("maximum chunk size must be more than 0, no logs will be sent")
}

if ls.conf.MaxSizeBytes <= 0 {
Expand Down
16 changes: 8 additions & 8 deletions agent/pipeline_uploader.go
Original file line number Diff line number Diff line change
Expand Up @@ -61,7 +61,7 @@ type PipelineUploader struct {
func (u *PipelineUploader) Upload(ctx context.Context, l logger.Logger) error {
result, err := u.pipelineUploadAsyncWithRetry(ctx, l)
if err != nil {
return fmt.Errorf("Failed to upload and accept pipeline: %w", err)
return fmt.Errorf("failed to upload and accept pipeline: %w", err)
}

// If the route does not support async uploads, and it did not error, then the pipeline
Expand All @@ -75,27 +75,27 @@ func (u *PipelineUploader) Upload(ctx context.Context, l logger.Logger) error {

jobIDFromResponse, uuidFromResponse, err := extractJobIdUUID(result.pipelineStatusURL.String())
if err != nil {
return fmt.Errorf("Failed to parse location to check status of pipeline: %w", err)
return fmt.Errorf("failed to parse location to check status of pipeline: %w", err)
}

if jobIDFromResponse != u.JobID {
return fmt.Errorf(
"JobID from API: %q does not match request: %s",
"jobID from API: %q does not match request: %s",
jobIDFromResponse,
u.JobID,
)
}

if uuidFromResponse != u.Change.UUID {
return fmt.Errorf(
"Pipeline Upload UUID from API: %q does not match request: %s",
"pipeline upload UUID from API: %q does not match request: %s",
uuidFromResponse,
u.Change.UUID,
)
}

if err := u.pollForPiplineUploadStatus(ctx, l); err != nil {
return fmt.Errorf("Failed to upload and process pipeline: %w", err)
return fmt.Errorf("failed to upload and process pipeline: %w", err)
}

return nil
Expand Down Expand Up @@ -208,17 +208,17 @@ func (u *PipelineUploader) pollForPiplineUploadStatus(ctx context.Context, l log
return nil
case "pending", "processing":
setNextIntervalFromResponse(r, resp)
err := fmt.Errorf("Pipeline upload not yet applied: %s", uploadStatus.State)
err := fmt.Errorf("pipeline upload not yet applied: %s", uploadStatus.State)
l.Info("%s (%s)", err, r)
return err
case "rejected", "failed":
l.Error("Unrecoverable error, skipping retries")
r.Break()
return fmt.Errorf("Pipeline upload %s: %s", uploadStatus.State, uploadStatus.Message)
return fmt.Errorf("pipeline upload %s: %s", uploadStatus.State, uploadStatus.Message)
default:
l.Error("Unrecoverable error, skipping retries")
r.Break()
return fmt.Errorf("Unexpected pipeline upload state from API: %s", uploadStatus.State)
return fmt.Errorf("unexpected pipeline upload state from API: %s", uploadStatus.State)
}
})
}
Expand Down
16 changes: 8 additions & 8 deletions agent/plugin/plugin.go
Original file line number Diff line number Diff line change
Expand Up @@ -58,7 +58,7 @@ func CreatePlugin(location string, config map[string]any) (*Plugin, error) {
plugin.Vendored = strings.HasPrefix(plugin.Location, ".")

if plugin.Version != "" && strings.Count(plugin.Version, "#") > 0 {
return nil, fmt.Errorf("Too many '#'s in %q", location)
return nil, fmt.Errorf("too many '#'s in %q", location)
}

if u.User != nil {
Expand Down Expand Up @@ -114,7 +114,7 @@ func CreateFromJSON(j string) ([]*Plugin, error) {
// Since there is a config, it's gotta be a hash
config, ok := config.(map[string]any)
if !ok {
return nil, fmt.Errorf("Configuration for \"%s\" is not a hash", location)
return nil, fmt.Errorf("configuration for %q is not a hash", location)
}

// Add the plugin with config to the array
Expand All @@ -127,7 +127,7 @@ func CreateFromJSON(j string) ([]*Plugin, error) {
}

default:
return nil, fmt.Errorf("Unknown type in plugin definition (%s)", vv)
return nil, fmt.Errorf("unknown type in plugin definition (%s)", vv)
}
}

Expand Down Expand Up @@ -248,7 +248,7 @@ func flattenConfigToEnvMap(into map[string]string, v any, envPrefix string) erro
return nil

default:
return fmt.Errorf("Unknown type %T %v", v, v)
return fmt.Errorf("unknown type %T %v", v, v)
}
}

Expand Down Expand Up @@ -338,24 +338,24 @@ func (p *Plugin) DisplayName() string {

func (p *Plugin) constructRepositoryHost() (string, error) {
if p.Location == "" {
return "", fmt.Errorf("Missing plugin location")
return "", fmt.Errorf("missing plugin location")
}

parts := strings.Split(p.Location, "/")
if len(parts) < 2 {
return "", fmt.Errorf("Incomplete plugin path %q", p.Location)
return "", fmt.Errorf("incomplete plugin path %q", p.Location)
}

switch parts[0] {
case "github.com", "bitbucket.org":
if len(parts) < 3 {
return "", fmt.Errorf("Incomplete plugin path %q", p.Location)
return "", fmt.Errorf("incomplete plugin path %q", p.Location)
}
return strings.Join(parts[:3], "/"), nil

case "gitlab.com":
if len(parts) < 3 {
return "", fmt.Errorf("Incomplete plugin path %q", p.Location)
return "", fmt.Errorf("incomplete plugin path %q", p.Location)
}
return strings.Join(parts, "/"), nil

Expand Down
24 changes: 13 additions & 11 deletions agent/run_job.go
Original file line number Diff line number Diff line change
Expand Up @@ -164,14 +164,14 @@ func (r *JobRunner) Run(ctx context.Context, ignoreAgentInDispatches *bool) (err
default: // no error, all good, keep going
l := r.agentLogger.WithFields(logger.StringField("jobID", job.ID), logger.StringField("signature", job.Step.Signature.Value))
l.Info("Successfully verified job")
fmt.Fprintln(r.jobLogs, "~~~ ✅ Job signature verified")
fmt.Fprintf(r.jobLogs, "signature: %s\n", job.Step.Signature.Value)
fmt.Fprintln(r.jobLogs, "~~~ ✅ Job signature verified") //nolint:errcheck // job log write; errors are non-actionable
fmt.Fprintf(r.jobLogs, "signature: %s\n", job.Step.Signature.Value) //nolint:errcheck // job log write; errors are non-actionable
}
}

// Validate the repository if the list of allowed repositories is set.
if err := r.validateConfigAllowlists(job); err != nil {
fmt.Fprintln(r.jobLogs, err.Error())
fmt.Fprintln(r.jobLogs, err.Error()) //nolint:errcheck // job log write; errors are non-actionable
r.agentLogger.Error(err.Error())

exit.Status = -1
Expand All @@ -186,7 +186,7 @@ func (r *JobRunner) Run(ctx context.Context, ignoreAgentInDispatches *bool) (err
ok, err := r.executePreBootstrapHook(ctx, hook)
if !ok {
// Ensure the Job UI knows why this job resulted in failure
fmt.Fprintln(r.jobLogs, "pre-bootstrap hook rejected this job, see the buildkite-agent logs for more details")
fmt.Fprintln(r.jobLogs, "pre-bootstrap hook rejected this job, see the buildkite-agent logs for more details") //nolint:errcheck // job log write; errors are non-actionable
// But disclose more information in the agent logs
r.agentLogger.Error("pre-bootstrap hook rejected this job: %s", err)

Expand Down Expand Up @@ -297,21 +297,21 @@ func (r *JobRunner) verificationFailureLogs(behavior string, err error) {
}

l.Warn("Job verification failed")
fmt.Fprintf(r.jobLogs, "%s Job signature verification failed\n", prefix)
fmt.Fprintf(r.jobLogs, "error: %s\n", err)
fmt.Fprintf(r.jobLogs, "%s Job signature verification failed\n", prefix) //nolint:errcheck // job log write; errors are non-actionable
fmt.Fprintf(r.jobLogs, "error: %s\n", err) //nolint:errcheck // job log write; errors are non-actionable

if errors.Is(err, ErrNoSignature) {
fmt.Fprintln(r.jobLogs, "no signature in job")
fmt.Fprintln(r.jobLogs, "no signature in job") //nolint:errcheck // job log write; errors are non-actionable
} else if ise := new(invalidSignatureError); errors.As(err, &ise) {
fmt.Fprintf(r.jobLogs, "signature: %s\n", r.conf.Job.Step.Signature.Value)
fmt.Fprintf(r.jobLogs, "signature: %s\n", r.conf.Job.Step.Signature.Value) //nolint:errcheck // job log write; errors are non-actionable
} else if mke := new(missingKeyError); errors.As(err, &mke) {
fmt.Fprintf(r.jobLogs, "signature: %s\n", mke.signature)
fmt.Fprintf(r.jobLogs, "signature: %s\n", mke.signature) //nolint:errcheck // job log write; errors are non-actionable
}

if behavior == VerificationBehaviourWarn {
l.Warn("Job will be run whether or not it can be verified - this is not recommended.")
l.Warn("You can change this behavior with the `verification-failure-behavior` agent configuration option.")
fmt.Fprintln(r.jobLogs, "Job will be run without verification")
fmt.Fprintln(r.jobLogs, "Job will be run without verification") //nolint:errcheck // job log write; errors are non-actionable
}
}

Expand All @@ -320,7 +320,7 @@ func (r *JobRunner) runJob(ctx context.Context) core.ProcessExit {
// Run the process. This will block until it finishes.
if err := r.process.Run(ctx); err != nil {
// Send the error to job logs
fmt.Fprintf(r.jobLogs, "Error running job: %s\n", err)
fmt.Fprintf(r.jobLogs, "Error running job: %s\n", err) //nolint:errcheck // job log write; errors are non-actionable

// The process did not run at all, so make sure it fails
return core.ProcessExit{
Expand All @@ -335,10 +335,12 @@ func (r *JobRunner) runJob(ctx context.Context) core.ProcessExit {
if isK8s && !r.agentStopping.Load() {
switch {
case r.cancelled.Load() && k8sProcess.AnyClientIn(kubernetes.StateNotYetConnected):
//nolint:errcheck // job log write; errors are non-actionable
fmt.Fprint(r.jobLogs, `+++ Unknown container exit status
One or more containers never connected to the agent. Perhaps the container image specified in your podSpec could not be pulled (ImagePullBackOff)?
`)
case k8sProcess.AnyClientIn(kubernetes.StateLost):
//nolint:errcheck // job log write; errors are non-actionable
fmt.Fprint(r.jobLogs, `+++ Unknown container exit status
One or more containers connected to the agent, but then stopped communicating without exiting normally. Perhaps the container was OOM-killed?
`)
Expand Down
Loading