Skip to content

Commit 9425064

Browse files
authored
Merge pull request #9988 from rancher-sandbox/rddepman/golangci-lint/2.10.1-to-2.11.2
rddepman: bump golangci-lint from 2.10.1 to 2.11.2
2 parents 338e883 + 9874066 commit 9425064

File tree

3 files changed

+63
-45
lines changed

3 files changed

+63
-45
lines changed

pkg/rancher-desktop/assets/dependencies.yaml

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -10,7 +10,7 @@ helm: 4.1.1
1010
dockerCLI: 29.3.0
1111
dockerBuildx: 0.32.1
1212
dockerCompose: 5.1.0
13-
golangci-lint: 2.10.1
13+
golangci-lint: 2.11.2
1414
trivy: 0.69.3
1515
steve: 0.1.0-beta9.1
1616
rancherDashboard: 2.11.1.rd3

src/go/guestagent/main.go

Lines changed: 44 additions & 30 deletions
Original file line numberDiff line numberDiff line change
@@ -88,7 +88,35 @@ func main() {
8888
log.Fatal("agent must run as root")
8989
}
9090

91+
if !*enableContainerd &&
92+
!*enableDocker {
93+
log.Fatal("requires either -docker or -containerd enabled.")
94+
}
95+
96+
if *enableContainerd &&
97+
*enableDocker {
98+
log.Fatal("requires either -docker or -containerd but not both.")
99+
}
100+
101+
if err := runAgent(
102+
*enableContainerd, *enableDocker, *enableKubernetes,
103+
*containerdSock, *configPath, *k8sServiceListenerAddr,
104+
*adminInstall, *k8sAPIPort, *tapIfaceIP,
105+
); err != nil {
106+
log.Fatal(err)
107+
}
108+
109+
log.Info("Rancher Desktop Agent Shutting Down")
110+
}
111+
112+
func runAgent(
113+
enableContainerd, enableDocker, enableKubernetes bool,
114+
containerdSock, configPath, k8sServiceListenerAddr string,
115+
adminInstall bool,
116+
k8sAPIPort, tapIfaceIP string,
117+
) error {
91118
groupCtx, cancel := context.WithCancel(context.Background())
119+
defer cancel()
92120
group, ctx := errgroup.WithContext(groupCtx)
93121

94122
sigCh := make(chan os.Signal, 1)
@@ -100,51 +128,41 @@ func main() {
100128
cancel()
101129
}()
102130

103-
if !*enableContainerd &&
104-
!*enableDocker {
105-
log.Fatal("requires either -docker or -containerd enabled.")
106-
}
107-
108-
if *enableContainerd &&
109-
*enableDocker {
110-
log.Fatal("requires either -docker or -containerd but not both.")
111-
}
112-
113131
var portTracker tracker.Tracker
114132

115133
wslProxyForwarder := forwarder.NewWSLProxyForwarder(ctx, "/run/wsl-proxy.sock")
116-
portTracker = tracker.NewAPITracker(ctx, wslProxyForwarder, tracker.GatewayBaseURL, *tapIfaceIP, *adminInstall)
134+
portTracker = tracker.NewAPITracker(ctx, wslProxyForwarder, tracker.GatewayBaseURL, tapIfaceIP, adminInstall)
117135
// Manually register the port for K8s API, we would
118136
// only want to send this manual port mapping if both
119137
// of the following conditions are met:
120138
// 1) if kubernetes is enabled
121139
// 2) when wsl-proxy for wsl-integration is enabled
122-
if *enableKubernetes {
123-
port, err := nat.NewPort("tcp", *k8sAPIPort)
140+
if enableKubernetes {
141+
port, err := nat.NewPort("tcp", k8sAPIPort)
124142
if err != nil {
125-
log.Fatalf("failed to parse port for k8s API: %v", err)
143+
return fmt.Errorf("failed to parse port for k8s API: %w", err)
126144
}
127145
k8sAPIPortMapping := types.PortMapping{
128146
Remove: false,
129147
Ports: nat.PortMap{
130148
port: []nat.PortBinding{
131149
{
132150
HostIP: "127.0.0.1",
133-
HostPort: *k8sAPIPort,
151+
HostPort: k8sAPIPort,
134152
},
135153
},
136154
},
137155
}
138156
if err := wslProxyForwarder.Send(k8sAPIPortMapping); err != nil {
139-
log.Fatalf("failed to send a static portMapping event to wsl-proxy: %v", err)
157+
return fmt.Errorf("failed to send a static portMapping event to wsl-proxy: %w", err)
140158
}
141-
log.Debugf("successfully forwarded k8s API port [%s] to wsl-proxy", *k8sAPIPort)
159+
log.Debugf("successfully forwarded k8s API port [%s] to wsl-proxy", k8sAPIPort)
142160
}
143161

144-
if *enableContainerd {
162+
if enableContainerd {
145163
group.Go(func() error {
146164
for {
147-
eventMonitor, err := containerd.NewEventMonitor(*containerdSock, portTracker)
165+
eventMonitor, err := containerd.NewEventMonitor(containerdSock, portTracker)
148166
if err != nil {
149167
return fmt.Errorf("error initializing containerd event monitor: %w", err)
150168
}
@@ -166,7 +184,7 @@ func main() {
166184
})
167185
}
168186

169-
if *enableDocker {
187+
if enableDocker {
170188
group.Go(func() error {
171189
for {
172190
eventMonitor, err := docker.NewEventMonitor(portTracker)
@@ -188,18 +206,18 @@ func main() {
188206
})
189207
}
190208

191-
if *enableKubernetes {
192-
k8sServiceListenerIP := net.ParseIP(*k8sServiceListenerAddr)
209+
if enableKubernetes {
210+
k8sServiceListenerIP := net.ParseIP(k8sServiceListenerAddr)
193211

194212
if k8sServiceListenerIP == nil || (!k8sServiceListenerIP.Equal(net.IPv4zero) && !k8sServiceListenerIP.Equal(net.IPv4(127, 0, 0, 1))) {
195-
log.Fatalf("empty or invalid input for Kubernetes service listener IP address %s. "+
196-
"Valid options are 0.0.0.0 and 127.0.0.1.", *k8sServiceListenerAddr)
213+
return fmt.Errorf("empty or invalid Kubernetes service listener IP address %s; "+
214+
"valid options are 0.0.0.0 and 127.0.0.1", k8sServiceListenerAddr)
197215
}
198216

199217
group.Go(func() error {
200218
// Watch for kube
201219
err := kube.WatchForServices(ctx,
202-
*configPath,
220+
configPath,
203221
k8sServiceListenerIP,
204222
portTracker)
205223
if err != nil {
@@ -227,11 +245,7 @@ func main() {
227245
return procScanner.ForwardPorts()
228246
})
229247

230-
if err := group.Wait(); err != nil {
231-
log.Fatal(err)
232-
}
233-
234-
log.Info("Rancher Desktop Agent Shutting Down")
248+
return group.Wait()
235249
}
236250

237251
func tryConnectAPI(ctx context.Context, socketFile string, verify func(context.Context) error) error {

src/go/networking/cmd/host/switch_windows.go

Lines changed: 18 additions & 14 deletions
Original file line numberDiff line numberDiff line change
@@ -60,14 +60,6 @@ func main() {
6060
logrus.SetLevel(logrus.DebugLevel)
6161
}
6262

63-
// config flags
64-
ctx, cancel := context.WithCancel(context.Background())
65-
groupErrs, ctx := errgroup.WithContext(ctx)
66-
67-
// catch user issued signals
68-
sigChan := make(chan os.Signal, 1)
69-
signal.Notify(sigChan, os.Interrupt, syscall.SIGTERM, syscall.SIGINT)
70-
7163
subnet, err := config.ValidateSubnet(virtualSubnet)
7264
if err != nil {
7365
logrus.Fatal(err)
@@ -80,11 +72,26 @@ func main() {
8072
logrus.Fatal(err)
8173
}
8274

83-
cfg := newConfig(*subnet, portForwarding, debug)
75+
if err := runSwitch(*subnet, portForwarding); err != nil {
76+
logrus.Error(err)
77+
os.Exit(1)
78+
}
79+
}
80+
81+
func runSwitch(subnet config.Subnet, portForwarding map[string]string) error {
82+
ctx, cancel := context.WithCancel(context.Background())
83+
defer cancel()
84+
groupErrs, ctx := errgroup.WithContext(ctx)
85+
86+
// catch user issued signals
87+
sigChan := make(chan os.Signal, 1)
88+
signal.Notify(sigChan, os.Interrupt, syscall.SIGTERM, syscall.SIGINT)
89+
90+
cfg := newConfig(subnet, portForwarding, debug)
8491

8592
ln, err := vsockHandshake(ctx, vsockHandshakePort, vsock.SignaturePhrase)
8693
if err != nil {
87-
logrus.Fatalf("handshake with peer process failed: %v", err)
94+
return fmt.Errorf("handshake with peer process failed: %w", err)
8895
}
8996

9097
logrus.Debugf("attempting to start a virtual network with the following config: %+v", cfg)
@@ -104,10 +111,7 @@ func main() {
104111
}
105112
})
106113
// Wait for all of the go funcs to finish up
107-
if err := groupErrs.Wait(); err != nil {
108-
logrus.Error(err)
109-
os.Exit(1)
110-
}
114+
return groupErrs.Wait()
111115
}
112116

113117
func run(ctx context.Context, g *errgroup.Group, cfg *types.Configuration, ln net.Listener) error {

0 commit comments

Comments (0)