Skip to content
Open
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
18 changes: 17 additions & 1 deletion backend/internal/api/ws_handler.go
Original file line number Diff line number Diff line change
Expand Up @@ -1253,8 +1253,24 @@ func (h *WebSocketHandler) getMemoryInfo() (uint64, uint64) {
return memInfo.Used, memInfo.Total
}

// applyCgroupLimits applies cgroup limits when running in a container.
// applyCgroupLimits applies cgroup limits when running in an LXC (or similar)
// container where the limits represent the real hardware budget.
//
// It is intentionally a no-op inside Docker: Docker's --cpus / --memory flags
// set artificial cgroup constraints that are unrelated to the host totals we
// want to display. gopsutil already reads the correct host values there (via
// the bind-mounted /proc). Applying cgroup limits on top would produce the
// "#2343 regression" where the dashboard shows "512 MB RAM" while the host
// has 32 GB (#1110).
//
// In LXC the situation is the opposite: gopsutil reads the host's /proc
// (which shows the physical machine's RAM/CPU) rather than the slice of
// resources actually allocated to the LXC guest. The cgroup limits ARE the
// correct numbers to show.
func (h *WebSocketHandler) applyCgroupLimits(cpuCount int, memUsed, memTotal uint64) (int, uint64, uint64) {
if docker.IsDockerContainer() {
return cpuCount, memUsed, memTotal
}
cgroupLimits := h.getCachedCgroupLimitsInternal()
if cgroupLimits == nil {
return cpuCount, memUsed, memTotal
Expand Down
26 changes: 17 additions & 9 deletions backend/internal/huma/handlers/system.go
Original file line number Diff line number Diff line change
Expand Up @@ -286,16 +286,24 @@ func (h *SystemHandler) GetDockerInfo(ctx context.Context, input *GetDockerInfoI
cpuCount := info.NCPU
memTotal := info.MemTotal

// Check for cgroup limits (LXC, Docker, etc.)
if cgroupLimits, err := docker.DetectCgroupLimits(); err == nil {
if limit := cgroupLimits.MemoryLimit; limit > 0 {
limitInt := int64(limit)
if memTotal == 0 || limitInt < memTotal {
memTotal = limitInt
// Apply cgroup limits only when running outside Docker (e.g. in LXC).
// In Docker, --cpus/--memory are artificial operator constraints that
// should not cap the host totals shown in the dashboard. The Docker
// daemon's NCPU/MemTotal already reflect the real host. In LXC the
// daemon may report the physical machine's full capacity while the
// LXC guest has a smaller cgroup budget — apply those limits so the
// dashboard shows what Arcane's host actually has available.
if !docker.IsDockerContainer() {
if cgroupLimits, err := docker.DetectCgroupLimits(); err == nil {
if limit := cgroupLimits.MemoryLimit; limit > 0 {
limitInt := int64(limit)
if memTotal == 0 || limitInt < memTotal {
memTotal = limitInt
}
}
if cgroupLimits.CPUCount > 0 && (cpuCount == 0 || cgroupLimits.CPUCount < cpuCount) {
cpuCount = cgroupLimits.CPUCount
}
}
if cgroupLimits.CPUCount > 0 && (cpuCount == 0 || cgroupLimits.CPUCount < cpuCount) {
cpuCount = cgroupLimits.CPUCount
}
}

Expand Down
24 changes: 24 additions & 0 deletions backend/pkg/dockerutil/cgroup_utils.go
Original file line number Diff line number Diff line change
Expand Up @@ -271,6 +271,30 @@ var (
cgroupV2ContainerPattern = regexp.MustCompile(`docker-([a-f0-9]{64})\.scope`)
)

// IsDockerContainer reports whether the current process is running inside a
// Docker container (as opposed to an LXC container, a VM, or bare metal).
//
// The distinction matters for System Overview stats: in Docker the cgroup
// limits (--cpus / --memory) are artificial constraints set by the operator
// and should NOT be used as the host resource totals shown in the dashboard.
// In LXC, by contrast, the cgroup limits represent the real hardware budget
// assigned to the container — gopsutil reads the host's /proc values which are
// higher, so the cgroup limits must be applied to show correct figures.
//
// Detection: Docker always creates /.dockerenv inside every container it
// starts. LXC does not. We fall back to a /proc/self/cgroup pattern check
// as a secondary signal.
func IsDockerContainer() bool {
	// Primary signal: the /.dockerenv marker file Docker places in every
	// container it launches. Its mere existence is enough.
	_, statErr := os.Stat("/.dockerenv")
	if statErr == nil {
		return true
	}

	// Secondary signal: a Docker-style container ID in our own cgroup path.
	cgroupData, readErr := os.ReadFile("/proc/self/cgroup")
	if readErr != nil {
		// Can't inspect cgroups — assume we are not in Docker.
		return false
	}
	if cgroupV1ContainerPattern.Match(cgroupData) {
		return true
	}
	return cgroupV2ContainerPattern.Match(cgroupData)
}

// GetCurrentContainerID detects the current container ID using multiple detection methods
// It tries cgroup, mountinfo, and hostname in that order
func GetCurrentContainerID() (string, error) {
Expand Down
20 changes: 10 additions & 10 deletions backend/pkg/libarcane/edge/ws_proxy.go
Original file line number Diff line number Diff line change
Expand Up @@ -34,6 +34,16 @@ func ProxyWebSocketRequest(c *gin.Context, tunnel *AgentTunnel, targetPath strin
streamCtx, cancel := context.WithCancel(ctx)
defer cancel()

// Register the stream before sending the start message so agent replies
// are never dropped when the routing goroutine is faster than this goroutine.
agentDataCh := make(chan *TunnelMessage, 512)
clientDoneCh := make(chan struct{})

tunnel.Pending.Store(streamID, &PendingRequest{
ResponseCh: agentDataCh,
})
defer tunnel.Pending.Delete(streamID)

headers := buildWebSocketHeaders(c.Request)
if err := sendWebSocketStart(tunnel, streamID, targetPath, c.Request.URL.RawQuery, headers); err != nil {
slog.ErrorContext(ctx, "Failed to send WebSocket start to agent", "error", err)
Expand All @@ -46,16 +56,6 @@ func ProxyWebSocketRequest(c *gin.Context, tunnel *AgentTunnel, targetPath strin
"path", targetPath,
)

// Create channels for bidirectional data
agentDataCh := make(chan *TunnelMessage, 512)
clientDoneCh := make(chan struct{})

// Register the stream to receive data from the agent
tunnel.Pending.Store(streamID, &PendingRequest{
ResponseCh: agentDataCh,
})
defer tunnel.Pending.Delete(streamID)

// Goroutine to read from client and send to agent
go forwardClientToAgent(ctx, streamCtx, clientWS, tunnel, streamID, clientDoneCh)

Expand Down
Loading