102 changes: 91 additions & 11 deletions Sources/ContainerClient/Parser.swift
@@ -554,6 +554,7 @@ public struct Parser {
     /// Parse --publish-port arguments into PublishPort objects
     /// The format of each argument is `[host-ip:]host-port:container-port[/protocol]`
     /// (e.g., "127.0.0.1:8080:80/tcp")
+    /// host-port and container-port can be ranges (e.g., "127.0.0.1:3456-4567:3456-4567/tcp")
     ///
     /// - Parameter rawPublishPorts: Array of port arguments
     /// - Returns: Array of PublishPort objects
@@ -563,14 +564,14 @@ public struct Parser {

         // Process each raw port string
         for socket in rawPublishPorts {
-            let parsedSocket = try Parser.publishPort(socket)
-            sockets.append(parsedSocket)
+            let parsedSockets = try Parser.publishPort(socket)
+            sockets.append(contentsOf: parsedSockets)
         }
         return sockets
     }

-    // Parse a single `--publish-port` argument into a `PublishPort`.
-    public static func publishPort(_ portText: String) throws -> PublishPort {
+    // Parse a single `--publish-port` argument into a `[PublishPort]`.
+    public static func publishPort(_ portText: String) throws -> [PublishPort] {
         let protoSplit = portText.split(separator: "/")
         let proto: PublishProtocol
         let addressAndPortText: String
@@ -607,19 +608,98 @@ public struct Parser {
         }

         guard let hostPort = Int(hostPortText) else {
-            throw ContainerizationError(.invalidArgument, message: "invalid publish host port: \(hostPortText)")
+            let hostPortRangeStart: Int
+            let hostPortRangeEnd: Int
+            let containerPortRangeStart: Int
+            let containerPortRangeEnd: Int
+
+            let hostPortParts = hostPortText.split(separator: "-")
+            switch hostPortParts.count {
+            case 2:
+                guard let start = Int(hostPortParts[0]) else {
+                    throw ContainerizationError(.invalidArgument, message: "invalid publish host port: \(hostPortText)")
+                }
+
+                guard let end = Int(hostPortParts[1]) else {
+                    throw ContainerizationError(.invalidArgument, message: "invalid publish host port: \(hostPortText)")
+                }
+
+                hostPortRangeStart = start
+                hostPortRangeEnd = end
+            default:
+                throw ContainerizationError(.invalidArgument, message: "invalid publish host port: \(hostPortText)")
+            }
+
+            let containerPortParts = containerPortText.split(separator: "-")
+            switch containerPortParts.count {
+            case 2:
+                guard let start = Int(containerPortParts[0]) else {
+                    throw ContainerizationError(.invalidArgument, message: "invalid publish container port: \(containerPortText)")
+                }
+
+                guard let end = Int(containerPortParts[1]) else {
+                    throw ContainerizationError(.invalidArgument, message: "invalid publish container port: \(containerPortText)")
+                }
+
+                containerPortRangeStart = start
+                containerPortRangeEnd = end
+            default:
+                throw ContainerizationError(.invalidArgument, message: "invalid publish container port: \(containerPortText)")
+            }
+
+            guard hostPortRangeStart > 1,
+                hostPortRangeStart < hostPortRangeEnd
+            else {
+                throw ContainerizationError(.invalidArgument, message: "invalid publish host port range: \(hostPortText)")
+            }
+
+            guard containerPortRangeStart > 1,
+                containerPortRangeStart < containerPortRangeEnd
+            else {
+                throw ContainerizationError(.invalidArgument, message: "invalid publish container port range: \(containerPortText)")
+            }
+
+            let hostRange = hostPortRangeEnd - hostPortRangeStart
+            let containerRange = containerPortRangeEnd - containerPortRangeStart
+
+            guard hostRange == containerRange else {
+                throw ContainerizationError(.invalidArgument, message: "publish host and container port ranges are not equal: \(addressAndPortText)")
+            }
+
+            var publishPorts = [PublishPort]()
+            for i in 0...(hostPortRangeEnd - hostPortRangeStart) {

Contributor:
What happens if I specify -p 127.0.0.1:1024-65535:1024-65535?

Author:
Thank you for your response!

I have a bunch of other things running on my machine, but if I specify -p 7001-8020:7001-8020 -p 8022-49325:8022-49325 -p 58000-63000:58000-63000 -p 63765-65535:63765-65535, for a total of 49092 ports, it works just fine and I cannot see any degraded performance.

Contributor:
Cool, thanks for doing the experiment.

Could you try the exact same command with Activity Monitor open and see what you see for memory utilization before and after?

Contributor:
Since everything's getting multiplexed down onto NIO and an event loop group, we might be able to do this. I'll do a little asking around and see if folks more expert than me in this area can see any gotchas.

Author:
Memory usage when not publishing any ports:
Virtual Machine Service for container-runtime-linux: 179.4MB
container-runtime-linux: 20.5MB

Memory usage when publishing the same ports as in my answer above:
Virtual Machine Service for container-runtime-linux: 179.6MB
container-runtime-linux: 170.4MB

So the memory usage does increase significantly, but I think 170.4MB is still acceptable, especially considering that most people won't publish that many ports.

Contributor:
Yeah, I'm not surprised by that. It works out to a little under 3.5K worth of memory per port forward.

The way NIO works, I don't think you'd see much performance degradation other than what might arise from cache misses if you're sending data concurrently through a lot of different ports at once.
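
(As a back-of-envelope check on the figures above: 170.4 MB − 20.5 MB ≈ 149.9 MB across the 49,092 TCP forwards, or roughly 3.1 KB per forward, in line with that estimate.)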

Contributor:
Appreciate you following up with these tests. One other one to try: could you do the same memory experiment for the UDP case?

UDP is a bit different, as we need to carry a bit of "connection state" in an LRU cache. I don't think it's a dealbreaker, but it'd be good to characterize what goes on there.

I'm also working with some experts to review not your PR, but our NIO port proxy implementation, to make sure this won't break under load and see if we can reduce memory utilization.

Author:
The UDP case uses more memory:
Virtual Machine Service for container-runtime-linux: 179.5MB
container-runtime-linux: 222.4MB

Contributor (@jglogan, Oct 27, 2025):
Thanks, so another 1KB per proxy for the LRU cache entries and UDP proxy context.

I hope to know enough tomorrow to tell whether there's work we need to do on the proxy implementation to make sure we can scale reliably.
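
(Same check for UDP: 222.4 MB − 170.4 MB = 52.0 MB across the same 49,092 forwards, or roughly 1.1 KB of additional state per forward.)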

+                let hostPort = hostPortRangeStart + i
+                let containerPort = containerPortRangeStart + i
+
+                publishPorts.append(
+                    PublishPort(
+                        hostAddress: hostAddress,
+                        hostPort: hostPort,
+                        containerPort: containerPort,
+                        proto: proto
+                    )
+                )
+            }
+
+            return publishPorts
         }

         guard let containerPort = Int(containerPortText) else {
             throw ContainerizationError(.invalidArgument, message: "invalid publish container port: \(containerPortText)")
         }

-        return PublishPort(
-            hostAddress: hostAddress,
-            hostPort: hostPort,
-            containerPort: containerPort,
-            proto: proto
-        )
+        return [
+            PublishPort(
+                hostAddress: hostAddress,
+                hostPort: hostPort,
+                containerPort: containerPort,
+                proto: proto
+            )
+        ]
     }

     /// Parse --publish-socket arguments into PublishSocket objects
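
For illustration, a minimal usage sketch (not part of the PR) of how the reworked parser expands one ranged `--publish-port` argument. It assumes a script-style call site, that `ContainerClient` is importable, and that `PublishPort` exposes the properties used in its initializer; everything else comes from the diff above.

import ContainerClient

// "127.0.0.1:8080-8082:9090-9092/tcp" expands to three PublishPort values:
// 8080->9090, 8081->9091, 8082->9092, all bound to 127.0.0.1 over TCP.
let ports = try Parser.publishPort("127.0.0.1:8080-8082:9090-9092/tcp")
for port in ports {
    print("\(port.hostAddress):\(port.hostPort) -> \(port.containerPort)")
}

// A plain (non-ranged) argument still parses, now as a one-element array.
let single = try Parser.publishPort("127.0.0.1:8080:80/tcp")
assert(single.count == 1)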
49 changes: 49 additions & 0 deletions Tests/CLITests/Subcommands/Run/TestCLIRunOptions.swift
@@ -527,6 +527,55 @@ class TestCLIRunCommand: CLITest {
         }
     }

+    @Test func testForwardTCPPortRange() async throws {
+        let range = UInt16(10)
+        for portOffset in 0..<range {
+            let retries = 10
+            let retryDelaySeconds = Int64(3)
+            do {
+                let name = getLowercasedTestName()
+                let proxyIp = "127.0.0.1"
+                let proxyPortStart = UInt16.random(in: 50000..<55000)
+                let serverPortStart = UInt16.random(in: 55000..<60000)
+                let proxyPortEnd = proxyPortStart + range
+                let serverPortEnd = serverPortStart + range
+                try doLongRun(
+                    name: name,
+                    image: "docker.io/library/python:alpine",
+                    args: ["--publish", "\(proxyIp):\(proxyPortStart)-\(proxyPortEnd):\(serverPortStart)-\(serverPortEnd)/tcp"],
+                    containerArgs: ["python3", "-m", "http.server", "--bind", "0.0.0.0", "\(serverPortStart + portOffset)"])
+                defer {
+                    try? doStop(name: name)
+                }
+
+                let url = "http://\(proxyIp):\(proxyPortStart + portOffset)"
+                var request = HTTPClientRequest(url: url)
+                request.method = .GET
+                let config = HTTPClient.Configuration(proxy: nil)
+                let client = HTTPClient(eventLoopGroupProvider: .singleton, configuration: config)
+                defer { _ = client.shutdown() }
+                var retriesRemaining = retries
+                var success = false
+                while !success && retriesRemaining > 0 {
+                    do {
+                        let response = try await client.execute(request, timeout: .seconds(retryDelaySeconds))
+                        try #require(response.status == .ok)
+                        success = true
+                    } catch {
+                        print("request to \(url) failed, error: \(error)")
+                        try await Task.sleep(for: .seconds(retryDelaySeconds))
+                    }
+                    retriesRemaining -= 1
+                }
+                #expect(success, "Request to \(url) failed after \(retries - retriesRemaining) retries")
+                try doStop(name: name)
+            } catch {
+                Issue.record("failed to run container \(error)")
+                return
+            }
+        }
+    }
+
     func getDefaultDomain() throws -> String? {
         let (output, err, status) = try run(arguments: ["system", "property", "get", "dns.domain"])
         try #require(status == 0, "default DNS domain retrieval returned status \(status): \(err)")