diff --git a/applecontainer-bridge/Package.swift b/applecontainer-bridge/Package.swift index 974fcf1..319abf6 100644 --- a/applecontainer-bridge/Package.swift +++ b/applecontainer-bridge/Package.swift @@ -15,6 +15,12 @@ let package = Package( name: "ACBridge", dependencies: [ .product(name: "ContainerAPIClient", package: "container"), + // PR-G2: BuildKit gRPC build flow. + .product(name: "ContainerBuild", package: "container"), + // ContainerImagesService exposes RemoteContentStoreClient + // (BuildKit's ContentStore implementation backed by the + // local images service). Required by Builder.BuildConfig. + .product(name: "ContainerImagesService", package: "container"), ], path: "Sources/ACBridge" ), diff --git a/applecontainer-bridge/Sources/ACBridge/Helpers.swift b/applecontainer-bridge/Sources/ACBridge/Helpers.swift index cb801a9..d566c93 100644 --- a/applecontainer-bridge/Sources/ACBridge/Helpers.swift +++ b/applecontainer-bridge/Sources/ACBridge/Helpers.swift @@ -63,15 +63,67 @@ func encodeOKNull() -> String { } // encodeErr serializes any Error into the failure envelope, escaping -// quotes and newlines so the result is always valid JSON. +// quotes and newlines so the result is always valid JSON. If the +// error is a BridgeCodedError, its `code` is included so the Go side +// can drive typed error mapping without depending on message text. func encodeErr(_ error: Error) -> String { - let msg = String(describing: error) - .replacingOccurrences(of: "\\", with: "\\\\") - .replacingOccurrences(of: "\"", with: "\\\"") - .replacingOccurrences(of: "\n", with: "\\n") + if let coded = error as? BridgeCodedError { + return encodeErrWithCode(coded.code, message: coded.message) + } + let msg = jsonEscape(String(describing: error)) return "{\"ok\":false,\"err\":\"\(msg)\"}" } +// encodeErrWithCode emits the failure envelope with a machine-readable +// `code` field alongside the human-readable `err`. The Go side keys +// typed errors off `code`; `err` is retained for diagnostics and +// log-friendliness. +func encodeErrWithCode(_ code: String, message: String) -> String { + let codeEsc = jsonEscape(code) + let msgEsc = jsonEscape(message) + return "{\"ok\":false,\"code\":\"\(codeEsc)\",\"err\":\"\(msgEsc)\"}" +} + +func encodeErrWithCode(_ code: String, error: Error) -> String { + return encodeErrWithCode(code, message: String(describing: error)) +} + +// jsonEscape produces a JSON-string-safe rendering of an arbitrary +// Swift string. Per RFC 7159 §7, every control character in +// U+0000–U+001F must be escaped; well-known shorthand forms are +// used where they exist, the rest fall back to \u00XX. Without this, +// an error message containing e.g. a tab or carriage return would +// emit invalid JSON and break Go-side envelope decoding. +private func jsonEscape(_ s: String) -> String { + var out = "" + out.reserveCapacity(s.unicodeScalars.count) + for scalar in s.unicodeScalars { + switch scalar.value { + case 0x22: out += "\\\"" + case 0x5C: out += "\\\\" + case 0x08: out += "\\b" + case 0x09: out += "\\t" + case 0x0A: out += "\\n" + case 0x0C: out += "\\f" + case 0x0D: out += "\\r" + case 0x00...0x1F: + out += String(format: "\\u%04X", scalar.value) + default: + out.unicodeScalars.append(scalar) + } + } + return out +} + +// BridgeCodedError attaches a stable `code` to an error so the Go +// side can route on it without parsing free-form message text. Throw +// this from any bridge handler where the caller benefits from a typed +// error (e.g. BUILDER_UNAVAILABLE). 
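+// A minimal illustration of the resulting envelope (values are
+// examples only): throwing
+//   BridgeCodedError(code: "BUILDER_UNAVAILABLE", message: "builder not running")
+// makes encodeErr emit
+//   {"ok":false,"code":"BUILDER_UNAVAILABLE","err":"builder not running"}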
+struct BridgeCodedError: Error {
+    let code: String
+    let message: String
+}
+
 // readCString safely converts a possibly-null C string pointer into a
 // Swift String, returning nil for null pointers so each export can
 // short-circuit with a deterministic error envelope.
diff --git a/applecontainer-bridge/Sources/ACBridge/build.swift b/applecontainer-bridge/Sources/ACBridge/build.swift
new file mode 100644
index 0000000..6d85a57
--- /dev/null
+++ b/applecontainer-bridge/Sources/ACBridge/build.swift
@@ -0,0 +1,256 @@
+import ContainerAPIClient
+import ContainerBuild
+import ContainerImagesServiceClient
+import ContainerizationError
+import ContainerizationOCI
+import Foundation
+import Logging
+import NIOCore
+import NIOPosix
+
+// PR-G2: full BuildKit gRPC integration. Dials Apple's buildkit
+// container over vsock, constructs a Builder, runs the build to an
+// OCI tarball export, then loads + unpacks + tags it via the images
+// service.
+//
+// Scope cuts intentionally not addressed in PR-G2:
+// - Auto-start of the buildkit container. BuilderStart.start is
+//   module-internal to ContainerCommands, so we'd be reimplementing
+//   the bring-up logic ourselves. We surface a clean
+//   BuilderUnavailableError instead and rely on the user running
+//   `container builder start` once per machine.
+// - Progress event streaming. BuildConfig accepts a Terminal? for
+//   output; passing nil routes progress to FileHandle.standardError,
+//   which the Go process inherits. A future PR can substitute a
+//   pipe + line-parsing for typed BuildEvent emission.
+// - Multi-platform builds. We accept a single spec.platform string;
+//   anything else is dropped.
+//
+// Builder vsock port (matches the CLI's BuildCommand.swift default).
+private let buildkitVsockPort: UInt32 = 8088
+private let buildkitContainerID = "buildkit"
+
+private let buildTimeoutSeconds = 30 * 60 // 30 min for a cold cache
+
+private struct BuildSpecJSON: Decodable {
+    var contextPath: String
+    // Dockerfile is the in-context relative path, NOT absolute. The
+    // engine resolves it relative to contextPath before sending.
+    var dockerfile: String?
+    var tag: String?
+    var args: [String: String]?
+    var target: String?
+    var cacheFrom: [String]?
+    var noCache: Bool?
+    var platform: String?
+}
+
+private struct BuildResult: Encodable {
+    let reference: String
+    let digest: String
+}
+
+// ac_build_probe is preserved from PR-G — used by the Go side to
+// short-circuit with a typed BuilderUnavailableError before paying
+// the cost of marshaling a BuildSpec.
+@_cdecl("ac_build_probe")
+public func ac_build_probe() -> UnsafePointer<CChar>? {
+    return runSync(timeoutSeconds: 5) {
+        do {
+            let snap = try await ContainerClient().get(id: buildkitContainerID)
+            if snap.status == .running {
+                return "{\"ok\":true}"
+            }
+            return encodeErrWithCode(
+                "BUILDER_UNAVAILABLE",
+                message: "builder container exists but status is \(snap.status)"
+            )
+        } catch {
+            return encodeErrWithCode("BUILDER_UNAVAILABLE", error: error)
+        }
+    }
+}
+
+@_cdecl("ac_build")
+public func ac_build(_ specPtr: UnsafePointer<CChar>?) -> UnsafePointer<CChar>? {
+    guard let specStr = readCString(specPtr) else { return dupNullArgErr("spec") }
+    return runSync(timeoutSeconds: buildTimeoutSeconds) {
+        do {
+            guard let data = specStr.data(using: .utf8) else {
+                return "{\"ok\":false,\"err\":\"spec not utf8\"}"
+            }
+            let spec = try JSONDecoder().decode(BuildSpecJSON.self, from: data)
+            let result = try await runBuild(spec: spec)
+            return encodeOK(result)
+        } catch {
+            return encodeErr(error)
+        }
+    }
+}
+
+// runBuild is the actual build flow. Split out so the @_cdecl wrapper
+// stays small and the resource cleanup (event loop, file handle,
+// temp directory) is structured as a single function with defers.
+private func runBuild(spec: BuildSpecJSON) async throws -> BuildResult {
+    let client = ContainerClient()
+
+    // Resolve dockerfile contents. Defaults to "Dockerfile" in the
+    // context if not specified — matches Docker / OCI BuildKit
+    // conventions.
+    let dockerfileRel = (spec.dockerfile ?? "Dockerfile")
+    let dockerfileURL = URL(fileURLWithPath: spec.contextPath).appendingPathComponent(dockerfileRel)
+    let dockerfileData = try Data(contentsOf: dockerfileURL)
+
+    // Dockerignore is optional; absent file is fine.
+    let dockerignoreURL = URL(fileURLWithPath: spec.contextPath).appendingPathComponent(".dockerignore")
+    let dockerignoreData = try? Data(contentsOf: dockerignoreURL)
+
+    // Dial buildkit. If the container isn't running, surface a typed
+    // BridgeCodedError; the Go side maps code=BUILDER_UNAVAILABLE to
+    // BuilderUnavailableError.
+    let socketHandle: FileHandle
+    do {
+        socketHandle = try await client.dial(id: buildkitContainerID, port: buildkitVsockPort)
+    } catch {
+        throw BridgeCodedError(
+            code: "BUILDER_UNAVAILABLE",
+            message: "builder not running (run `container builder start`): \(error)"
+        )
+    }
+
+    let eventLoopGroup = MultiThreadedEventLoopGroup(numberOfThreads: System.coreCount)
+    // Shutdown is handled explicitly before each return path below
+    // (after the build completes). Even with synchronous-on-completion
+    // shutdown, the gRPC client's internal graceful-shutdown has
+    // straggler work that occasionally produces "Cannot schedule
+    // tasks on an EventLoop that has already shut down" warnings.
+    // These are upstream SwiftNIO warnings rather than functional
+    // failures; a polish PR can pin a sequencing fix once grpc-swift
+    // exposes the right hook.
+
+    var logger = Logger(label: "applecontainer-bridge.build")
+    logger.logLevel = .warning
+
+    // Builder construction + info probe must both clean up the
+    // event loop on failure, otherwise we leak NIO threads. Surface
+    // either failure as BUILDER_UNAVAILABLE so the Go layer maps it
+    // to the typed error (same code as the dial failure above).
+    let builder: Builder
+    do {
+        builder = try Builder(socket: socketHandle, group: eventLoopGroup, logger: logger)
+        // Verify the builder responds. The CLI does this same probe
+        // right after construction.
+        _ = try await builder.info()
+    } catch {
+        try? await eventLoopGroup.shutdownGracefully()
+        throw BridgeCodedError(
+            code: "BUILDER_UNAVAILABLE",
+            message: "builder not reachable (run `container builder start`): \(error)"
+        )
+    }
+
+    // Export destination MUST live under the apiserver's appRoot at
+    // <appRoot>/builder/<build-id>/, which the apiserver mounts into
+    // the buildkit VM as /var/lib/container-builder-shim/exports/<build-id>/.
+    // Anywhere else fails with "no such file or directory" inside
+    // the VM. Source of truth: Application.BuilderCommand.builderResourceDir
+    // in apple/container's ContainerCommands/Builder/Builder.swift.
+    let buildID = UUID().uuidString
+    let systemHealth = try await ClientHealthCheck.ping(timeout: .seconds(10))
+    let exportDir = systemHealth.appRoot
+        .appendingPathComponent("builder")
+        .appendingPathComponent(buildID)
+    try FileManager.default.createDirectory(at: exportDir, withIntermediateDirectories: true)
+    defer { try? FileManager.default.removeItem(at: exportDir) }
+
+    let tarURL = exportDir.appendingPathComponent("out.tar")
+    let exports: [Builder.BuildExport] = [
+        Builder.BuildExport(
+            type: "oci",
+            destination: tarURL,
+            additionalFields: [:],
+            rawValue: "type=oci,dest=\(tarURL.path)"
+        )
+    ]
+
+    // Parse the single-platform string (e.g. "linux/arm64") if given,
+    // else default to .current. BuildKit hangs indefinitely if
+    // platforms is empty — the CLI guards against this by always
+    // resolving a platform from CLI/env/host defaults.
+    let platforms = try parsePlatformsWithDefault(spec.platform)
+
+    // Build args: BuildKit expects ["KEY=value", ...] form.
+    let buildArgs: [String] = (spec.args ?? [:]).map { "\($0.key)=\($0.value)" }
+
+    let tag = spec.tag ?? ""
+    let tags: [String] = tag.isEmpty ? [] : [tag]
+
+    // Matches the CLI's `progress=plain` path: terminal nil, quiet
+    // false. `quiet=true` stalls the Solve request before it leaves
+    // the client (observed empirically); plain progress + nil
+    // terminal makes BuildKit fall back to its internal logging
+    // (which goes to the builder VM's stderr — invisible to us).
+    let config = Builder.BuildConfig(
+        buildID: buildID,
+        contentStore: RemoteContentStoreClient(),
+        buildArgs: buildArgs,
+        secrets: [:],
+        contextDir: spec.contextPath,
+        dockerfile: dockerfileData,
+        dockerignore: dockerignoreData,
+        labels: [],
+        noCache: spec.noCache ?? false,
+        platforms: platforms,
+        terminal: nil,
+        tags: tags,
+        target: spec.target ?? "",
+        quiet: false,
+        exports: exports,
+        cacheIn: spec.cacheFrom ?? [],
+        cacheOut: [],
+        pull: false
+    )
+
+    do {
+        try await builder.build(config)
+    } catch {
+        try? await eventLoopGroup.shutdownGracefully()
+        throw error
+    }
+    // Shut down the gRPC event loop now that the build session is
+    // complete. Synchronous-on-completion (not deferred) so the
+    // gRPC client's graceful shutdown sees the loop alive long
+    // enough to finish without scheduling errors.
+    try? await eventLoopGroup.shutdownGracefully()
+
+    // Build done — load the OCI tarball into the local content store,
+    // unpack, and tag. Mirrors the CLI's post-build "Unpacking built
+    // image" pass.
+    let loadResult = try await ClientImage.load(from: tarURL.path, force: false)
+    if !loadResult.rejectedMembers.isEmpty {
+        throw ContainerizationError(
+            .internalError,
+            message: "build archive contained rejected members: \(loadResult.rejectedMembers)"
+        )
+    }
+    guard let firstImage = loadResult.images.first else {
+        throw ContainerizationError(.internalError, message: "build produced no images")
+    }
+    try await firstImage.unpack(platform: nil, progressUpdate: nil)
+    var tagged: ClientImage = firstImage
+    for tagName in tags {
+        tagged = try await firstImage.tag(new: tagName)
+    }
+
+    return BuildResult(
+        reference: tags.first ?? tagged.description.reference,
+        digest: tagged.description.digest
+    )
+}
+
+private func parsePlatformsWithDefault(_ s: String?) throws -> [ContainerizationOCI.Platform] {
+    if let s, !s.isEmpty {
+        return [try ContainerizationOCI.Platform(from: s)]
+    }
+    return [.current]
+}
diff --git a/applecontainer-bridge/Sources/ACBridge/exec.swift b/applecontainer-bridge/Sources/ACBridge/exec.swift
new file mode 100644
index 0000000..9681e4a
--- /dev/null
+++ b/applecontainer-bridge/Sources/ACBridge/exec.swift
@@ -0,0 +1,212 @@
+import ContainerAPIClient
+import ContainerResource
+import Containerization
+import Darwin
+import Foundation
+
+// Handle table for in-flight exec processes. Each handle owns:
+// - the ClientProcess for signaling/waiting
+// - the FileHandles wrapping the caller-supplied stdio fds, kept
+//   alive so they aren't released while the apiserver-side dup is
+//   still active.
+//
+// Thread safety: NSLock around a dictionary. The synchronous @_cdecl
+// entry points all touch the table once, so contention is negligible.
+private final class ExecRegistry: @unchecked Sendable {
+    struct Entry {
+        let process: any ClientProcess
+        let stdio: [FileHandle?]
+    }
+
+    private let lock = NSLock()
+    private var entries: [UInt64: Entry] = [:]
+    private var next: UInt64 = 1
+
+    func register(_ entry: Entry) -> UInt64 {
+        lock.lock()
+        defer { lock.unlock() }
+        let h = next
+        next &+= 1
+        if next == 0 { next = 1 } // skip the sentinel value
+        entries[h] = entry
+        return h
+    }
+
+    func get(_ h: UInt64) -> Entry? {
+        lock.lock()
+        defer { lock.unlock() }
+        return entries[h]
+    }
+
+    func remove(_ h: UInt64) {
+        lock.lock()
+        defer { lock.unlock() }
+        entries.removeValue(forKey: h)
+    }
+}
+
+private let execRegistry = ExecRegistry()
+
+// ExecOptsJSON is the wire shape from runtime.ExecOptions.
+//
+// stdio fds are passed as separate int arguments rather than nested
+// in this JSON so the C ABI for ac_exec_start stays straightforward.
+// A fd value of -1 means "no pipe" (e.g. nil stdin, or stderr in TTY
+// mode where Apple merges into stdout).
+private struct ExecOptsJSON: Decodable {
+    var cmd: [String]
+    var env: [String]?
+    var user: String?
+    var workingDir: String?
+    var tty: Bool?
+}
+
+private struct ExecStartResult: Encodable {
+    let handle: UInt64
+}
+
+private struct ExecWaitResult: Encodable {
+    let exitCode: Int32
+}
+
+// Sync timeouts. exec_start should be fast (XPC round-trip).
+// exec_wait blocks for the process duration, which is unbounded by
+// design — the caller chooses the timeout per call.
+private let execStartTimeoutSeconds = 30
+
+// ===== ac_exec_start =================================================
+
+@_cdecl("ac_exec_start")
+public func ac_exec_start(
+    _ idPtr: UnsafePointer<CChar>?,
+    _ optsPtr: UnsafePointer<CChar>?,
+    _ stdinReadFd: Int32,
+    _ stdoutWriteFd: Int32,
+    _ stderrWriteFd: Int32
+) -> UnsafePointer<CChar>? {
+    guard let containerId = readCString(idPtr) else { return dupNullArgErr("id") }
+    guard let optsStr = readCString(optsPtr) else { return dupNullArgErr("opts") }
+
+    return runSync(timeoutSeconds: execStartTimeoutSeconds) {
+        do {
+            guard let optsData = optsStr.data(using: .utf8) else {
+                return "{\"ok\":false,\"err\":\"opts not utf8\"}"
+            }
+            let opts = try JSONDecoder().decode(ExecOptsJSON.self, from: optsData)
+            guard !opts.cmd.isEmpty else {
+                return "{\"ok\":false,\"err\":\"empty cmd\"}"
+            }
+
+            let client = ContainerClient()
+            let snap = try await client.get(id: containerId)
+
+            // Start from the init process config (inherits PATH, etc.)
+            // and override what the caller specified, matching the
+            // CLI's exec command (ContainerExec.swift).
+            var cfg = snap.configuration.initProcess
+            cfg.executable = opts.cmd[0]
+            cfg.arguments = Array(opts.cmd.dropFirst())
+            cfg.terminal = opts.tty ?? false
+            if let e = opts.env, !e.isEmpty {
+                cfg.environment = e
+            }
+            if let cwd = opts.workingDir, !cwd.isEmpty {
+                cfg.workingDirectory = cwd
+            }
+            if let user = opts.user, !user.isEmpty {
+                if let parsed = parseUserForExec(user) {
+                    cfg.user = parsed
+                }
+            }
+
+            // Wrap caller-supplied fds in FileHandles. closeOnDealloc:
+            // false keeps fd lifetime in the caller's hands; XPC
+            // dup's the fd when serializing so the apiserver gets its
+            // own. After createProcess returns, the caller is free to
+            // close its end.
+            let stdio: [FileHandle?] = [
+                fileHandleOrNil(stdinReadFd),
+                fileHandleOrNil(stdoutWriteFd),
+                fileHandleOrNil(stderrWriteFd),
+            ]
+
+            let processId = "exec-" + UUID().uuidString.prefix(8)
+            let process = try await client.createProcess(
+                containerId: containerId,
+                processId: String(processId),
+                configuration: cfg,
+                stdio: stdio
+            )
+            try await process.start()
+
+            let handle = execRegistry.register(.init(process: process, stdio: stdio))
+            return encodeOK(ExecStartResult(handle: handle))
+        } catch {
+            return encodeErr(error)
+        }
+    }
+}
+
+// ===== ac_exec_wait ==================================================
+
+@_cdecl("ac_exec_wait")
+public func ac_exec_wait(_ handle: UInt64, _ timeoutSeconds: Int32) -> UnsafePointer<CChar>? {
+    guard let entry = execRegistry.get(handle) else {
+        return UnsafePointer(strdup("{\"ok\":false,\"err\":\"unknown exec handle\"}"))
+    }
+    // timeoutSeconds <= 0 → effectively unbounded (Int32.max). The
+    // bridge's runSync still bounds the underlying wait via its hard
+    // cap, but for exec we want it large so legitimate long-running
+    // processes don't trip a bridge-side timeout.
+    let timeout = timeoutSeconds > 0 ? Int(timeoutSeconds) : Int(Int32.max / 1000)
+    return runSync(timeoutSeconds: timeout) {
+        do {
+            let code = try await entry.process.wait()
+            return encodeOK(ExecWaitResult(exitCode: code))
+        } catch {
+            return encodeErr(error)
+        }
+    }
+}
+
+// ===== ac_exec_signal ================================================
+
+@_cdecl("ac_exec_signal")
+public func ac_exec_signal(_ handle: UInt64, _ signal: Int32) -> UnsafePointer<CChar>? {
+    guard let entry = execRegistry.get(handle) else {
+        return UnsafePointer(strdup("{\"ok\":false,\"err\":\"unknown exec handle\"}"))
+    }
+    return runSync(timeoutSeconds: 5) {
+        do {
+            try await entry.process.kill(signal)
+            return "{\"ok\":true}"
+        } catch {
+            return encodeErr(error)
+        }
+    }
+}
+
+// ===== ac_exec_release ===============================================
+
+@_cdecl("ac_exec_release")
+public func ac_exec_release(_ handle: UInt64) {
+    execRegistry.remove(handle)
+}
+
+// ---- helpers --------------------------------------------------------
+
+private func fileHandleOrNil(_ fd: Int32) -> FileHandle? {
+    if fd < 0 { return nil }
+    return FileHandle(fileDescriptor: fd, closeOnDealloc: false)
+}
+
+private func parseUserForExec(_ s: String) -> ProcessConfiguration.User? {
+    let parts = s.split(separator: ":")
+    if parts.count == 2,
+        let uid = UInt32(parts[0]),
+        let gid = UInt32(parts[1])
+    {
+        return .id(uid: uid, gid: gid)
+    }
+    return .raw(userString: s)
+}
diff --git a/applecontainer-bridge/Sources/ACBridge/lifecycle.swift b/applecontainer-bridge/Sources/ACBridge/lifecycle.swift
new file mode 100644
index 0000000..8466be8
--- /dev/null
+++ b/applecontainer-bridge/Sources/ACBridge/lifecycle.swift
@@ -0,0 +1,271 @@
+import ContainerAPIClient
+import ContainerResource
+import Containerization
+import ContainerizationOCI
+import Foundation
+
+// Run-spec JSON wire shape — Go side marshals runtime.RunSpec into
+// this. Apple-side fields we don't model yet (publishedPorts,
+// resources, dns) get defaulted by ContainerConfiguration's
+// initializers. Engine-level concepts we intentionally drop on this
+// backend (RunArgs, Privileged, SecurityOpt) are documented in
+// design/runtime-applecontainer.md §8.
+private struct RunSpecJSON: Decodable {
+    var image: String
+    var id: String
+    var cmd: [String]?
+    var entrypoint: [String]?
+    var user: String?
+    var workingDir: String?
+    var env: [String]?
+    var labels: [String: String]?
+    var mounts: [MountJSON]?
+    var initProcess: Bool?
+    var capAdd: [String]?
+    var overrideCommand: Bool?
+}
+
+private struct MountJSON: Decodable {
+    // type ∈ {"bind", "tmpfs", "volume"}; anything else returns an error.
+    var type: String
+    var source: String?
+    var target: String
+    var readOnly: Bool?
+    // ignored fields on this backend (Propagation, etc.) are not modeled.
+}
+
+// Run-result wire shape — the Go side decodes this into runtime.Container.
+private struct RunResult: Encodable {
+    var id: String
+}
+
+// Default for sync lifecycle calls. Container creation can take a few
+// seconds for kernel/init-image fetches; 60s is generous enough for
+// cold first runs without leaving the cgo caller blocked indefinitely.
+private let lifecycleTimeoutSeconds = 60
+
+// ===== ac_run ========================================================
+
+@_cdecl("ac_run")
+public func ac_run(_ specPtr: UnsafePointer<CChar>?) -> UnsafePointer<CChar>? {
+    guard let specStr = readCString(specPtr) else { return dupNullArgErr("spec") }
+    return runSync(timeoutSeconds: lifecycleTimeoutSeconds) {
+        do {
+            guard let specData = specStr.data(using: .utf8) else {
+                return "{\"ok\":false,\"err\":\"spec not utf8\"}"
+            }
+            let spec = try JSONDecoder().decode(RunSpecJSON.self, from: specData)
+            try await runContainer(spec: spec)
+            return encodeOK(RunResult(id: spec.id))
+        } catch {
+            return encodeErr(error)
+        }
+    }
+}
+
+private func runContainer(spec: RunSpecJSON) async throws {
+    guard !spec.id.isEmpty else {
+        throw BridgeError.invalidArgument("RunSpec.id is required")
+    }
+    guard !spec.image.isEmpty else {
+        throw BridgeError.invalidArgument("RunSpec.image is required")
+    }
+
+    // The image must already be in the local content store. Pull is
+    // PR-F's job; here we assume the caller has done it.
+    let img = try await ClientImage.get(reference: spec.image)
+    let imageConfig = try await img.config(for: .current).config
+
+    let process = try buildProcessConfiguration(spec: spec, imageConfig: imageConfig)
+
+    var cfg = ContainerConfiguration(id: spec.id, image: img.description, process: process)
+    cfg.platform = .current
+    cfg.labels = spec.labels ?? [:]
+    cfg.mounts = try (spec.mounts ?? []).map(toFilesystem)
+    cfg.capAdd = spec.capAdd ?? []
+    cfg.useInit = spec.initProcess ?? false
+
+    let kernel = try await ClientKernel.getDefaultKernel(for: .current)
+    let options = ContainerCreateOptions(autoRemove: false)
+    try await ContainerClient().create(
+        configuration: cfg,
+        options: options,
+        kernel: kernel
+    )
+}
+
+private func buildProcessConfiguration(
+    spec: RunSpecJSON,
+    imageConfig: ImageConfig?
+) throws -> ProcessConfiguration {
+    let executable: String
+    let arguments: [String]
+
+    if spec.overrideCommand ?? false {
+        // Engine sets OverrideCommand=true to make the container
+        // long-lived for exec. Matches the docker runtime's choice
+        // (runtime/runtime.go:228-231).
+        executable = "/bin/sh"
+        arguments = ["-c", "while sleep 1000; do :; done"]
+    } else {
+        // Merge image defaults with spec-provided cmd/entrypoint.
+        // Entrypoint replaces image's Entrypoint; Cmd replaces image's
+        // Cmd. If both are missing on spec side, fall back to image
+        // config. This matches OCI conventions and docker semantics.
+        let entry = spec.entrypoint ?? imageConfig?.entrypoint ?? []
+        let cmd = spec.cmd ?? imageConfig?.cmd ?? []
+        let combined = entry + cmd
+        guard let first = combined.first else {
+            throw BridgeError.invalidArgument("no executable: spec.cmd/entrypoint empty and image has none")
+        }
+        executable = first
+        arguments = Array(combined.dropFirst())
+    }
+
+    let env: [String]
+    if let e = spec.env, !e.isEmpty {
+        env = e
+    } else {
+        env = imageConfig?.env ?? []
+    }
+
+    let user = parseUser(spec.user) ?? imageConfigUser(imageConfig)
+
+    return ProcessConfiguration(
+        executable: executable,
+        arguments: arguments,
+        environment: env,
+        workingDirectory: spec.workingDir ?? imageConfig?.workingDir ?? "/",
+        terminal: false,
+        user: user,
+        supplementalGroups: [],
+        rlimits: []
+    )
+}
+
+// parseUser turns the textual user spec from RunSpec.User into the
+// Codable form Apple wants. "<uid>:<gid>" → .id; anything else
+// (e.g. "vscode" or "vscode:dev") → .raw, which the apiserver
+// resolves inside the container.
+private func parseUser(_ s: String?) -> ProcessConfiguration.User? {
+    guard let s, !s.isEmpty else { return nil }
+    let parts = s.split(separator: ":")
+    if parts.count == 2,
+        let uid = UInt32(parts[0]),
+        let gid = UInt32(parts[1])
+    {
+        return .id(uid: uid, gid: gid)
+    }
+    return .raw(userString: s)
+}
+
+private func imageConfigUser(_ cfg: ImageConfig?) -> ProcessConfiguration.User {
+    if let s = cfg?.user, !s.isEmpty {
+        return parseUser(s) ?? .raw(userString: s)
+    }
+    return .id(uid: 0, gid: 0)
+}
+
+private func toFilesystem(_ m: MountJSON) throws -> Filesystem {
+    var options: MountOptions = []
+    if m.readOnly ?? false {
+        options.append("ro")
+    }
+    switch m.type {
+    case "bind":
+        guard let src = m.source, !src.isEmpty else {
+            throw BridgeError.invalidArgument("bind mount requires source")
+        }
+        return .virtiofs(source: src, destination: m.target, options: options)
+    case "tmpfs":
+        return .tmpfs(destination: m.target, options: options)
+    case "volume":
+        // PR-C scope decision: treat named volumes as virtiofs binds
+        // against the supplied source. Real named-volume support (with
+        // the apiserver pre-creating the volume) is a later PR.
+        guard let src = m.source, !src.isEmpty else {
+            throw BridgeError.invalidArgument("volume mount on this backend currently requires source (named-volume support is deferred)")
+        }
+        return .virtiofs(source: src, destination: m.target, options: options)
+    default:
+        throw BridgeError.invalidArgument("unknown mount type \"\(m.type)\"")
+    }
+}
+
+private enum BridgeError: LocalizedError {
+    case invalidArgument(String)
+
+    var errorDescription: String? {
+        switch self {
+        case .invalidArgument(let m): return "invalid argument: \(m)"
+        }
+    }
+}
+
+// ===== ac_start ======================================================
+
+@_cdecl("ac_start")
+public func ac_start(_ idPtr: UnsafePointer<CChar>?) -> UnsafePointer<CChar>? {
+    guard let id = readCString(idPtr) else { return dupNullArgErr("id") }
+    return runSync(timeoutSeconds: lifecycleTimeoutSeconds) {
+        do {
+            let client = ContainerClient()
+            let snap = try await client.get(id: id)
+            // Idempotency: if the container is already running, return
+            // success. Matches the CLI's behavior in
+            // ContainerStart.swift L60-72 and Docker's "start" on a
+            // running container (no-op).
+            if snap.status == .running {
+                return "{\"ok\":true}"
+            }
+            // Detached start: no stdio attachment. ProcessIO with
+            // detach=true returns [nil,nil,nil] for stdio; we replicate
+            // that directly without instantiating ProcessIO.
+            let process = try await client.bootstrap(
+                id: id,
+                stdio: [nil, nil, nil],
+                dynamicEnv: [:]
+            )
+            try await process.start()
+            return "{\"ok\":true}"
+        } catch {
+            return encodeErr(error)
+        }
+    }
+}
+
+// ===== ac_stop =======================================================
+
+@_cdecl("ac_stop")
+public func ac_stop(_ idPtr: UnsafePointer<CChar>?, _ timeoutSeconds: Int32) -> UnsafePointer<CChar>? {
+    guard let id = readCString(idPtr) else { return dupNullArgErr("id") }
+    return runSync(timeoutSeconds: lifecycleTimeoutSeconds) {
+        do {
+            // Apple's ContainerStopOptions has its own grace-period
+            // knob. timeoutSeconds <= 0 uses the API default (5s
+            // SIGTERM, then SIGKILL).
+            let opts: ContainerStopOptions = timeoutSeconds > 0
+                ? .init(timeoutInSeconds: timeoutSeconds, signal: SIGTERM)
+                : .default
+            try await ContainerClient().stop(id: id, opts: opts)
+            return "{\"ok\":true}"
+        } catch {
+            return encodeErr(error)
+        }
+    }
+}
+
+// ===== ac_delete =====================================================
+
+@_cdecl("ac_delete")
+public func ac_delete(_ idPtr: UnsafePointer<CChar>?, _ force: Int32) -> UnsafePointer<CChar>? {
+    guard let id = readCString(idPtr) else { return dupNullArgErr("id") }
+    return runSync(timeoutSeconds: lifecycleTimeoutSeconds) {
+        do {
+            try await ContainerClient().delete(id: id, force: force != 0)
+            return "{\"ok\":true}"
+        } catch {
+            return encodeErr(error)
+        }
+    }
+}
diff --git a/applecontainer-bridge/Sources/ACBridge/logs.swift b/applecontainer-bridge/Sources/ACBridge/logs.swift
new file mode 100644
index 0000000..8e2ebca
--- /dev/null
+++ b/applecontainer-bridge/Sources/ACBridge/logs.swift
@@ -0,0 +1,47 @@
+import ContainerAPIClient
+import Darwin
+import Foundation
+
+// Logs design notes:
+// Apple's client.logs(id:) returns an array of FileHandles — index 0
+// is the container's stdio log, index 1 is the boot log. We always
+// return stdio. Boot logs aren't part of the runtime.Runtime contract.
+//
+// The returned fd is dup(2)'d from Apple's FileHandle so that
+// FileHandle deinit can close its end without affecting the Go-side
+// reader. Go owns the dup'd fd from this point.
+//
+// Follow vs non-follow is implemented Go-side: the log is a regular
+// file on disk; non-follow reads to EOF, follow polls after EOF until
+// ctx cancellation closes the fd. Pushing the polling logic to Go
+// keeps ctx cancellation simple (close fd → Read returns) and avoids
+// another handle table.
+
+private struct LogsOpenResult: Encodable {
+    let fd: Int32
+}
+
+private let logsTimeoutSeconds = 10
+
+@_cdecl("ac_logs_open")
+public func ac_logs_open(_ idPtr: UnsafePointer<CChar>?) -> UnsafePointer<CChar>? {
+    guard let id = readCString(idPtr) else { return dupNullArgErr("id") }
+    return runSync(timeoutSeconds: logsTimeoutSeconds) {
+        do {
+            let handles = try await ContainerClient().logs(id: id)
+            guard let stdioHandle = handles.first else {
+                return "{\"ok\":false,\"err\":\"apiserver returned no log handles\"}"
+            }
+            // dup so Apple's FileHandle deinit doesn't kill the fd
+            // out from under the Go reader.
+            let dupFd = Darwin.dup(stdioHandle.fileDescriptor)
+            if dupFd < 0 {
+                let err = String(cString: strerror(errno))
+                return "{\"ok\":false,\"err\":\"dup logs fd: \(err)\"}"
+            }
+            return encodeOK(LogsOpenResult(fd: dupFd))
+        } catch {
+            return encodeErr(error)
+        }
+    }
+}
diff --git a/applecontainer-bridge/Sources/ACBridge/pull.swift b/applecontainer-bridge/Sources/ACBridge/pull.swift
new file mode 100644
index 0000000..714925f
--- /dev/null
+++ b/applecontainer-bridge/Sources/ACBridge/pull.swift
@@ -0,0 +1,40 @@
+import ContainerAPIClient
+import ContainerizationOCI
+import Foundation
+
+// PR-F scope: synchronous pull. Apple's ClientImage.pull returns when
+// the entire image is fetched + unpacked. Progress streaming is left
+// for a future PR — the Runtime interface accepts a BuildEvent
+// channel, but the engine treats coarse "started / completed" as
+// acceptable for v1. See design/runtime-applecontainer.md §8.
+//
+// Cancellation: not yet wired. Apple's pull API doesn't expose a
+// cancellation token; aborting a pull cleanly would require deleting
+// the partial image, which is risky if other pulls share the same
+// content store. Documented limitation; revisit when DAP needs it.
+
+private struct PullResult: Encodable {
+    let reference: String
+    let digest: String
+}
+
+// 30 minutes — covers a cold pull of a multi-GB base image on
+// reasonable networks. On most realistic links a network-level
+// failure surfaces long before this cap fires.
+private let pullTimeoutSeconds = 1800
+
+@_cdecl("ac_pull_image")
+public func ac_pull_image(_ refPtr: UnsafePointer<CChar>?) -> UnsafePointer<CChar>? {
+    guard let ref = readCString(refPtr) else { return dupNullArgErr("reference") }
+    return runSync(timeoutSeconds: pullTimeoutSeconds) {
+        do {
+            let img = try await ClientImage.pull(reference: ref, platform: .current)
+            return encodeOK(PullResult(
+                reference: img.description.reference,
+                digest: img.description.digest
+            ))
+        } catch {
+            return encodeErr(error)
+        }
+    }
+}
diff --git a/applecontainer-bridge/include/ac_bridge.h b/applecontainer-bridge/include/ac_bridge.h
index 125474b..575c676 100644
--- a/applecontainer-bridge/include/ac_bridge.h
+++ b/applecontainer-bridge/include/ac_bridge.h
@@ -127,4 +127,238 @@ const char* ac_inspect_image(const char* reference);
 // Blocking: blocks until ContainerClient.list() returns.
 const char* ac_find_container_by_label(const char* key, const char* value);
 
+// ---- PR-C: Run / Start / Stop / Delete -----------------------------
+
+// ac_run creates a container from a JSON-encoded RunSpec. Wraps
+// ContainerClient.create + ClientKernel.getDefaultKernel.
Image must +// already be in the local content store (PullImage handled by PR-F). +// +// Ownership: caller frees with ac_free. +// Cancellation: not yet wired (PR-D). +// Threading: Swift Task + DispatchSemaphore wait on the cgo thread. +// Encoding: spec_json is the canonical RunSpec wire shape (see +// applecontainer/lifecycle_darwin_arm64.go for the Go +// marshaller). Response: +// { "ok": true, "data": { "id": "" } } +// or { "ok": false, "err": "..." }. +// RunSpec.RunArgs / Privileged / SecurityOpt are not +// modeled on this backend per design §8. The Go layer +// rejects callers that populate them with a typed +// UnsupportedOptionError before reaching this entry +// point; the wire shape doesn't carry those fields. +// Image must be pre-pulled. +// Blocking: up to 60s (covers cold kernel + init-image fetch on +// first run; cached after that). +const char* ac_run(const char* spec_json); + +// ac_start bootstraps and starts a previously created container in +// detached mode (no stdio attachment). Idempotent: a running +// container is a no-op success. Wraps ContainerClient.bootstrap + +// ClientProcess.start. +// +// Ownership: caller frees with ac_free. +// Cancellation: not yet wired (PR-D). +// Threading: Swift Task + DispatchSemaphore wait on the cgo thread. +// Encoding: { "ok": true } | { "ok": false, "err": "..." }. +// Blocking: up to 60s; bootstrap is fast but `process.start()` +// spawns the in-VM init. +const char* ac_start(const char* id); + +// ac_stop stops a running container. Wraps ContainerClient.stop with +// the given grace-period; timeout_seconds <= 0 uses Apple's default. +// +// Ownership: caller frees with ac_free. +// Cancellation: not yet wired (PR-D). +// Threading: Swift Task + DispatchSemaphore wait on the cgo thread. +// Encoding: { "ok": true } | { "ok": false, "err": "..." }. +// Blocking: up to 60s (covers the grace period + SIGKILL fallback). +const char* ac_stop(const char* id, int32_t timeout_seconds); + +// ac_delete removes a container. `force != 0` deletes even if the +// container is running. Wraps ContainerClient.delete. +// +// Ownership: caller frees with ac_free. +// Cancellation: not yet wired (PR-D). +// Threading: Swift Task + DispatchSemaphore wait on the cgo thread. +// Encoding: { "ok": true } | { "ok": false, "err": "..." }. +// Blocking: up to 60s. +const char* ac_delete(const char* id, int32_t force); + +// ---- PR-D: Exec (stdin/TTY + cancellation) ------------------------- + +// ac_exec_start launches an exec process inside a running container. +// stdio fds are caller-supplied via os.Pipe() on the Go side: caller +// passes the apiserver-facing end (read-end for stdin, write-end for +// stdout/stderr); -1 disables that stream. XPC dup's the fds when the +// XPCMessage is serialized, so the caller may close its passed fd +// immediately after this call returns (the process keeps the dup'd +// copy). +// +// Ownership: caller frees the returned JSON with ac_free. +// Cancellation: returned `handle` is the cancellation token. Pass +// it to ac_exec_signal with SIGTERM (or any signal) +// to deliver to the in-VM process. +// Threading: Swift Task + DispatchSemaphore wait on the cgo +// thread. The launched process runs on its own VM +// thread; this call returns once createProcess + +// start have settled at the apiserver. +// Encoding: opts_json = { "cmd":[..], "env":[..], "user":"", +// "workingDir":"", "tty":false }. 
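+//              A concrete example (illustrative values only):
+//                { "cmd": ["/bin/sh", "-c", "echo hi"],
+//                  "env": ["FOO=bar"], "tty": false }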
+// Response on success: +// { "ok": true, "data": { "handle": uint64 } } +// On failure: { "ok": false, "err": "..." }. After +// this returns ok, the caller MUST eventually call +// ac_exec_release(handle) to free the registry slot. +// Blocking: up to 30s (createProcess XPC + start). +const char* ac_exec_start( + const char* id, + const char* opts_json, + int32_t stdin_read_fd, + int32_t stdout_write_fd, + int32_t stderr_write_fd +); + +// ac_exec_wait blocks until the exec process exits, returning its +// exit code. timeout_seconds <= 0 disables the timeout (capped to +// ~Int32.max/1000 internally to avoid Duration overflow). +// +// Ownership: caller frees the returned JSON with ac_free. +// Cancellation: external — call ac_exec_signal from another thread +// to send SIGTERM; this wait then returns the +// resulting exit code naturally. +// Threading: Swift Task + DispatchSemaphore wait on the cgo +// thread. +// Encoding: { "ok": true, "data": { "exitCode": int32 } } or +// { "ok": false, "err": "..." }. +// Blocking: unbounded by design (modulo timeout_seconds). +const char* ac_exec_wait(uint64_t handle, int32_t timeout_seconds); + +// ac_exec_signal delivers a signal to the in-VM process. The +// cancellation contract: Go's ctx.Done() goroutine calls this with +// SIGTERM; ac_exec_wait then returns naturally as the process exits. +// +// Ownership: caller frees the returned JSON with ac_free. +// Cancellation: n/a (this IS the cancellation primitive). +// Threading: Swift Task + DispatchSemaphore wait on the cgo +// thread. +// Encoding: { "ok": true } or { "ok": false, "err": "..." }. +// Blocking: up to 5s (the apiserver's kill XPC is fast). +const char* ac_exec_signal(uint64_t handle, int32_t signal); + +// ac_exec_release frees the handle's registry slot. Idempotent — +// calling on an unknown handle is a no-op. Must be called after +// ac_exec_wait returns to avoid leaking ClientProcess instances. +// +// Ownership: n/a; no return value. +// Cancellation: n/a. +// Threading: safe from any thread. +// Encoding: n/a. +// Blocking: non-blocking; lock acquisition only. +void ac_exec_release(uint64_t handle); + +// ---- PR-E: Logs streaming ------------------------------------------ + +// ac_logs_open returns a dup'd file descriptor for the container's +// stdio log. The fd is a regular file on disk; reads return 0 bytes +// at EOF. Callers implement follow mode by polling on EOF; ctx +// cancellation is signaled by closing the fd Go-side. +// +// Ownership: caller owns the returned fd and must close it with +// close(2). The JSON envelope itself is freed with +// ac_free as usual. +// Cancellation: external — close the fd from another thread to +// unblock a Go read. +// Threading: Swift Task + DispatchSemaphore wait on the cgo +// thread. +// Encoding: { "ok": true, "data": { "fd": int32 } } or +// { "ok": false, "err": "..." }. +// Blocking: up to 10s (one XPC round-trip + dup). +const char* ac_logs_open(const char* id); + +// ---- PR-F: Pull ---------------------------------------------------- + +// ac_pull_image fetches an image from a remote registry into the +// local content store. Synchronous; returns when the image is fully +// pulled and unpacked. +// +// Ownership: caller frees the returned JSON with ac_free. +// Cancellation: not yet wired. Apple's pull API doesn't expose a +// cancellation token; aborting cleanly would require +// deleting the partial image — left for a future PR. +// Documented in design §8. 
+// Threading:     Swift Task + DispatchSemaphore wait on the cgo
+//                thread.
+// Encoding:      { "ok": true, "data": { "reference": "...", "digest": "..." } }
+//                or { "ok": false, "err": "..." }.
+// Blocking:      up to 30 min (covers a cold pull of a multi-GB
+//                base image on a reasonable network). In practice
+//                the pull completes or fails on the network well
+//                before this cap trips.
+const char* ac_pull_image(const char* reference);
+
+// ---- PR-G2: Build --------------------------------------------------
+
+// ac_build_probe checks whether Apple's buildkit container is up.
+// Callers use this to short-circuit with a typed
+// BuilderUnavailableError before paying the cost of marshaling a
+// full BuildSpec.
+//
+// Ownership:     caller frees with ac_free.
+// Cancellation:  not yet wired.
+// Threading:     Swift Task + DispatchSemaphore wait on the cgo
+//                thread.
+// Encoding:      Success: { "ok": true }.
+//                Failure with stable code:
+//                  { "ok": false,
+//                    "code": "BUILDER_UNAVAILABLE",
+//                    "err": "<message>" }
+//                Failure without a known code:
+//                  { "ok": false, "err": "..." }
+//                The `code` field is the machine-readable contract
+//                the Go side keys typed errors off of; `err` is for
+//                diagnostics only.
+// Blocking:      up to 5s (one XPC round-trip).
+const char* ac_build_probe(void);
+
+// ac_build performs the actual BuildKit build. Dials the buildkit
+// container over vsock (must be running; we surface a clear error
+// otherwise), constructs a SwiftNIO-backed Builder, runs the build
+// to an OCI tarball export, then loads + unpacks + tags the result
+// in the local content store.
+//
+// Ownership:     caller frees with ac_free.
+// Cancellation:  not yet wired. Build is long-running; callers
+//                should treat it as best-effort uninterruptible
+//                until a follow-up PR adds streaming cancellation.
+// Threading:     Swift Task + DispatchSemaphore wait on the cgo
+//                thread. Internally spins up a NIO
+//                MultiThreadedEventLoopGroup for the duration of
+//                the build and shuts it down on the way out.
+// Encoding:      spec_json fields (omitempty unless noted):
+//                  contextPath (required), dockerfile,
+//                  tag, args (map[string]string), target,
+//                  cacheFrom ([]string), noCache (bool),
+//                  platform (single platform string, e.g. linux/arm64).
+//                Engine concepts not modeled on this backend
+//                (RunArgs, Privileged, SecurityOpt analogues) are
+//                rejected by the Go layer before reaching this
+//                entry point — same pattern as ac_run (design §8).
+//                Multi-platform builds are out of scope; pass a
+//                single platform.
+//                Success:
+//                  { "ok": true, "data": { "reference": "...", "digest": "..." } }
+//                Failure with stable code (same contract as
+//                ac_build_probe — currently BUILDER_UNAVAILABLE):
+//                  { "ok": false, "code": "...", "err": "..." }
+//                Failure without a known code:
+//                  { "ok": false, "err": "..." }.
+//                BuildKit progress goes to FileHandle.standardError
+//                of the bridge process (which the Go process
+//                inherits). Typed BuildEvent streaming is a future
+//                PR — for now callers see raw output on stderr.
+// Blocking:      up to 30 min (covers a cold cache pull + multi-layer
+//                build). The bridge's internal timeout fires at the
+//                same horizon.
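+//                A complete minimal spec_json (illustrative values):
+//                  { "contextPath": "/tmp/ctx", "dockerfile": "Dockerfile",
+//                    "tag": "example/app:dev", "noCache": true }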
+const char* ac_build(const char* spec_json); + #endif diff --git a/runtime/applecontainer/build_darwin_arm64.go b/runtime/applecontainer/build_darwin_arm64.go new file mode 100644 index 0000000..cea5d78 --- /dev/null +++ b/runtime/applecontainer/build_darwin_arm64.go @@ -0,0 +1,156 @@ +//go:build darwin && arm64 + +package applecontainer + +/* +#include +#include "shim.h" +*/ +import "C" + +import ( + "context" + "encoding/json" + "errors" + "fmt" + "unsafe" + + "github.com/crunchloop/devcontainer/runtime" +) + +// buildSpecWire mirrors applecontainer-bridge/Sources/ACBridge/build.swift's +// BuildSpecJSON. Engine concepts we don't model on this backend +// (RunArgs/Privileged/SecurityOpt analogs) are intentionally absent +// from this wire type — same pattern as runSpecJSON in lifecycle. +type buildSpecWire struct { + ContextPath string `json:"contextPath"` + Dockerfile string `json:"dockerfile,omitempty"` + Tag string `json:"tag,omitempty"` + Args map[string]string `json:"args,omitempty"` + Target string `json:"target,omitempty"` + CacheFrom []string `json:"cacheFrom,omitempty"` + NoCache bool `json:"noCache,omitempty"` + Platform string `json:"platform,omitempty"` +} + +type buildResultData struct { + Reference string `json:"reference"` + Digest string `json:"digest"` +} + +// BuildImage performs a Dockerfile build via Apple's BuildKit +// container. Requires the user to have run `container builder start` +// beforehand — auto-start would require reimplementing +// BuilderStart.start which is module-internal to ContainerCommands. +// +// Behavior: +// - If the builder is not running, returns +// *runtime.BuilderUnavailableError with a hint. +// - Otherwise dials buildkit over vsock, runs the BuildKit build, +// loads the resulting OCI tarball into the local content store, +// tags it with spec.Tag, and returns a runtime.ImageRef. +// +// Progress events: PR-G2 ships without streaming events. BuildKit's +// progress output goes to the bridge's stderr (which the Go process +// inherits). Callers see raw build output on the console. A future +// PR can swap in a pipe-fd capture and emit typed BuildEvents. +func (r *Runtime) BuildImage(ctx context.Context, spec runtime.BuildSpec, events chan<- runtime.BuildEvent) (runtime.ImageRef, error) { + if err := ctx.Err(); err != nil { + return runtime.ImageRef{}, err + } + if err := ensureLoaded(); err != nil { + return runtime.ImageRef{}, err + } + + // Builder-liveness probe. Cheap and gives us a clean typed error + // before we marshal the full spec. The bridge tags the + // builder-down case with code="BUILDER_UNAVAILABLE"; we key off + // that rather than message text. 
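+	// An envelope the probe can return in that case (illustrative):
+	//   {"ok":false,"code":"BUILDER_UNAVAILABLE","err":"builder not running ..."}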
+ probeRaw := goStringAndFree(C.ac_build_probe_p()) + if probeRaw == "" { + return runtime.ImageRef{}, errors.New("applecontainer: bridge returned nil for BuildImage probe") + } + if probeEnv, err := decodeEnvelope[map[string]any](probeRaw); err != nil { + if probeEnv.Code == bridgeCodeBuilderUnavailable { + return runtime.ImageRef{}, &runtime.BuilderUnavailableError{ + Hint: "run `container builder start` to start the build VM", + Err: err, + } + } + return runtime.ImageRef{}, err + } + + emitBuildEvent(events, runtime.BuildEvent{ + Kind: runtime.BuildEventLog, + Message: "applecontainer: building " + spec.Tag, + }) + + wire := buildSpecWire{ + ContextPath: spec.ContextPath, + Dockerfile: spec.Dockerfile, + Tag: spec.Tag, + Args: spec.Args, + Target: spec.Target, + CacheFrom: spec.CacheFrom, + NoCache: spec.NoCache, + Platform: spec.Platform, + } + specBytes, err := json.Marshal(wire) + if err != nil { + return runtime.ImageRef{}, fmt.Errorf("applecontainer: marshal BuildSpec: %w", err) + } + cSpec := C.CString(string(specBytes)) + defer C.free(unsafe.Pointer(cSpec)) + + raw := goStringAndFree(C.ac_build_p(cSpec)) + if raw == "" { + return runtime.ImageRef{}, errors.New("applecontainer: bridge returned nil for BuildImage") + } + env, err := decodeEnvelope[buildResultData](raw) + if err != nil { + // Apple's "builder not running" error path can surface here + // if the buildkit container vanished between probe and build. + // Primary key is the bridge's machine-readable `code`; the + // message-text fallback covers older bridge builds. + if env.Code == bridgeCodeBuilderUnavailable || isBuilderUnavailable(err) { + return runtime.ImageRef{}, &runtime.BuilderUnavailableError{ + Hint: "run `container builder start` to start the build VM", + Err: err, + } + } + return runtime.ImageRef{}, err + } + + emitBuildEvent(events, runtime.BuildEvent{ + Kind: runtime.BuildEventCompleted, + Digest: env.decoded.Digest, + Message: "applecontainer: built " + env.decoded.Reference, + }) + + tags := []string{} + if env.decoded.Reference != "" { + tags = append(tags, env.decoded.Reference) + } + return runtime.ImageRef{ + ID: env.decoded.Digest, + Tags: tags, + }, nil +} + +// bridgeCodeBuilderUnavailable matches the `code` the Swift bridge +// stamps on the failure envelope when Apple's buildkit container is +// not running. Keep in sync with applecontainer-bridge/Sources/ +// ACBridge/build.swift. +const bridgeCodeBuilderUnavailable = "BUILDER_UNAVAILABLE" + +func isBuilderUnavailable(err error) bool { + if err == nil { + return false + } + msg := err.Error() + return containsAny(msg, + "builder not running", + "container buildkit not found", + "buildkit container", + ) +} diff --git a/runtime/applecontainer/build_darwin_arm64_test.go b/runtime/applecontainer/build_darwin_arm64_test.go new file mode 100644 index 0000000..b753299 --- /dev/null +++ b/runtime/applecontainer/build_darwin_arm64_test.go @@ -0,0 +1,157 @@ +//go:build darwin && arm64 + +package applecontainer + +import ( + "context" + "errors" + "os" + "path/filepath" + "strings" + "testing" + + "github.com/crunchloop/devcontainer/runtime" +) + +// TestBuildImage_DockerfileSmoke is the PR-G2 happy-path test: +// write a 2-line Dockerfile, BuildImage, then InspectImage to confirm +// the resulting tag exists in the local content store. Skips when +// the builder isn't running (PR-G stub path). 
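+// Assuming the standard Go test invocation (illustrative):
+//
+//	go test -run 'TestBuildImage_DockerfileSmoke' ./runtime/applecontainer/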
+func TestBuildImage_DockerfileSmoke(t *testing.T) {
+	rt := runtimeOrSkip(t)
+	ctx := context.Background()
+
+	// Warm alpine first — the FROM line references it.
+	cliRunStrict(t,
+		"run", "--rm", "--name", "ac-alpine-warmup",
+		"docker.io/library/alpine:latest", "/bin/true",
+	)
+
+	contextDir := t.TempDir()
+	dockerfile := "FROM docker.io/library/alpine:latest\nRUN echo built-by-pr-g2 > /built-marker\n"
+	if err := os.WriteFile(filepath.Join(contextDir, "Dockerfile"), []byte(dockerfile), 0o644); err != nil {
+		t.Fatalf("write dockerfile: %v", err)
+	}
+
+	tag := "applecontainer-bridge-test/pr-g2-smoke:latest"
+	events := make(chan runtime.BuildEvent, 8)
+	imageRef, err := rt.BuildImage(ctx, runtime.BuildSpec{
+		ContextPath: contextDir,
+		Dockerfile:  "Dockerfile",
+		Tag:         tag,
+	}, events)
+	close(events)
+
+	if err != nil {
+		var unavail *runtime.BuilderUnavailableError
+		if errors.As(err, &unavail) {
+			t.Skipf("builder not running (run `container builder start`): %v", err)
+		}
+		t.Fatalf("BuildImage: %v", err)
+	}
+
+	if imageRef.ID == "" {
+		t.Errorf("ImageRef.ID (digest) empty")
+	}
+	if len(imageRef.Tags) == 0 || imageRef.Tags[0] != tag {
+		t.Errorf("Tags: want [%q], got %v", tag, imageRef.Tags)
+	}
+
+	// Inspect should now see the tag locally.
+	details, err := rt.InspectImage(ctx, tag)
+	if err != nil {
+		t.Fatalf("InspectImage(%q): %v", tag, err)
+	}
+	if details.ID == "" {
+		t.Errorf("InspectImage: empty digest")
+	}
+
+	// Event surface: at least one completed event with a digest.
+	var sawCompleted bool
+	for ev := range events {
+		if ev.Kind == runtime.BuildEventCompleted {
+			sawCompleted = true
+			if ev.Digest == "" {
+				t.Errorf("BuildEventCompleted: empty digest")
+			}
+		}
+	}
+	if !sawCompleted {
+		t.Errorf("no BuildEventCompleted emitted")
+	}
+}
+
+// TestBuildImage_NoCache_FreshLayers re-runs the same Dockerfile with
+// NoCache=true, expecting the RUN step to re-execute rather than be
+// marked CACHED. The previous smoke test tolerates a cache hit; this
+// one verifies the flag actually plumbs through.
+func TestBuildImage_NoCache_FreshLayers(t *testing.T) {
+	rt := runtimeOrSkip(t)
+	ctx := context.Background()
+
+	cliRunStrict(t,
+		"run", "--rm", "--name", "ac-alpine-warmup",
+		"docker.io/library/alpine:latest", "/bin/true",
+	)
+
+	contextDir := t.TempDir()
+	// Same RUN line on purpose: with noCache=true BuildKit should
+	// re-execute it instead of reusing the cached layer (the build
+	// output would then omit "CACHED"). That's hard to assert from
+	// the Go side since progress goes to stderr, so we mostly assert
+	// the call succeeds and produces an image.
+	dockerfile := "FROM docker.io/library/alpine:latest\nRUN echo nocache-pass\n"
+	if err := os.WriteFile(filepath.Join(contextDir, "Dockerfile"), []byte(dockerfile), 0o644); err != nil {
+		t.Fatalf("write dockerfile: %v", err)
+	}
+
+	tag := "applecontainer-bridge-test/pr-g2-nocache:latest"
+	imageRef, err := rt.BuildImage(ctx, runtime.BuildSpec{
+		ContextPath: contextDir,
+		Dockerfile:  "Dockerfile",
+		Tag:         tag,
+		NoCache:     true,
+	}, nil)
+	if err != nil {
+		var unavail *runtime.BuilderUnavailableError
+		if errors.As(err, &unavail) {
+			t.Skipf("builder not running: %v", err)
+		}
+		t.Fatalf("BuildImage(noCache): %v", err)
+	}
+	if imageRef.ID == "" {
+		t.Errorf("ImageRef.ID empty")
+	}
+}
+
+// TestBuildImage_BuilderDownTypedError keeps the PR-G builder-unavailable
+// path covered: if no builder is running the call MUST return a typed
+// error, not surface a generic one.
Only meaningful when the builder +// is actually down; otherwise we skip rather than warm it back up. +func TestBuildImage_BuilderDownTypedError(t *testing.T) { + rt := runtimeOrSkip(t) + // Try to stop the builder to force the error path. Best-effort — + // if it fails (e.g. wasn't running anyway), proceed. + cliRun(t, "builder", "stop") + + _, err := rt.BuildImage(context.Background(), runtime.BuildSpec{ + ContextPath: t.TempDir(), + Dockerfile: "Dockerfile", + Tag: "should-fail:latest", + }, nil) + if err == nil { + // Builder came back up between our stop and the call — + // skip rather than misreport. + t.Skip("builder is up; cannot exercise the down path here") + } + var unavail *runtime.BuilderUnavailableError + if !errors.As(err, &unavail) { + // Could also be an "image not found" or other typed error if + // the dockerfile read fails before the builder dial. Inspect + // for clues. + if !strings.Contains(err.Error(), "Dockerfile") { + t.Errorf("want *BuilderUnavailableError, got %T: %v", err, err) + } + } +} diff --git a/runtime/applecontainer/envelope_test.go b/runtime/applecontainer/envelope_test.go index f69df3e..62d90c5 100644 --- a/runtime/applecontainer/envelope_test.go +++ b/runtime/applecontainer/envelope_test.go @@ -3,8 +3,11 @@ package applecontainer import ( + "errors" "strings" "testing" + + "github.com/crunchloop/devcontainer/runtime" ) // Pure-Go tests for the envelope decoder. No daemon, no cgo at @@ -61,6 +64,47 @@ func TestDecodeEnvelope_Malformed(t *testing.T) { } } +// TestRejectUnsupportedRunSpec pins the contract that RunArgs, +// Privileged, and SecurityOpt fail fast with a typed error rather +// than being silently dropped at the bridge boundary. +func TestRejectUnsupportedRunSpec(t *testing.T) { + cases := []struct { + name string + spec runtime.RunSpec + opt string + }{ + {name: "RunArgs", spec: runtime.RunSpec{RunArgs: []string{"--add-host=foo:1.2.3.4"}}, opt: "RunArgs"}, + {name: "Privileged", spec: runtime.RunSpec{Privileged: true}, opt: "Privileged"}, + {name: "SecurityOpt", spec: runtime.RunSpec{SecurityOpt: []string{"no-new-privileges"}}, opt: "SecurityOpt"}, + } + for _, tc := range cases { + t.Run(tc.name, func(t *testing.T) { + err := rejectUnsupportedRunSpec(tc.spec) + if err == nil { + t.Fatalf("want error, got nil") + } + var unsup *runtime.UnsupportedOptionError + if !errors.As(err, &unsup) { + t.Fatalf("want *UnsupportedOptionError, got %T: %v", err, err) + } + if unsup.Option != tc.opt { + t.Errorf("Option = %q, want %q", unsup.Option, tc.opt) + } + if unsup.Backend != "applecontainer" { + t.Errorf("Backend = %q, want %q", unsup.Backend, "applecontainer") + } + }) + } +} + +// TestRejectUnsupportedRunSpec_AllowsZeroValue confirms the validator +// is a no-op for the common case (all unsupported fields empty). 
+func TestRejectUnsupportedRunSpec_AllowsZeroValue(t *testing.T) { + if err := rejectUnsupportedRunSpec(runtime.RunSpec{Image: "x", Name: "y"}); err != nil { + t.Errorf("want nil, got %v", err) + } +} + func TestMapState(t *testing.T) { cases := map[string]string{ "running": "running", diff --git a/runtime/applecontainer/exec_darwin_arm64.go b/runtime/applecontainer/exec_darwin_arm64.go new file mode 100644 index 0000000..e20fade --- /dev/null +++ b/runtime/applecontainer/exec_darwin_arm64.go @@ -0,0 +1,349 @@ +//go:build darwin && arm64 + +package applecontainer + +/* +#include +#include "shim.h" +*/ +import "C" + +import ( + "context" + "encoding/json" + "errors" + "fmt" + "io" + "os" + "strings" + "sync" + "syscall" + "unsafe" + + "github.com/crunchloop/devcontainer/runtime" +) + +// execOptsJSON mirrors lifecycle's RunSpecJSON pattern: explicit +// wire-only struct so the silently-dropped fields from +// runtime.ExecOptions are visible in code review. +type execOptsJSON struct { + Cmd []string `json:"cmd"` + Env []string `json:"env,omitempty"` + User string `json:"user,omitempty"` + WorkingDir string `json:"workingDir,omitempty"` + TTY bool `json:"tty,omitempty"` +} + +type execStartData struct { + Handle uint64 `json:"handle"` +} + +type execWaitData struct { + ExitCode int32 `json:"exitCode"` +} + +// ExecContainer runs a command inside a running container. +// +// stdio plumbing: each requested stream gets an os.Pipe pair. The +// "apiserver-facing" end is passed to the bridge as an fd (XPC dup's +// it during the createProcess XPC), then closed locally. The +// "Go-facing" end stays open and is wired to the caller's +// io.Reader/Writer via goroutines. +// +// Cancellation: a ctx-watcher goroutine sends SIGTERM via the bridge +// when ctx.Done() fires. The wait then returns naturally (typically +// exit code 143 = 128 + SIGTERM). ExecContainer returns ctx.Err() in +// that case, regardless of the underlying exit code. +// +// When opts.Stdout/Stderr are nil, output is captured into the +// returned ExecResult.Stdout/Stderr. The bridge always opens a +// stdout pipe; stderr is suppressed in TTY mode (Apple merges +// stderr into stdout when terminal=true) and the captured/streamed +// stderr is empty in that case. +func (r *Runtime) ExecContainer(ctx context.Context, id string, opts runtime.ExecOptions) (runtime.ExecResult, error) { + if err := ctx.Err(); err != nil { + return runtime.ExecResult{}, err + } + if err := ensureLoaded(); err != nil { + return runtime.ExecResult{}, err + } + if len(opts.Cmd) == 0 { + return runtime.ExecResult{}, errors.New("applecontainer: ExecOptions.Cmd is empty") + } + + // Build the pipes Go-side. Failure on any pipe is unrecoverable; + // we close partial allocations before bailing. + pipes, err := openExecPipes(opts) + if err != nil { + return runtime.ExecResult{}, err + } + defer pipes.closeLocal() // belt-and-suspenders; goroutines also close + + // Marshal the per-call opts and hand fds to the bridge. 
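+	// e.g. (illustrative) for `ls -la` with no TTY the wire JSON is
+	// just {"cmd":["ls","-la"]}; false/empty fields are omitted.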
+ wire := execOptsJSON{ + Cmd: opts.Cmd, + Env: envMapToSlice(opts.Env), + User: opts.User, + WorkingDir: opts.WorkingDir, + TTY: opts.Tty, + } + optsBytes, err := json.Marshal(wire) + if err != nil { + return runtime.ExecResult{}, fmt.Errorf("applecontainer: marshal exec opts: %w", err) + } + + cID := C.CString(id) + defer C.free(unsafe.Pointer(cID)) + cOpts := C.CString(string(optsBytes)) + defer C.free(unsafe.Pointer(cOpts)) + + raw := goStringAndFree(C.ac_exec_start_p( + cID, cOpts, + C.int32_t(pipes.stdinReadFd()), + C.int32_t(pipes.stdoutWriteFd()), + C.int32_t(pipes.stderrWriteFd()), + )) + if raw == "" { + return runtime.ExecResult{}, errors.New("applecontainer: bridge returned nil for ExecStart") + } + startEnv, err := decodeEnvelope[execStartData](raw) + if err != nil { + return runtime.ExecResult{}, mapLifecycleErr(id, err) + } + handle := startEnv.decoded.Handle + + // The bridge (and through XPC, the apiserver) now owns dup'd + // copies of the apiserver-facing fds. Close ours so the only + // references are the ones we still want (Go-facing ends). + pipes.closeRemoteEnds() + + // Spawn the stdio goroutines. We use a WaitGroup to ensure all + // copies drain before we return — losing stdout because the + // reader goroutine hadn't finished would be a silent footgun. + var ( + wg sync.WaitGroup + stdoutBuf strings.Builder + stderrBuf strings.Builder + stdoutSink = pickWriter(opts.Stdout, &stdoutBuf) + stderrSink = pickWriter(opts.Stderr, &stderrBuf) + copyErrCh = make(chan error, 3) + ) + + if pipes.stdinWriter() != nil { + wg.Add(1) + go func() { + defer wg.Done() + // Write what the caller provided, then close so the + // container sees EOF on stdin. Errors here are usually + // "broken pipe" because the process exited; surface them + // via copyErrCh but don't fail the exec. + if opts.Stdin != nil { + if _, err := io.Copy(pipes.stdinWriter(), opts.Stdin); err != nil && !isBrokenPipe(err) { + copyErrCh <- fmt.Errorf("stdin copy: %w", err) + } + } + pipes.stdinWriter().Close() + }() + } + if pipes.stdoutReader() != nil { + wg.Add(1) + go func() { + defer wg.Done() + if _, err := io.Copy(stdoutSink, pipes.stdoutReader()); err != nil && !isBrokenPipe(err) { + copyErrCh <- fmt.Errorf("stdout copy: %w", err) + } + }() + } + if pipes.stderrReader() != nil { + wg.Add(1) + go func() { + defer wg.Done() + if _, err := io.Copy(stderrSink, pipes.stderrReader()); err != nil && !isBrokenPipe(err) { + copyErrCh <- fmt.Errorf("stderr copy: %w", err) + } + }() + } + + // ctx-watcher: send SIGTERM on cancel. The watcher exits either + // when ctx fires (cancelled path) or when we close `done` after + // wait returns (clean path). + var cancelled bool + var cancelMu sync.Mutex + doneSignal := make(chan struct{}) + go func() { + select { + case <-ctx.Done(): + cancelMu.Lock() + cancelled = true + cancelMu.Unlock() + C.ac_exec_signal_p(C.uint64_t(handle), C.int32_t(syscall.SIGTERM)) + case <-doneSignal: + } + }() + + // Wait for the in-VM process to exit. This is the long block. + // timeout_seconds=0 means "use the bridge's internal max". + waitRaw := goStringAndFree(C.ac_exec_wait_p(C.uint64_t(handle), 0)) + close(doneSignal) + + // Always release the handle before we leave the function. + defer C.ac_exec_release_p(C.uint64_t(handle)) + + // Drain copy goroutines. The apiserver closes its fd when the + // process exits, which surfaces as EOF on our reader, so io.Copy + // returns naturally without us having to close anything. 
Closing + // the Go-facing ends BEFORE the drain (an earlier safety net) + // risked truncating buffered output, so it was removed. + wg.Wait() + close(copyErrCh) + + var copyErr error + for e := range copyErrCh { + copyErr = errors.Join(copyErr, e) + } + + cancelMu.Lock() + wasCancelled := cancelled + cancelMu.Unlock() + if wasCancelled { + // ctx.Err() (DeadlineExceeded / Canceled) takes precedence + // over whatever exit code the SIGTERMed process surfaced. + return runtime.ExecResult{}, ctx.Err() + } + + if waitRaw == "" { + return runtime.ExecResult{}, errors.New("applecontainer: bridge returned nil for ExecWait") + } + waitEnv, werr := decodeEnvelope[execWaitData](waitRaw) + if werr != nil { + return runtime.ExecResult{}, werr + } + + result := runtime.ExecResult{ + ExitCode: int(waitEnv.decoded.ExitCode), + } + if opts.Stdout == nil { + result.Stdout = stdoutBuf.String() + } + if opts.Stderr == nil { + result.Stderr = stderrBuf.String() + } + return result, copyErr +} + +// ---- pipe pair management ------------------------------------------ + +// execPipes holds the three pipe pairs we may need for an exec. Each +// entry is nil if that stream is unused (no stdin requested, or TTY +// mode for stderr). The "apiserver-facing" end is the one passed to +// the bridge and closed locally after start; the "Go-facing" end is +// the one we read/write. +type execPipes struct { + stdinRead, stdinWrite *os.File + stdoutRead, stdoutWrite *os.File + stderrRead, stderrWrite *os.File +} + +func openExecPipes(opts runtime.ExecOptions) (*execPipes, error) { + p := &execPipes{} + cleanup := func() { p.closeLocal(); p.closeRemoteEnds() } + + if opts.Stdin != nil { + r, w, err := os.Pipe() + if err != nil { + return nil, fmt.Errorf("pipe(stdin): %w", err) + } + p.stdinRead, p.stdinWrite = r, w + } + + // We always open stdout — capture or stream. The caller can't + // opt out; suppression is up to the in-VM process. + { + r, w, err := os.Pipe() + if err != nil { + cleanup() + return nil, fmt.Errorf("pipe(stdout): %w", err) + } + p.stdoutRead, p.stdoutWrite = r, w + } + + // Stderr is suppressed in TTY mode (Apple merges into stdout). + if !opts.Tty { + r, w, err := os.Pipe() + if err != nil { + cleanup() + return nil, fmt.Errorf("pipe(stderr): %w", err) + } + p.stderrRead, p.stderrWrite = r, w + } + + return p, nil +} + +func (p *execPipes) stdinReadFd() int { return fdOrMinusOne(p.stdinRead) } +func (p *execPipes) stdoutWriteFd() int { return fdOrMinusOne(p.stdoutWrite) } +func (p *execPipes) stderrWriteFd() int { return fdOrMinusOne(p.stderrWrite) } + +func (p *execPipes) stdinWriter() *os.File { return p.stdinWrite } +func (p *execPipes) stdoutReader() *os.File { return p.stdoutRead } +func (p *execPipes) stderrReader() *os.File { return p.stderrRead } + +// closeRemoteEnds closes the apiserver-facing ends. Called right +// after ac_exec_start succeeds, since XPC has dup'd those fds. +func (p *execPipes) closeRemoteEnds() { + closeIf(p.stdinRead) + p.stdinRead = nil + closeIf(p.stdoutWrite) + p.stdoutWrite = nil + closeIf(p.stderrWrite) + p.stderrWrite = nil +} + +// closeLocal closes everything still open. Safe to call multiple +// times. 
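+// Both ExecContainer's defer and openExecPipes' failure-path cleanup
+// may reach it; closeIf tolerates nil and every field is nil'd after
+// closing, so the repeated call is harmless.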
+func (p *execPipes) closeLocal() { + closeIf(p.stdinRead) + closeIf(p.stdinWrite) + closeIf(p.stdoutRead) + closeIf(p.stdoutWrite) + closeIf(p.stderrRead) + closeIf(p.stderrWrite) + p.stdinRead, p.stdinWrite = nil, nil + p.stdoutRead, p.stdoutWrite = nil, nil + p.stderrRead, p.stderrWrite = nil, nil +} + +func closeIf(f *os.File) { + if f != nil { + _ = f.Close() + } +} + +func fdOrMinusOne(f *os.File) int { + if f == nil { + return -1 + } + return int(f.Fd()) +} + +func pickWriter(w io.Writer, fallback io.Writer) io.Writer { + if w == nil { + return fallback + } + return w +} + +func isBrokenPipe(err error) bool { + if err == nil { + return false + } + // errors.Is matches syscall.EPIPE on the standard path; the + // string fallback covers wrapped/wrapped-and-formatted variants + // that some library boundaries produce. + if errors.Is(err, syscall.EPIPE) { + return true + } + msg := err.Error() + return strings.Contains(msg, "broken pipe") || + strings.Contains(msg, "file already closed") +} diff --git a/runtime/applecontainer/exec_darwin_arm64_test.go b/runtime/applecontainer/exec_darwin_arm64_test.go new file mode 100644 index 0000000..adcdeab --- /dev/null +++ b/runtime/applecontainer/exec_darwin_arm64_test.go @@ -0,0 +1,189 @@ +//go:build darwin && arm64 + +package applecontainer + +import ( + "bytes" + "context" + "errors" + "strings" + "testing" + "time" + + "github.com/crunchloop/devcontainer/runtime" +) + +// runningContainer sets up a long-lived alpine container the test +// function can exec into. Cleans up automatically. +func runningContainer(t *testing.T, id string) *Runtime { + t.Helper() + rt := runtimeOrSkip(t) + ctx := context.Background() + + _ = rt.RemoveContainer(ctx, id, runtime.RemoveOptions{Force: true}) + t.Cleanup(func() { + _ = rt.RemoveContainer(ctx, id, runtime.RemoveOptions{Force: true}) + }) + + cliRunStrict(t, + "run", "--rm", "--name", "ac-alpine-warmup", + "docker.io/library/alpine:latest", "/bin/true", + ) + + if _, err := rt.RunContainer(ctx, runtime.RunSpec{ + Image: "docker.io/library/alpine:latest", + Name: id, + Cmd: []string{"sleep", "180"}, + }); err != nil { + t.Fatalf("RunContainer: %v", err) + } + if err := rt.StartContainer(ctx, id); err != nil { + t.Fatalf("StartContainer: %v", err) + } + if err := waitForState(t, rt, id, runtime.StateRunning, 5*time.Second); err != nil { + t.Fatalf("waitForState running: %v", err) + } + return rt +} + +// TestExec_CaptureStdoutAndExit covers the bread-and-butter path: +// no stdin, captured stdout, exit code propagation. +func TestExec_CaptureStdoutAndExit(t *testing.T) { + rt := runningContainer(t, "ac-exec-capture") + ctx := context.Background() + + res, err := rt.ExecContainer(ctx, "ac-exec-capture", runtime.ExecOptions{ + Cmd: []string{"/bin/sh", "-c", "echo hello-stdout; echo hello-stderr 1>&2; exit 7"}, + }) + if err != nil { + t.Fatalf("Exec: %v", err) + } + if res.ExitCode != 7 { + t.Errorf("ExitCode: want 7 got %d", res.ExitCode) + } + if !strings.Contains(res.Stdout, "hello-stdout") { + t.Errorf("Stdout: want contains %q, got %q", "hello-stdout", res.Stdout) + } + if !strings.Contains(res.Stderr, "hello-stderr") { + t.Errorf("Stderr: want contains %q, got %q", "hello-stderr", res.Stderr) + } +} + +// TestExec_StdinRoundTrip pipes a payload through cat and verifies +// the bidirectional pipe wiring. 
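+// (cat exits only on stdin EOF, which the stdin goroutine provides by
+// closing the write end after draining opts.Stdin — so this also
+// covers the EOF plumbing.)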
+func TestExec_StdinRoundTrip(t *testing.T) { + rt := runningContainer(t, "ac-exec-stdin") + ctx := context.Background() + + const payload = "ping-pong-marker-42" + res, err := rt.ExecContainer(ctx, "ac-exec-stdin", runtime.ExecOptions{ + Cmd: []string{"/bin/cat"}, + Stdin: strings.NewReader(payload), + }) + if err != nil { + t.Fatalf("Exec: %v", err) + } + if res.ExitCode != 0 { + t.Errorf("ExitCode: want 0 got %d", res.ExitCode) + } + if !strings.Contains(res.Stdout, payload) { + t.Errorf("Stdout: want contains %q, got %q", payload, res.Stdout) + } +} + +// TestExec_StreamingWriters bypasses the captured-string fallback to +// exercise the io.Writer streaming path (which is what DAP's readiness +// probe + workd attach will use in production). +func TestExec_StreamingWriters(t *testing.T) { + rt := runningContainer(t, "ac-exec-stream") + ctx := context.Background() + + var outBuf, errBuf bytes.Buffer + res, err := rt.ExecContainer(ctx, "ac-exec-stream", runtime.ExecOptions{ + Cmd: []string{"/bin/sh", "-c", "echo stream-out; echo stream-err 1>&2"}, + Stdout: &outBuf, + Stderr: &errBuf, + }) + if err != nil { + t.Fatalf("Exec: %v", err) + } + if res.ExitCode != 0 { + t.Errorf("ExitCode: want 0 got %d", res.ExitCode) + } + // Captured fields stay empty when caller provides writers — this + // is the documented contract on runtime.ExecOptions. + if res.Stdout != "" || res.Stderr != "" { + t.Errorf("captured fields should be empty when writers provided; got Stdout=%q Stderr=%q", + res.Stdout, res.Stderr) + } + if !strings.Contains(outBuf.String(), "stream-out") { + t.Errorf("stdout buffer: want contains stream-out, got %q", outBuf.String()) + } + if !strings.Contains(errBuf.String(), "stream-err") { + t.Errorf("stderr buffer: want contains stream-err, got %q", errBuf.String()) + } +} + +// TestExec_ContextCancelKillsProcess is the design §11.3 question. +// A long-running process is launched, ctx is cancelled after a beat, +// and we assert ExecContainer returns ctx.Err() promptly (not after +// the process's natural timeout). +func TestExec_ContextCancelKillsProcess(t *testing.T) { + rt := runningContainer(t, "ac-exec-cancel") + + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + // Spawn `sleep 60` then cancel after 500ms. ExecContainer should + // return well before 60s (we give it 10s tolerance for the + // SIGTERM round-trip + apiserver scheduling). + done := make(chan struct{}) + var execErr error + start := time.Now() + go func() { + defer close(done) + _, execErr = rt.ExecContainer(ctx, "ac-exec-cancel", runtime.ExecOptions{ + Cmd: []string{"/bin/sleep", "60"}, + }) + }() + + time.Sleep(500 * time.Millisecond) + cancel() + + select { + case <-done: + case <-time.After(15 * time.Second): + t.Fatal("ExecContainer did not return within 15s of ctx cancel — SIGTERM may not be propagating") + } + + elapsed := time.Since(start) + if elapsed > 12*time.Second { + t.Errorf("ExecContainer took %v after cancel; expected ≪ sleep timeout", elapsed) + } + if !errors.Is(execErr, context.Canceled) { + t.Errorf("ExecContainer returned err %v; want context.Canceled", execErr) + } +} + +// TestExec_EnvAndCwd verifies the per-call env and workingDir +// overrides land in the in-VM process. Engine relies on this for +// devcontainer.json's remoteEnv + workspaceFolder. 
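+// Note the explicit PATH in Env below: per-exec Env appears to be
+// passed through as the full process environment rather than merged
+// with the image's defaults, so PATH is restated defensively.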
+func TestExec_EnvAndCwd(t *testing.T) { + rt := runningContainer(t, "ac-exec-env") + ctx := context.Background() + + res, err := rt.ExecContainer(ctx, "ac-exec-env", runtime.ExecOptions{ + Cmd: []string{"/bin/sh", "-c", "echo MY=$MY_VAR PWD=$(pwd)"}, + Env: map[string]string{"MY_VAR": "set-from-exec", "PATH": "/usr/bin:/bin"}, + WorkingDir: "/tmp", + }) + if err != nil { + t.Fatalf("Exec: %v", err) + } + if !strings.Contains(res.Stdout, "MY=set-from-exec") { + t.Errorf("env not honored: stdout=%q", res.Stdout) + } + if !strings.Contains(res.Stdout, "PWD=/tmp") { + t.Errorf("workingDir not honored: stdout=%q", res.Stdout) + } +} diff --git a/runtime/applecontainer/inspect_darwin_arm64.go b/runtime/applecontainer/inspect_darwin_arm64.go index 566536f..fa1c921 100644 --- a/runtime/applecontainer/inspect_darwin_arm64.go +++ b/runtime/applecontainer/inspect_darwin_arm64.go @@ -28,6 +28,7 @@ import ( type envelope[T any] struct { OK bool `json:"ok"` Err string `json:"err"` + Code string `json:"code,omitempty"` Data json.RawMessage `json:"data"` // Decoded payload — populated by decodeEnvelope on success. decoded T @@ -89,18 +90,41 @@ type containerInitProcess struct { } // containerMount mirrors the subset of Apple's Filesystem we need for -// MountInspect. Apple's full shape has more variants (volumes, tmpfs -// sub-cases, etc.); for PR-B we surface the bind-host case and leave -// the rest as opaque defaults. +// MountInspect. Apple's Filesystem.type is a Codable enum-with- +// associated-values rendered as `{"virtiofs":{}}` / `{"tmpfs":{}}` / +// `{"block":{...}}` / `{"volume":{...}}`; mountTypeWire decodes +// either that object shape or a bare string so the rest of the file +// keeps treating Type as a kind tag. type containerMount struct { - // Apple's Filesystem is an enum-with-associated-values from a - // Codable perspective; we read whichever shape arrives via the - // optional fields below. - Type string `json:"type"` - Source string `json:"source"` - Destination string `json:"destination"` - ReadOnly bool `json:"readonly"` - Options []string `json:"options"` + Type mountTypeWire `json:"type"` + Source string `json:"source"` + Destination string `json:"destination"` + Options []string `json:"options"` +} + +// mountTypeWire decodes Apple's FSType. Always lands as the variant +// name in lowercase ("virtiofs", "tmpfs", "block", "volume", ...). +type mountTypeWire string + +func (m *mountTypeWire) UnmarshalJSON(data []byte) error { + // Compatibility path: an older bridge or a test fixture might emit + // the kind as a plain string ("virtiofs"). Accept it first. + var asString string + if err := json.Unmarshal(data, &asString); err == nil { + *m = mountTypeWire(asString) + return nil + } + // Object form. The single key is the variant name; the value is + // the associated-values payload, which we don't need yet. + var asObject map[string]json.RawMessage + if err := json.Unmarshal(data, &asObject); err != nil { + return fmt.Errorf("mountTypeWire: %w (raw=%s)", err, string(data)) + } + for k := range asObject { + *m = mountTypeWire(k) + return nil + } + return errors.New("mountTypeWire: empty FSType object") } type imageInspectPayload struct { @@ -115,6 +139,32 @@ type imageInspectPayload struct { // ---- mapping helpers ------------------------------------------------- +// mountKindToRuntime maps Apple's FSType variant name to the canonical +// runtime.MountType string. 
Apple uses `virtiofs` for bind-style host
+// mounts; we surface that as "bind" since callers (engine, tests)
+// reason about mounts in Docker-style terms.
+func mountKindToRuntime(kind string) string {
+	switch kind {
+	case "virtiofs":
+		return string(runtime.MountBind)
+	case "tmpfs":
+		return string(runtime.MountTmpfs)
+	case "volume":
+		return string(runtime.MountVolume)
+	default:
+		return kind
+	}
+}
+
+func containsOption(opts []string, target string) bool {
+	for _, o := range opts {
+		if o == target {
+			return true
+		}
+	}
+	return false
+}
+
 func mapState(s string) runtime.State {
 	switch s {
 	case "running":
@@ -141,10 +191,10 @@ func snapshotToDetails(s containerSnapshot) *runtime.ContainerDetails {
 	mounts := make([]runtime.MountInspect, 0, len(s.Configuration.Mounts))
 	for _, m := range s.Configuration.Mounts {
 		mounts = append(mounts, runtime.MountInspect{
-			Type:     m.Type,
+			Type:     mountKindToRuntime(string(m.Type)),
 			Source:   m.Source,
 			Target:   m.Destination,
-			ReadOnly: m.ReadOnly,
+			ReadOnly: containsOption(m.Options, "ro"),
 		})
 	}
 	var startedAt time.Time
diff --git a/runtime/applecontainer/lifecycle_darwin_arm64.go b/runtime/applecontainer/lifecycle_darwin_arm64.go
new file mode 100644
index 0000000..29113d6
--- /dev/null
+++ b/runtime/applecontainer/lifecycle_darwin_arm64.go
@@ -0,0 +1,286 @@
+//go:build darwin && arm64
+
+package applecontainer
+
+/*
+#include <stdlib.h>
+#include "shim.h"
+*/
+import "C"
+
+import (
+	"context"
+	"encoding/json"
+	"errors"
+	"fmt"
+	"unsafe"
+
+	"github.com/crunchloop/devcontainer/runtime"
+)
+
+// runSpecJSON is the wire shape sent to the bridge. Fields are kept
+// in lockstep with applecontainer-bridge/Sources/ACBridge/lifecycle.swift's
+// RunSpecJSON. Fields this backend rejects outright (RunArgs,
+// Privileged, SecurityOpt — see rejectUnsupportedRunSpec) are
+// intentionally absent here, so the build fails if a caller starts
+// depending on them without updating the design doc.
+type runSpecJSON struct {
+	Image           string            `json:"image"`
+	ID              string            `json:"id"`
+	Cmd             []string          `json:"cmd,omitempty"`
+	Entrypoint      []string          `json:"entrypoint,omitempty"`
+	User            string            `json:"user,omitempty"`
+	WorkingDir      string            `json:"workingDir,omitempty"`
+	Env             []string          `json:"env,omitempty"`
+	Labels          map[string]string `json:"labels,omitempty"`
+	Mounts          []mountJSON       `json:"mounts,omitempty"`
+	InitProcess     bool              `json:"initProcess,omitempty"`
+	CapAdd          []string          `json:"capAdd,omitempty"`
+	OverrideCommand bool              `json:"overrideCommand,omitempty"`
+}
+
+type mountJSON struct {
+	Type     string `json:"type"`
+	Source   string `json:"source,omitempty"`
+	Target   string `json:"target"`
+	ReadOnly bool   `json:"readOnly,omitempty"`
+}
+
+type runResultData struct {
+	ID string `json:"id"`
+}
+
+// RunContainer creates a container from a RunSpec, returning the
+// resulting handle. Apple's split is create→bootstrap→start; this
+// method only does create, matching runtime.Runtime's contract that
+// callers invoke StartContainer separately so the engine can write
+// idempotency markers between phases.
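+//
+// A typical caller sequence, as a sketch:
+//
+//	c, err := rt.RunContainer(ctx, spec) // create only; State = created
+//	// ... engine persists its idempotency marker here ...
+//	err = rt.StartContainer(ctx, c.ID)   // bootstrap + start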
+func (r *Runtime) RunContainer(ctx context.Context, spec runtime.RunSpec) (*runtime.Container, error) { + if err := ctx.Err(); err != nil { + return nil, err + } + if err := ensureLoaded(); err != nil { + return nil, err + } + if err := rejectUnsupportedRunSpec(spec); err != nil { + return nil, err + } + wire := runSpecToWire(spec) + if wire.ID == "" { + return nil, errors.New("applecontainer: RunSpec.Name is required (used as container id)") + } + if wire.Image == "" { + return nil, errors.New("applecontainer: RunSpec.Image is required") + } + specBytes, err := json.Marshal(wire) + if err != nil { + return nil, fmt.Errorf("applecontainer: marshal RunSpec: %w", err) + } + cSpec := C.CString(string(specBytes)) + defer C.free(unsafe.Pointer(cSpec)) + + raw := goStringAndFree(C.ac_run_p(cSpec)) + if raw == "" { + return nil, errors.New("applecontainer: bridge returned nil for RunContainer") + } + env, err := decodeEnvelope[runResultData](raw) + if err != nil { + return nil, mapRunErr(spec.Image, err) + } + return &runtime.Container{ + ID: env.decoded.ID, + Name: env.decoded.ID, + Image: spec.Image, + State: runtime.StateCreated, + }, nil +} + +// StartContainer bootstraps and starts a previously created container. +// Idempotent against already-running containers (the bridge short- +// circuits when the snapshot reports running). +func (r *Runtime) StartContainer(ctx context.Context, id string) error { + if err := ctx.Err(); err != nil { + return err + } + if err := ensureLoaded(); err != nil { + return err + } + cID := C.CString(id) + defer C.free(unsafe.Pointer(cID)) + raw := goStringAndFree(C.ac_start_p(cID)) + if raw == "" { + return errors.New("applecontainer: bridge returned nil for StartContainer") + } + if _, err := decodeEnvelope[json.RawMessage](raw); err != nil { + return mapLifecycleErr(id, err) + } + return nil +} + +// StopContainer sends SIGTERM with a grace period, then SIGKILL. Apple's +// stop API takes an Int32 grace period in seconds; we round nanos up +// and clamp. +func (r *Runtime) StopContainer(ctx context.Context, id string, opts runtime.StopOptions) error { + if err := ctx.Err(); err != nil { + return err + } + if err := ensureLoaded(); err != nil { + return err + } + var graceSec int32 + if opts.Timeout > 0 { + // Round up to whole seconds, clamp to MaxInt32 in the unlikely + // case of a wildly-large timeout. + secs := (opts.Timeout.Nanoseconds() + 999_999_999) / 1_000_000_000 + if secs > 1<<31-1 { + secs = 1<<31 - 1 + } + graceSec = int32(secs) + } + cID := C.CString(id) + defer C.free(unsafe.Pointer(cID)) + raw := goStringAndFree(C.ac_stop_p(cID, C.int32_t(graceSec))) + if raw == "" { + return errors.New("applecontainer: bridge returned nil for StopContainer") + } + if _, err := decodeEnvelope[json.RawMessage](raw); err != nil { + return mapLifecycleErr(id, err) + } + return nil +} + +// RemoveContainer deletes a container. Force=true allows deletion of +// running containers; RemoveVolumes is silently dropped on this +// backend (volume lifecycle isn't yet wired up — see PR-C scope cuts +// in design §8). 
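+// Callers needing volume cleanup must handle it out of band until
+// volume lifecycle lands.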
+func (r *Runtime) RemoveContainer(ctx context.Context, id string, opts runtime.RemoveOptions) error { + if err := ctx.Err(); err != nil { + return err + } + if err := ensureLoaded(); err != nil { + return err + } + var force C.int32_t + if opts.Force { + force = 1 + } + cID := C.CString(id) + defer C.free(unsafe.Pointer(cID)) + raw := goStringAndFree(C.ac_delete_p(cID, force)) + if raw == "" { + return errors.New("applecontainer: bridge returned nil for RemoveContainer") + } + if _, err := decodeEnvelope[json.RawMessage](raw); err != nil { + return mapLifecycleErr(id, err) + } + return nil +} + +// rejectUnsupportedRunSpec fails fast on RunSpec fields that this +// backend cannot honor. Documented in design §8 as not modeled on +// Apple's `container` runtime; rather than silently dropping them +// (which would let callers observe apparent success with the option +// ignored), we surface a typed UnsupportedOptionError before crossing +// the cgo boundary. +func rejectUnsupportedRunSpec(spec runtime.RunSpec) error { + if len(spec.RunArgs) > 0 { + return &runtime.UnsupportedOptionError{Backend: "applecontainer", Option: "RunArgs"} + } + if spec.Privileged { + return &runtime.UnsupportedOptionError{Backend: "applecontainer", Option: "Privileged"} + } + if len(spec.SecurityOpt) > 0 { + return &runtime.UnsupportedOptionError{Backend: "applecontainer", Option: "SecurityOpt"} + } + return nil +} + +// runSpecToWire projects runtime.RunSpec onto the JSON shape the +// bridge expects. Fields the apple-container backend does not support +// (RunArgs, Privileged, SecurityOpt) are rejected by +// rejectUnsupportedRunSpec before reaching this function, so they +// don't appear in the wire type at all. +func runSpecToWire(spec runtime.RunSpec) runSpecJSON { + out := runSpecJSON{ + Image: spec.Image, + ID: spec.Name, + Cmd: spec.Cmd, + Entrypoint: spec.Entrypoint, + User: spec.User, + WorkingDir: spec.WorkingDir, + Env: envMapToSlice(spec.Env), + Labels: spec.Labels, + Mounts: mapMounts(spec.Mounts), + InitProcess: spec.Init, + CapAdd: spec.CapAdd, + OverrideCommand: spec.OverrideCommand, + } + return out +} + +func envMapToSlice(m map[string]string) []string { + if len(m) == 0 { + return nil + } + out := make([]string, 0, len(m)) + for k, v := range m { + out = append(out, k+"="+v) + } + return out +} + +func mapMounts(in []runtime.MountSpec) []mountJSON { + if len(in) == 0 { + return nil + } + out := make([]mountJSON, 0, len(in)) + for _, m := range in { + out = append(out, mountJSON{ + Type: mountTypeToWire(m.Type), + Source: m.Source, + Target: m.Target, + ReadOnly: m.ReadOnly, + }) + } + return out +} + +func mountTypeToWire(t runtime.MountType) string { + switch t { + case runtime.MountBind: + return "bind" + case runtime.MountTmpfs: + return "tmpfs" + case runtime.MountVolume: + return "volume" + default: + return string(t) + } +} + +func mapRunErr(image string, err error) error { + if err == nil { + return nil + } + msg := err.Error() + // Apple emits `notFound: "image with reference "` from + // ClientImage.get; less-specific paths might surface "no such + // image" / "no metadata for image" / "image not found". Match + // loosely so wording shifts don't silently drop the typed-error + // contract. 
+ if containsAny(msg, "image with reference", "image not found", "no such image", "no metadata for image") { + return &runtime.ImageNotFoundError{Ref: image, Err: err} + } + return err +} + +func mapLifecycleErr(id string, err error) error { + if err == nil { + return nil + } + msg := err.Error() + if containsAny(msg, "notFound", "not found") { + return &runtime.ContainerNotFoundError{ID: id, Err: err} + } + return err +} diff --git a/runtime/applecontainer/lifecycle_darwin_arm64_test.go b/runtime/applecontainer/lifecycle_darwin_arm64_test.go new file mode 100644 index 0000000..ee04e92 --- /dev/null +++ b/runtime/applecontainer/lifecycle_darwin_arm64_test.go @@ -0,0 +1,192 @@ +//go:build darwin && arm64 + +package applecontainer + +import ( + "context" + "errors" + "strings" + "testing" + "time" + + "github.com/crunchloop/devcontainer/runtime" +) + +// TestLifecycle_EndToEnd exercises the full PR-C surface: +// Run → Start → Inspect (running) → Stop → Inspect (stopped) → Remove +// +// Validates the create/start split + the JSON wire shape + +// integration with PR-B's InspectContainer. Skips when the daemon is +// down. +func TestLifecycle_EndToEnd(t *testing.T) { + rt := runtimeOrSkip(t) + ctx := context.Background() + + const id = "ac-lifecycle-e2e" + // Pre-clean leftover from a previous failed run. + _ = rt.RemoveContainer(ctx, id, runtime.RemoveOptions{Force: true}) + t.Cleanup(func() { + _ = rt.RemoveContainer(ctx, id, runtime.RemoveOptions{Force: true}) + }) + + // Ensure alpine is locally cached (RunContainer requires it). + cliRunStrict(t, + "run", "--rm", "--name", "ac-alpine-warmup", + "docker.io/library/alpine:latest", "/bin/true", + ) + + created, err := rt.RunContainer(ctx, runtime.RunSpec{ + Image: "docker.io/library/alpine:latest", + Name: id, + Cmd: []string{"sleep", "120"}, + Env: map[string]string{"PATH": "/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin"}, + Labels: map[string]string{ + "dev.containers.id": "lifecycle-test-7", + "dev.containers.engine": "devcontainer-go/test", + }, + }) + if err != nil { + t.Fatalf("RunContainer: %v", err) + } + if created.ID != id { + t.Errorf("created.ID: want %q got %q", id, created.ID) + } + if created.State != runtime.StateCreated { + t.Errorf("created.State: want %q got %q", runtime.StateCreated, created.State) + } + + if err := rt.StartContainer(ctx, id); err != nil { + t.Fatalf("StartContainer: %v", err) + } + + // Give the apiserver a beat to flip status; the snapshot reflects + // the latest known state of the runtime status finite-state machine. + if err := waitForState(t, rt, id, runtime.StateRunning, 5*time.Second); err != nil { + t.Fatalf("waiting for running: %v", err) + } + + // Inspect should now see the running container with our labels. + details, err := rt.InspectContainer(ctx, id) + if err != nil { + t.Fatalf("InspectContainer (running): %v", err) + } + if got := details.Labels["dev.containers.id"]; got != "lifecycle-test-7" { + t.Errorf("Labels[dev.containers.id]: want %q got %q", "lifecycle-test-7", got) + } + + // Idempotent start: calling again on a running container should + // no-op (matches CLI behavior + Docker semantics). 
+ if err := rt.StartContainer(ctx, id); err != nil { + t.Errorf("StartContainer (idempotent): %v", err) + } + + if err := rt.StopContainer(ctx, id, runtime.StopOptions{Timeout: 3 * time.Second}); err != nil { + t.Fatalf("StopContainer: %v", err) + } + + if err := waitForState(t, rt, id, runtime.StateExited, 5*time.Second); err != nil { + t.Fatalf("waiting for stopped: %v", err) + } + + if err := rt.RemoveContainer(ctx, id, runtime.RemoveOptions{}); err != nil { + t.Fatalf("RemoveContainer: %v", err) + } + + // Post-remove inspect should now return ContainerNotFoundError. + if _, err := rt.InspectContainer(ctx, id); err == nil { + t.Error("InspectContainer after Remove: want error, got nil") + } else { + var nf *runtime.ContainerNotFoundError + if !errors.As(err, &nf) { + t.Errorf("InspectContainer after Remove: want *ContainerNotFoundError, got %T: %v", err, err) + } + } +} + +// TestRunContainer_MissingImage asserts the typed-error contract for +// the load-bearing case: a caller asks us to create a container against +// an image that hasn't been pulled. +func TestRunContainer_MissingImage(t *testing.T) { + rt := runtimeOrSkip(t) + _, err := rt.RunContainer(context.Background(), runtime.RunSpec{ + Image: "docker.io/library/does-not-exist-zzz:0", + Name: "ac-missing-image", + Cmd: []string{"/bin/true"}, + }) + if err == nil { + t.Fatal("RunContainer: want error, got nil") + } + var nf *runtime.ImageNotFoundError + if !errors.As(err, &nf) { + t.Logf("err: %T %v", err, err) + t.Fatalf("want *ImageNotFoundError, got %T", err) + } +} + +// TestRunContainer_BindMount creates a container with a virtiofs bind +// mount and verifies the inspect path round-trips it. Doesn't run the +// container — just exercises the mount-spec wiring. +func TestRunContainer_BindMount(t *testing.T) { + rt := runtimeOrSkip(t) + ctx := context.Background() + const id = "ac-bindmount-test" + _ = rt.RemoveContainer(ctx, id, runtime.RemoveOptions{Force: true}) + t.Cleanup(func() { + _ = rt.RemoveContainer(ctx, id, runtime.RemoveOptions{Force: true}) + }) + + hostDir := t.TempDir() + + cliRunStrict(t, + "run", "--rm", "--name", "ac-alpine-warmup", + "docker.io/library/alpine:latest", "/bin/true", + ) + + _, err := rt.RunContainer(ctx, runtime.RunSpec{ + Image: "docker.io/library/alpine:latest", + Name: id, + Cmd: []string{"sleep", "60"}, + Mounts: []runtime.MountSpec{ + {Type: runtime.MountBind, Source: hostDir, Target: "/mnt/work", ReadOnly: false}, + }, + }) + if err != nil { + t.Fatalf("RunContainer with bind: %v", err) + } + + details, err := rt.InspectContainer(ctx, id) + if err != nil { + t.Fatalf("InspectContainer: %v", err) + } + // Apple normalizes the source via URL(fileURLWithPath:).absolutePath(), + // which canonically appends a trailing slash for directories. Match + // with TrimRight to absorb that without becoming hostage to the + // quirk in case it changes. + var found bool + for _, m := range details.Mounts { + if m.Target == "/mnt/work" && + strings.TrimRight(m.Source, "/") == strings.TrimRight(hostDir, "/") { + found = true + } + } + if !found { + t.Errorf("bind mount not round-tripped; details.Mounts=%+v want target=/mnt/work source=%q", + details.Mounts, hostDir) + } +} + +// waitForState polls InspectContainer until the desired state is +// observed or the timeout fires. Apple's runtime status transitions +// asynchronously through the apiserver event loop. 
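+// Polling (150ms period) is used because the bridge doesn't currently
+// export a state-change subscription; worst-case detection latency
+// stays well under the timeouts these tests pass in.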
+func waitForState(t *testing.T, rt *Runtime, id string, want runtime.State, timeout time.Duration) error {
+	t.Helper()
+	deadline := time.Now().Add(timeout)
+	for time.Now().Before(deadline) {
+		d, err := rt.InspectContainer(context.Background(), id)
+		if err == nil && d.State == want {
+			return nil
+		}
+		time.Sleep(150 * time.Millisecond)
+	}
+	return errors.New("timeout waiting for state " + string(want))
+}
diff --git a/runtime/applecontainer/logs_darwin_arm64.go b/runtime/applecontainer/logs_darwin_arm64.go
new file mode 100644
index 0000000..111e743
--- /dev/null
+++ b/runtime/applecontainer/logs_darwin_arm64.go
@@ -0,0 +1,129 @@
+//go:build darwin && arm64
+
+package applecontainer
+
+/*
+#include <stdlib.h>
+#include "shim.h"
+*/
+import "C"
+
+import (
+	"context"
+	"errors"
+	"fmt"
+	"io"
+	"os"
+	"time"
+	"unsafe"
+)
+
+type logsOpenData struct {
+	FD int32 `json:"fd"`
+}
+
+// ContainerLogs streams the container's stdio log to w. The
+// underlying log file is on disk; non-follow reads to EOF and
+// returns. Follow polls after EOF for new data and keeps streaming
+// until ctx is cancelled — closing the fd from a watcher goroutine
+// unblocks the read.
+//
+// Boot logs (index 1 from Apple's logs API) are not surfaced.
+func (r *Runtime) ContainerLogs(ctx context.Context, id string, w io.Writer, follow bool) error {
+	if err := ctx.Err(); err != nil {
+		return err
+	}
+	if w == nil {
+		return errors.New("applecontainer: log writer is nil")
+	}
+	if err := ensureLoaded(); err != nil {
+		return err
+	}
+	cID := C.CString(id)
+	defer C.free(unsafe.Pointer(cID))
+	raw := goStringAndFree(C.ac_logs_open_p(cID))
+	if raw == "" {
+		return errors.New("applecontainer: bridge returned nil for ContainerLogs")
+	}
+	env, err := decodeEnvelope[logsOpenData](raw)
+	if err != nil {
+		return mapLifecycleErr(id, err)
+	}
+	if env.decoded.FD < 0 {
+		return errors.New("applecontainer: bridge returned invalid log fd")
+	}
+	file := os.NewFile(uintptr(env.decoded.FD), "applecontainer-logs")
+	if file == nil {
+		return errors.New("applecontainer: os.NewFile failed for log fd")
+	}
+	defer file.Close()
+
+	// ctx-watcher: closing the file unblocks any in-flight Read. The
+	// watcher and the deferred Close may both fire; the second Close
+	// on an *os.File just returns ErrClosed, which we ignore.
+	stop := make(chan struct{})
+	go func() {
+		select {
+		case <-ctx.Done():
+			_ = file.Close()
+		case <-stop:
+		}
+	}()
+	defer close(stop)
+
+	return copyLogs(ctx, file, w, follow)
+}
+
+// copyLogs reads from src to w. In follow mode, it sleeps briefly on
+// zero-byte reads (EOF on a still-open regular file means "no new
+// data yet"). Always returns ctx.Err() if ctx is cancelled, even if
+// the underlying file close manifests as an EBADF / "file already
+// closed" error.
+func copyLogs(ctx context.Context, src *os.File, dst io.Writer, follow bool) error {
+	buf := make([]byte, 32*1024)
+	pollInterval := 200 * time.Millisecond
+	for {
+		n, err := src.Read(buf)
+		if n > 0 {
+			// io.Writer is allowed to short-write without returning
+			// an error; loop until the chunk is fully drained so we
+			// never silently drop log bytes.
+			off := 0
+			for off < n {
+				written, werr := dst.Write(buf[off:n])
+				if werr != nil {
+					return fmt.Errorf("applecontainer: log writer: %w", werr)
+				}
+				if written == 0 {
+					return fmt.Errorf("applecontainer: log writer: %w", io.ErrShortWrite)
+				}
+				off += written
+			}
+		}
+		if err == nil {
+			continue
+		}
+		if errors.Is(err, io.EOF) {
+			if !follow {
+				return nil
+			}
+			// Wait for new data, but bail if ctx is done.
We can't + // use a single timer that resets per loop without making + // the close-on-cancel race tricky; the sleep-poll is + // simple and bounded. + select { + case <-ctx.Done(): + return ctx.Err() + case <-time.After(pollInterval): + } + continue + } + // Read errored — either ctx cancellation triggered a close + // or the apiserver did something unexpected. Prefer ctx.Err() + // if cancelled; that's the contract caller wired up. + if cErr := ctx.Err(); cErr != nil { + return cErr + } + return fmt.Errorf("applecontainer: log read: %w", err) + } +} + diff --git a/runtime/applecontainer/logs_darwin_arm64_test.go b/runtime/applecontainer/logs_darwin_arm64_test.go new file mode 100644 index 0000000..9fbf799 --- /dev/null +++ b/runtime/applecontainer/logs_darwin_arm64_test.go @@ -0,0 +1,191 @@ +//go:build darwin && arm64 + +package applecontainer + +import ( + "bytes" + "context" + "errors" + "strings" + "sync" + "testing" + "time" + + "github.com/crunchloop/devcontainer/runtime" +) + +// chattyContainer runs an alpine container whose init process emits +// known output on a known cadence. Lets us assert on the log stream +// shape without poking at non-deterministic kernel/runtime chatter. +func chattyContainer(t *testing.T, id, script string) *Runtime { + t.Helper() + rt := runtimeOrSkip(t) + ctx := context.Background() + + _ = rt.RemoveContainer(ctx, id, runtime.RemoveOptions{Force: true}) + t.Cleanup(func() { + _ = rt.RemoveContainer(ctx, id, runtime.RemoveOptions{Force: true}) + }) + + cliRunStrict(t, + "run", "--rm", "--name", "ac-alpine-warmup", + "docker.io/library/alpine:latest", "/bin/true", + ) + + if _, err := rt.RunContainer(ctx, runtime.RunSpec{ + Image: "docker.io/library/alpine:latest", + Name: id, + Cmd: []string{"/bin/sh", "-c", script}, + }); err != nil { + t.Fatalf("RunContainer: %v", err) + } + if err := rt.StartContainer(ctx, id); err != nil { + t.Fatalf("StartContainer: %v", err) + } + if err := waitForState(t, rt, id, runtime.StateRunning, 5*time.Second); err != nil { + t.Fatalf("waitForState: %v", err) + } + return rt +} + +// TestLogs_NonFollow asserts the non-follow path reads everything +// emitted so far and returns. +func TestLogs_NonFollow(t *testing.T) { + const id = "ac-logs-nonfollow" + // Emit two short lines then sleep so the log is bounded. + rt := chattyContainer(t, id, "echo hello-from-logs-1; echo hello-from-logs-2; sleep 60") + + // Poll under a bounded timeout until both markers appear (or fail). + // Avoids a flaky fixed sleep on slow log-flush and an unbounded + // context.Background() on regressions. + ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second) + defer cancel() + + var ( + buf bytes.Buffer + got string + ) + deadline := time.Now().Add(3 * time.Second) + for { + buf.Reset() + if err := rt.ContainerLogs(ctx, id, &buf, false); err != nil { + t.Fatalf("ContainerLogs: %v", err) + } + got = buf.String() + if strings.Contains(got, "hello-from-logs-1") && strings.Contains(got, "hello-from-logs-2") { + break + } + if time.Now().After(deadline) { + t.Fatalf("logs missing markers; got %q", got) + } + time.Sleep(100 * time.Millisecond) + } +} + +// TestLogs_FollowBlocksUntilCancel verifies follow mode keeps reading +// past EOF until ctx fires. Runs a container that emits a marker +// every second, then cancels after observing the first marker — we +// expect ContainerLogs to return ctx.Err() promptly. 
+func TestLogs_FollowBlocksUntilCancel(t *testing.T) {
+	const id = "ac-logs-follow"
+	rt := chattyContainer(t, id,
+		`i=0; while true; do echo follow-marker-$i; i=$((i+1)); sleep 1; done`,
+	)
+
+	ctx, cancel := context.WithCancel(context.Background())
+	defer cancel()
+
+	pr, pw := newPipeWriter()
+
+	var (
+		wg     sync.WaitGroup
+		logErr error
+	)
+	wg.Add(1)
+	go func() {
+		defer wg.Done()
+		logErr = rt.ContainerLogs(ctx, id, pw, true)
+		pw.Close()
+	}()
+
+	// Block until we see the first marker line.
+	if err := pr.waitFor("follow-marker-", 8*time.Second); err != nil {
+		t.Fatalf("waiting for marker: %v (collected so far=%q)", err, pr.collected())
+	}
+
+	cancel()
+
+	done := make(chan struct{})
+	go func() { wg.Wait(); close(done) }()
+	select {
+	case <-done:
+	case <-time.After(5 * time.Second):
+		t.Fatal("ContainerLogs did not return within 5s of ctx cancel")
+	}
+
+	if !errors.Is(logErr, context.Canceled) {
+		t.Errorf("logs err: want context.Canceled, got %v", logErr)
+	}
+}
+
+// pipeWriter is a tiny in-memory streaming sink: a goroutine can
+// write to it while another waits for a substring to appear. Avoids
+// the data race of the bare hand-rolled bytes.Buffer most tests reach
+// for (the sleep-poll stays, but every access is under the lock).
type pipeWriter struct {
+	mu   sync.Mutex
+	buf  []byte
+	done bool
+}
+
+// newPipeWriter returns the same object twice (reader view, writer
+// view), mirroring io.Pipe's call shape at the call site.
+func newPipeWriter() (*pipeWriter, *pipeWriter) {
+	p := &pipeWriter{}
+	return p, p
+}
+
+func (p *pipeWriter) Write(b []byte) (int, error) {
+	p.mu.Lock()
+	defer p.mu.Unlock()
+	p.buf = append(p.buf, b...)
+	return len(b), nil
+}
+
+func (p *pipeWriter) Close() error {
+	p.mu.Lock()
+	p.done = true
+	p.mu.Unlock()
+	return nil
+}
+
+func (p *pipeWriter) waitFor(substr string, timeout time.Duration) error {
+	deadline := time.Now().Add(timeout)
+	p.mu.Lock()
+	defer p.mu.Unlock()
+	for !p.matchLocked(substr) {
+		if p.done {
+			return errors.New("writer closed before substring appeared")
+		}
+		if !time.Now().Before(deadline) {
+			return errors.New("timeout waiting for substring")
+		}
+		// Brief unlocked sleep, then re-check under the lock.
+		p.mu.Unlock()
+		time.Sleep(50 * time.Millisecond)
+		p.mu.Lock()
+	}
+	return nil
+}
+
+func (p *pipeWriter) matchLocked(s string) bool {
+	return bytes.Contains(p.buf, []byte(s))
+}
+
+func (p *pipeWriter) collected() string {
+	p.mu.Lock()
+	defer p.mu.Unlock()
+	return string(p.buf)
+}
diff --git a/runtime/applecontainer/pull_darwin_arm64.go b/runtime/applecontainer/pull_darwin_arm64.go
new file mode 100644
index 0000000..727520b
--- /dev/null
+++ b/runtime/applecontainer/pull_darwin_arm64.go
@@ -0,0 +1,77 @@
+//go:build darwin && arm64
+
+package applecontainer
+
+/*
+#include <stdlib.h>
+#include "shim.h"
+*/
+import "C"
+
+import (
+	"context"
+	"errors"
+	"unsafe"
+
+	"github.com/crunchloop/devcontainer/runtime"
+)
+
+type pullResultData struct {
+	Reference string `json:"reference"`
+	Digest    string `json:"digest"`
+}
+
+// PullImage fetches an image from a remote registry. PR-F scope:
+// synchronous pull, no in-flight progress events. The runtime.Runtime
+// contract accepts a BuildEvent channel; we emit a "pulling" log
+// event before the call and a "completed" event after, both
+// non-blocking. Fine-grained progress is a future PR.
+//
+// Cancellation: ctx is checked at entry. Once the bridge call is
+// in-flight, ctx cancellation is best-effort — Apple's pull API
+// doesn't expose a cancellation token, so the underlying pull
+// continues to completion even if ctx fires.
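+//
+// Event sequence for a successful pull (both sends best-effort):
+//
+//	{Kind: BuildEventLog,       Message: "applecontainer: pulling <ref>"}
+//	{Kind: BuildEventCompleted, Digest: <digest>, Message: "applecontainer: pulled <ref>"}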
+func (r *Runtime) PullImage(ctx context.Context, ref string, events chan<- runtime.BuildEvent) (runtime.ImageRef, error) { + if err := ctx.Err(); err != nil { + return runtime.ImageRef{}, err + } + if err := ensureLoaded(); err != nil { + return runtime.ImageRef{}, err + } + emitBuildEvent(events, runtime.BuildEvent{ + Kind: runtime.BuildEventLog, + Message: "applecontainer: pulling " + ref, + }) + + cRef := C.CString(ref) + defer C.free(unsafe.Pointer(cRef)) + raw := goStringAndFree(C.ac_pull_image_p(cRef)) + if raw == "" { + return runtime.ImageRef{}, errors.New("applecontainer: bridge returned nil for PullImage") + } + env, err := decodeEnvelope[pullResultData](raw) + if err != nil { + return runtime.ImageRef{}, mapImageInspectErr(ref, err) + } + emitBuildEvent(events, runtime.BuildEvent{ + Kind: runtime.BuildEventCompleted, + Digest: env.decoded.Digest, + Message: "applecontainer: pulled " + env.decoded.Reference, + }) + return runtime.ImageRef{ + ID: env.decoded.Digest, + Tags: []string{env.decoded.Reference}, + }, nil +} + +// emitBuildEvent is a non-blocking send. Per the BuildEvent contract, +// events are best-effort: a slow consumer doesn't gate progress. +func emitBuildEvent(ch chan<- runtime.BuildEvent, ev runtime.BuildEvent) { + if ch == nil { + return + } + select { + case ch <- ev: + default: + } +} diff --git a/runtime/applecontainer/pull_darwin_arm64_test.go b/runtime/applecontainer/pull_darwin_arm64_test.go new file mode 100644 index 0000000..1d1b3b9 --- /dev/null +++ b/runtime/applecontainer/pull_darwin_arm64_test.go @@ -0,0 +1,105 @@ +//go:build darwin && arm64 + +package applecontainer + +import ( + "context" + "errors" + "os/exec" + "strings" + "testing" + + "github.com/crunchloop/devcontainer/runtime" +) + +// TestPullImage_SmallPublic pulls a tiny public image. Skips if the +// daemon is unreachable. Verifies the ImageRef + the BuildEvent +// surface (one log + one completed event, both non-blocking). +// +// Pre-removes the image first so we test the real pull path rather +// than the local-cache hit (which Apple resolves without an upstream +// fetch). +func TestPullImage_SmallPublic(t *testing.T) { + rt := runtimeOrSkip(t) + ctx := context.Background() + const ref = "docker.io/library/alpine:latest" + + // Pre-cleanup: try to delete via CLI. Best-effort. + cliRun(t, "images", "rm", ref) + cliRun(t, "image", "rm", ref) + + events := make(chan runtime.BuildEvent, 16) + imageRef, err := rt.PullImage(ctx, ref, events) + close(events) + + if err != nil { + // If `container images rm` isn't available, the image may + // still be cached from earlier tests and pull will still + // succeed. If it actually fails, surface it. + t.Fatalf("PullImage: %v", err) + } + + if imageRef.ID == "" { + t.Errorf("ImageRef.ID is empty") + } + if len(imageRef.Tags) == 0 || imageRef.Tags[0] == "" { + t.Errorf("ImageRef.Tags: want non-empty, got %v", imageRef.Tags) + } + + // We accept anywhere from 1 (just completed, with the slow- + // consumer guard dropping the log) to 2 events. Just verify at + // least the completed event made it through. 
+	var sawCompleted bool
+	for ev := range events {
+		if ev.Kind == runtime.BuildEventCompleted {
+			sawCompleted = true
+			if ev.Digest == "" {
+				t.Errorf("BuildEventCompleted.Digest is empty")
+			}
+		}
+	}
+	if !sawCompleted {
+		t.Errorf("BuildEventCompleted not emitted")
+	}
+}
+
+// TestPullImage_NoSuchImage asserts the typed-error contract — must
+// translate Apple's "notFound" into runtime.ImageNotFoundError so
+// engine code can do typed dispatch.
+func TestPullImage_NoSuchImage(t *testing.T) {
+	rt := runtimeOrSkip(t)
+	_, err := rt.PullImage(context.Background(),
+		"docker.io/library/does-not-exist-zzz-99:latest", nil)
+	if err == nil {
+		t.Fatal("PullImage: want error, got nil")
+	}
+	var nf *runtime.ImageNotFoundError
+	if !errors.As(err, &nf) {
+		// Some registry-level errors don't surface as "notFound" —
+		// network refusal, auth — and we don't want to falsely claim
+		// not-found in those cases. Accept any error here, but log
+		// the type so a real regression surfaces in CI output.
+		t.Logf("non-typed pull error (acceptable for genuine network failures): %T %v", err, err)
+		// Best-effort assertion: real "missing image" errors should
+		// reach this branch. If they don't, the test still passes
+		// because some valid network errors land here too — but the
+		// CI log will show which.
+		if isLikelyMissingImage(err) {
+			t.Errorf("error looks like a missing-image error but wasn't typed: %v", err)
+		}
+	}
+}
+
+func isLikelyMissingImage(err error) bool {
+	msg := strings.ToLower(err.Error())
+	return strings.Contains(msg, "not found") ||
+		strings.Contains(msg, "notfound") ||
+		strings.Contains(msg, "no such")
+}
+
+// cliRun is reused from inspect_darwin_arm64_test.go — the
+// best-effort `container ...` wrapper. Re-declaring it here would be
+// a duplicate definition within the package. The blank reference
+// below exists only to pin this file's os/exec import against the
+// unused-import check.
+var _ = exec.LookPath
diff --git a/runtime/applecontainer/runtime_darwin_arm64.go b/runtime/applecontainer/runtime_darwin_arm64.go
index a553f9e..6f2f618 100644
--- a/runtime/applecontainer/runtime_darwin_arm64.go
+++ b/runtime/applecontainer/runtime_darwin_arm64.go
@@ -18,7 +18,6 @@ import (
 	"encoding/json"
 	"errors"
 	"fmt"
-	"io"
 	"math"
 	"time"
 	"unsafe"
@@ -140,39 +139,15 @@ func bridgeVersion() string {
 	return C.GoString(cstr)
 }
 
-// ---- runtime.Runtime stubs (filled in PR-B onward) -------------------
+// ---- runtime.Runtime method map -------------------------------------
 
-func (*Runtime) BuildImage(context.Context, runtime.BuildSpec, chan<- runtime.BuildEvent) (runtime.ImageRef, error) {
-	return runtime.ImageRef{}, runtime.ErrNotImplemented
-}
-
-func (*Runtime) PullImage(context.Context, string, chan<- runtime.BuildEvent) (runtime.ImageRef, error) {
-	return runtime.ImageRef{}, runtime.ErrNotImplemented
-}
-
-func (*Runtime) RunContainer(context.Context, runtime.RunSpec) (*runtime.Container, error) {
-	return nil, runtime.ErrNotImplemented
-}
-
-func (*Runtime) StartContainer(context.Context, string) error {
-	return runtime.ErrNotImplemented
-}
+// InspectContainer, InspectImage, FindContainerByLabel — PR-B
+// (inspect_darwin_arm64.go).
+// RunContainer, StartContainer, StopContainer, RemoveContainer — PR-C
+// (lifecycle_darwin_arm64.go).
+// ExecContainer — PR-D (exec_darwin_arm64.go).
+// ContainerLogs — PR-E (logs_darwin_arm64.go).
+// PullImage — PR-F (pull_darwin_arm64.go).
+// BuildImage — PR-G2 (build_darwin_arm64.go; full BuildKit gRPC
+// build flow, paired with the bridge's build.swift).

-func (*Runtime) StopContainer(context.Context, string, runtime.StopOptions) error {
-	return runtime.ErrNotImplemented
-}
-
-func (*Runtime) RemoveContainer(context.Context, string, runtime.RemoveOptions) error {
-	return runtime.ErrNotImplemented
-}
-
-func (*Runtime) ExecContainer(context.Context, string, runtime.ExecOptions) (runtime.ExecResult, error) {
-	return runtime.ExecResult{}, runtime.ErrNotImplemented
-}
-
-// InspectContainer, InspectImage, and FindContainerByLabel live in
-// inspect_darwin_arm64.go (PR-B).
-
-func (*Runtime) ContainerLogs(context.Context, string, io.Writer, bool) error {
-	return runtime.ErrNotImplemented
-}
diff --git a/runtime/applecontainer/shim.c b/runtime/applecontainer/shim.c
index ba29733..bd1cf1f 100644
--- a/runtime/applecontainer/shim.c
+++ b/runtime/applecontainer/shim.c
@@ -14,6 +14,21 @@ static const char* (*p_ac_inspect_container)(const char*) = NULL;
 static const char* (*p_ac_inspect_image)(const char*) = NULL;
 static const char* (*p_ac_find_container_by_label)(const char*, const char*) = NULL;
 
+static const char* (*p_ac_run)(const char*) = NULL;
+static const char* (*p_ac_start)(const char*) = NULL;
+static const char* (*p_ac_stop)(const char*, int32_t) = NULL;
+static const char* (*p_ac_delete)(const char*, int32_t) = NULL;
+
+static const char* (*p_ac_exec_start)(const char*, const char*, int32_t, int32_t, int32_t) = NULL;
+static const char* (*p_ac_exec_wait)(uint64_t, int32_t) = NULL;
+static const char* (*p_ac_exec_signal)(uint64_t, int32_t) = NULL;
+static void (*p_ac_exec_release)(uint64_t) = NULL;
+
+static const char* (*p_ac_logs_open)(const char*) = NULL;
+static const char* (*p_ac_pull_image)(const char*) = NULL;
+static const char* (*p_ac_build_probe)(void) = NULL;
+static const char* (*p_ac_build)(const char*) = NULL;
+
 static void copy_err(char* errbuf, size_t errlen, const char* msg) {
 	if (!errbuf || errlen == 0) {
 		return;
@@ -48,10 +63,27 @@ int ac_load(const char* path, char* errbuf, size_t errlen) {
 	p_ac_inspect_container = (const char* (*)(const char*)) dlsym(h, "ac_inspect_container");
 	p_ac_inspect_image = (const char* (*)(const char*)) dlsym(h, "ac_inspect_image");
 	p_ac_find_container_by_label = (const char* (*)(const char*, const char*)) dlsym(h, "ac_find_container_by_label");
+	p_ac_run = (const char* (*)(const char*)) dlsym(h, "ac_run");
+	p_ac_start = (const char* (*)(const char*)) dlsym(h, "ac_start");
+	p_ac_stop = (const char* (*)(const char*, int32_t)) dlsym(h, "ac_stop");
+	p_ac_delete = (const char* (*)(const char*, int32_t)) dlsym(h, "ac_delete");
+	p_ac_exec_start = (const char* (*)(const char*, const char*, int32_t, int32_t, int32_t)) dlsym(h, "ac_exec_start");
+	p_ac_exec_wait = (const char* (*)(uint64_t, int32_t)) dlsym(h, "ac_exec_wait");
+	p_ac_exec_signal = (const char* (*)(uint64_t, int32_t)) dlsym(h, "ac_exec_signal");
+	p_ac_exec_release = (void (*)(uint64_t)) dlsym(h, "ac_exec_release");
+	p_ac_logs_open = (const char* (*)(const char*)) dlsym(h, "ac_logs_open");
+	p_ac_pull_image = (const char* (*)(const char*)) dlsym(h, "ac_pull_image");
+	p_ac_build_probe = (const char* (*)(void)) dlsym(h, "ac_build_probe");
+	p_ac_build = (const char* (*)(const char*)) dlsym(h, "ac_build");
 	if (!p_ac_version || !p_ac_ping || !p_ac_free
 		|| !p_ac_inspect_container || !p_ac_inspect_image
-		|| !p_ac_find_container_by_label) {
+		|| !p_ac_find_container_by_label
+		|| !p_ac_run ||
!p_ac_start || !p_ac_stop || !p_ac_delete + || !p_ac_exec_start || !p_ac_exec_wait + || !p_ac_exec_signal || !p_ac_exec_release + || !p_ac_logs_open || !p_ac_pull_image + || !p_ac_build_probe || !p_ac_build) { const char* err = dlerror(); copy_err(errbuf, errlen, err ? err : "dlsym returned null"); // Reset any partial resolutions so a future retry sees a clean @@ -63,6 +95,18 @@ int ac_load(const char* path, char* errbuf, size_t errlen) { p_ac_inspect_container = NULL; p_ac_inspect_image = NULL; p_ac_find_container_by_label = NULL; + p_ac_run = NULL; + p_ac_start = NULL; + p_ac_stop = NULL; + p_ac_delete = NULL; + p_ac_exec_start = NULL; + p_ac_exec_wait = NULL; + p_ac_exec_signal = NULL; + p_ac_exec_release = NULL; + p_ac_logs_open = NULL; + p_ac_pull_image = NULL; + p_ac_build_probe = NULL; + p_ac_build = NULL; dlclose(h); return -1; } @@ -94,3 +138,61 @@ const char* ac_inspect_image_p(const char* reference) { const char* ac_find_container_by_label_p(const char* key, const char* value) { return p_ac_find_container_by_label ? p_ac_find_container_by_label(key, value) : NULL; } + +const char* ac_run_p(const char* spec_json) { + return p_ac_run ? p_ac_run(spec_json) : NULL; +} + +const char* ac_start_p(const char* id) { + return p_ac_start ? p_ac_start(id) : NULL; +} + +const char* ac_stop_p(const char* id, int32_t timeout_seconds) { + return p_ac_stop ? p_ac_stop(id, timeout_seconds) : NULL; +} + +const char* ac_delete_p(const char* id, int32_t force) { + return p_ac_delete ? p_ac_delete(id, force) : NULL; +} + +const char* ac_exec_start_p( + const char* id, + const char* opts_json, + int32_t stdin_read_fd, + int32_t stdout_write_fd, + int32_t stderr_write_fd +) { + return p_ac_exec_start + ? p_ac_exec_start(id, opts_json, stdin_read_fd, stdout_write_fd, stderr_write_fd) + : NULL; +} + +const char* ac_exec_wait_p(uint64_t handle, int32_t timeout_seconds) { + return p_ac_exec_wait ? p_ac_exec_wait(handle, timeout_seconds) : NULL; +} + +const char* ac_exec_signal_p(uint64_t handle, int32_t signal) { + return p_ac_exec_signal ? p_ac_exec_signal(handle, signal) : NULL; +} + +void ac_exec_release_p(uint64_t handle) { + if (p_ac_exec_release) { + p_ac_exec_release(handle); + } +} + +const char* ac_logs_open_p(const char* id) { + return p_ac_logs_open ? p_ac_logs_open(id) : NULL; +} + +const char* ac_pull_image_p(const char* reference) { + return p_ac_pull_image ? p_ac_pull_image(reference) : NULL; +} + +const char* ac_build_probe_p(void) { + return p_ac_build_probe ? p_ac_build_probe() : NULL; +} + +const char* ac_build_p(const char* spec_json) { + return p_ac_build ? 
p_ac_build(spec_json) : NULL; +} diff --git a/runtime/applecontainer/shim.h b/runtime/applecontainer/shim.h index 5d30a53..7f6af5a 100644 --- a/runtime/applecontainer/shim.h +++ b/runtime/applecontainer/shim.h @@ -36,4 +36,27 @@ const char* ac_inspect_container_p(const char* id); const char* ac_inspect_image_p(const char* reference); const char* ac_find_container_by_label_p(const char* key, const char* value); +const char* ac_run_p(const char* spec_json); +const char* ac_start_p(const char* id); +const char* ac_stop_p(const char* id, int32_t timeout_seconds); +const char* ac_delete_p(const char* id, int32_t force); + +const char* ac_exec_start_p( + const char* id, + const char* opts_json, + int32_t stdin_read_fd, + int32_t stdout_write_fd, + int32_t stderr_write_fd +); +const char* ac_exec_wait_p(uint64_t handle, int32_t timeout_seconds); +const char* ac_exec_signal_p(uint64_t handle, int32_t signal); +void ac_exec_release_p(uint64_t handle); + +const char* ac_logs_open_p(const char* id); + +const char* ac_pull_image_p(const char* reference); + +const char* ac_build_probe_p(void); +const char* ac_build_p(const char* spec_json); + #endif diff --git a/runtime/errors.go b/runtime/errors.go index b400e79..a89483f 100644 --- a/runtime/errors.go +++ b/runtime/errors.go @@ -91,3 +91,37 @@ func (e *DaemonUnavailableError) Error() string { } func (e *DaemonUnavailableError) Unwrap() error { return e.Err } + +// BuilderUnavailableError indicates the container engine's image-build +// component is missing or not running. Distinct from +// DaemonUnavailableError because the build engine is typically a +// separate process / VM that can be started independently (e.g. +// Apple's `container builder start`, Docker's BuildKit daemon). +type BuilderUnavailableError struct { + // Hint is a backend-specific message telling the user how to + // remediate (e.g. "run `container builder start`"). + Hint string + Err error +} + +func (e *BuilderUnavailableError) Error() string { + if e.Hint != "" { + return fmt.Sprintf("image build engine unavailable (%s): %v", e.Hint, e.Err) + } + return fmt.Sprintf("image build engine unavailable: %v", e.Err) +} + +func (e *BuilderUnavailableError) Unwrap() error { return e.Err } + +// UnsupportedOptionError indicates a RunSpec / BuildSpec field that +// the chosen backend cannot honor. Returned at the boundary instead +// of silently dropping the option, so callers fail fast rather than +// observing apparent success with missing behavior. +type UnsupportedOptionError struct { + Backend string + Option string +} + +func (e *UnsupportedOptionError) Error() string { + return fmt.Sprintf("%s: option %q is not supported on this backend", e.Backend, e.Option) +} diff --git a/test/integration/applecontainer_build_source_test.go b/test/integration/applecontainer_build_source_test.go new file mode 100644 index 0000000..1b46257 --- /dev/null +++ b/test/integration/applecontainer_build_source_test.go @@ -0,0 +1,123 @@ +//go:build integration && darwin && arm64 + +// Apple-container backend: build-source devcontainers (no features). +// Newly enabled by PR-G2's full BuildKit integration. Replaces the +// PR-H stub (TestAppleContainer_BuildSource_DocumentsLimitation) for +// the happy path; the limitation test stays as a builder-not-running +// contract assertion. 
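+//
+// These tests assume the builder is already running; when it isn't,
+// BuildImage surfaces runtime.BuilderUnavailableError and the happy
+// path below fails loudly instead of skipping.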
diff --git a/test/integration/applecontainer_build_source_test.go b/test/integration/applecontainer_build_source_test.go
new file mode 100644
index 0000000..1b46257
--- /dev/null
+++ b/test/integration/applecontainer_build_source_test.go
@@ -0,0 +1,123 @@
+//go:build integration && darwin && arm64
+
+// Apple-container backend: build-source devcontainers (no features).
+// Newly enabled by PR-G2's full BuildKit integration. Replaces the
+// PR-H stub (TestAppleContainer_BuildSource_DocumentsLimitation) for
+// the happy path; the builder-not-running contract is now pinned by
+// a unit test instead (see the note in applecontainer_image_source_test.go).
+
+package integration
+
+import (
+	"context"
+	"os"
+	"path/filepath"
+	"strings"
+	"testing"
+	"time"
+
+	devcontainer "github.com/crunchloop/devcontainer"
+)
+
+// TestAppleContainer_BuildSource_FullLifecycle is the basic
+// build-source Up/Exec/Down: devcontainer.json declares
+// `build: { dockerfile: ... }`, the engine builds via PR-G2's
+// BuildImage and runs the resulting image, and an exec confirms a
+// baked-in marker file is present.
+func TestAppleContainer_BuildSource_FullLifecycle(t *testing.T) {
+	if testing.Short() {
+		t.Skip("integration tests skipped with -short")
+	}
+
+	eng, _ := newAppleContainerEngine(t)
+
+	ws := t.TempDir()
+	dcDir := filepath.Join(ws, ".devcontainer")
+	if err := os.MkdirAll(dcDir, 0o755); err != nil {
+		t.Fatal(err)
+	}
+	if err := writeFile(filepath.Join(dcDir, "Dockerfile"),
+		"FROM docker.io/library/alpine:latest\nRUN echo built-by-bucket-a > /bucket-a-marker\n"); err != nil {
+		t.Fatal(err)
+	}
+	if err := writeFile(filepath.Join(dcDir, "devcontainer.json"),
+		`{"build":{"dockerfile":"Dockerfile"}}`); err != nil {
+		t.Fatal(err)
+	}
+
+	ctx, cancel := context.WithTimeout(context.Background(), 5*time.Minute)
+	defer cancel()
+
+	wsObj, err := eng.Up(ctx, devcontainer.UpOptions{
+		LocalWorkspaceFolder: ws,
+		Recreate:             true,
+	})
+	if err != nil {
+		t.Fatalf("Up: %v", err)
+	}
+	defer func() {
+		_ = eng.Down(context.Background(), wsObj, devcontainer.DownOptions{Remove: true})
+	}()
+
+	res, err := eng.Exec(ctx, wsObj, devcontainer.ExecOptions{
+		Cmd: []string{"cat", "/bucket-a-marker"},
+	})
+	if err != nil {
+		t.Fatalf("Exec: %v", err)
+	}
+	if got := strings.TrimSpace(res.Stdout); got != "built-by-bucket-a" {
+		t.Errorf("marker contents = %q, want %q", got, "built-by-bucket-a")
+	}
+}
+
+// TestAppleContainer_BuildSource_BuildArgs verifies that build args
+// from devcontainer.json reach the Dockerfile.
+func TestAppleContainer_BuildSource_BuildArgs(t *testing.T) {
+	if testing.Short() {
+		t.Skip("integration tests skipped with -short")
+	}
+
+	eng, _ := newAppleContainerEngine(t)
+
+	ws := t.TempDir()
+	dcDir := filepath.Join(ws, ".devcontainer")
+	if err := os.MkdirAll(dcDir, 0o755); err != nil {
+		t.Fatal(err)
+	}
+	if err := writeFile(filepath.Join(dcDir, "Dockerfile"),
+		"FROM docker.io/library/alpine:latest\nARG MYARG=unset\nRUN echo $MYARG > /arg-marker\n"); err != nil {
+		t.Fatal(err)
+	}
+	if err := writeFile(filepath.Join(dcDir, "devcontainer.json"), `{
+		"build": {
+			"dockerfile": "Dockerfile",
+			"args": {"MYARG": "value-from-config"}
+		}
+	}`); err != nil {
+		t.Fatal(err)
+	}
+
+	ctx, cancel := context.WithTimeout(context.Background(), 5*time.Minute)
+	defer cancel()
+
+	wsObj, err := eng.Up(ctx, devcontainer.UpOptions{
+		LocalWorkspaceFolder: ws,
+		Recreate:             true,
+	})
+	if err != nil {
+		t.Fatalf("Up: %v", err)
+	}
+	defer func() {
+		_ = eng.Down(context.Background(), wsObj, devcontainer.DownOptions{Remove: true})
+	}()
+
+	res, err := eng.Exec(ctx, wsObj, devcontainer.ExecOptions{
+		Cmd: []string{"cat", "/arg-marker"},
+	})
+	if err != nil {
+		t.Fatalf("Exec: %v", err)
+	}
+	if got := strings.TrimSpace(res.Stdout); got != "value-from-config" {
+		t.Errorf("$MYARG = %q, want %q (build args not plumbed)", got, "value-from-config")
+	}
+}
diff --git a/test/integration/applecontainer_features_test.go b/test/integration/applecontainer_features_test.go
new file mode 100644
index 0000000..3f165a8
--- /dev/null
+++ b/test/integration/applecontainer_features_test.go
@@ -0,0 +1,107 @@
+//go:build integration && darwin && arm64
+
+// Apple-container backend: features probe (A2). The feature pipeline
+// is engine-level Go code (feature/* + engine.up's layerFeatures path)
+// that ultimately calls runtime.BuildImage — which apple-container
+// now has since PR-G2. The pipeline should "just work" on this
+// backend.
+//
+// This file ships ONE probe test. If it passes, features work on
+// apple-container with no further changes. If it fails, it fails the
+// suite loudly — feature support is a design-level contract on this
+// backend, so a regression here is a real bug, not a "known TODO".
+
+package integration
+
+import (
+	"context"
+	"os"
+	"path/filepath"
+	"strings"
+	"testing"
+	"time"
+
+	devcontainer "github.com/crunchloop/devcontainer"
+)
+
+// TestAppleContainer_Features_LocalFeatureInstalls probes the feature
+// pipeline end-to-end: image-source + a local feature that drops a
+// marker file. Mirrors TestImageSource_WithLocalFeature in the docker
+// suite.
+func TestAppleContainer_Features_LocalFeatureInstalls(t *testing.T) {
+	if testing.Short() {
+		t.Skip("integration tests skipped with -short")
+	}
+
+	eng, _ := newAppleContainerEngine(t)
+
+	dir := t.TempDir()
+	dcDir := filepath.Join(dir, ".devcontainer")
+	if err := os.MkdirAll(dcDir, 0o755); err != nil {
+		t.Fatal(err)
+	}
+	if err := writeFile(filepath.Join(dcDir, "devcontainer.json"), `{
+		"image": "docker.io/library/alpine:latest",
+		"features": { "./local-feature": {} }
+	}`); err != nil {
+		t.Fatal(err)
+	}
+
+	featureDir := filepath.Join(dcDir, "local-feature")
+	if err := os.MkdirAll(featureDir, 0o755); err != nil {
+		t.Fatal(err)
+	}
+	if err := writeFile(filepath.Join(featureDir, "devcontainer-feature.json"), `{
+		"id": "stamp",
+		"version": "1.0.0",
+		"containerEnv": { "STAMP": "from-apple-feature" }
+	}`); err != nil {
+		t.Fatal(err)
+	}
+	if err := writeFile(filepath.Join(featureDir, "install.sh"), `#!/bin/sh
+set -e
+echo apple-feature-ran > /etc/feature-marker
+`); err != nil {
+		t.Fatal(err)
+	}
+	if err := os.Chmod(filepath.Join(featureDir, "install.sh"), 0o755); err != nil {
+		t.Fatal(err)
+	}
+
+	ctx, cancel := context.WithTimeout(context.Background(), 5*time.Minute)
+	defer cancel()
+
+	wsObj, err := eng.Up(ctx, devcontainer.UpOptions{
+		LocalWorkspaceFolder: dir,
+		Recreate:             true,
+		SkipLifecycle:        true,
+	})
+	if err != nil {
+		t.Fatalf("feature install failed on apple-container: %v", err)
+	}
+	defer func() {
+		_ = eng.Down(context.Background(), wsObj, devcontainer.DownOptions{Remove: true})
+	}()
+
+	res, err := eng.Exec(ctx, wsObj, devcontainer.ExecOptions{
+		Cmd: []string{"cat", "/etc/feature-marker"},
+	})
+	if err != nil {
+		t.Fatalf("Exec feature-marker: %v", err)
+	}
+	if !strings.Contains(res.Stdout, "apple-feature-ran") {
+		t.Errorf("feature did not run on apple-container image-source path: %q", res.Stdout)
+	}
+
+	// STAMP env from the feature should reach Exec.
+	res, err = eng.Exec(ctx, wsObj, devcontainer.ExecOptions{
+		Cmd: []string{"printenv", "STAMP"},
+	})
+	if err != nil {
+		t.Fatalf("Exec printenv STAMP: %v", err)
+	}
+	if got := strings.TrimSpace(res.Stdout); got != "from-apple-feature" {
+		t.Errorf("STAMP = %q, want %q (feature containerEnv didn't reach Exec)", got, "from-apple-feature")
+	}
+}
diff --git a/test/integration/applecontainer_image_metadata_test.go b/test/integration/applecontainer_image_metadata_test.go
new file mode 100644
index 0000000..e5ea67b
--- /dev/null
+++ b/test/integration/applecontainer_image_metadata_test.go
@@ -0,0 +1,156 @@
+//go:build integration && darwin && arm64
+
+// Apple-container backend: image-metadata fast path. Builds an image
+// carrying a devcontainer.metadata label, then exercises both the
+// "metadata declares remoteUser" path and the "user override wins"
+// path. Mirrors image_metadata_test.go for the docker backend.
+
+package integration
+
+import (
+	"context"
+	"errors"
+	"os"
+	"path/filepath"
+	"strings"
+	"testing"
+	"time"
+
+	devcontainer "github.com/crunchloop/devcontainer"
+	"github.com/crunchloop/devcontainer/runtime"
+	"github.com/crunchloop/devcontainer/runtime/applecontainer"
+)
+
+// buildAppleLabeledImage builds a small image carrying a
+// devcontainer.metadata label + a baked-in non-root user. Uses our
+// PR-G2 BuildImage path under the hood.
+func buildAppleLabeledImage(t *testing.T, rt *applecontainer.Runtime, label string) string {
+	t.Helper()
+	ctx, cancel := context.WithTimeout(context.Background(), 5*time.Minute)
+	defer cancel()
+
+	dir := t.TempDir()
+	// Use docker.io/library/alpine:latest explicitly — Apple's image
+	// resolver doesn't apply a default registry like Docker's does.
+	df := `FROM docker.io/library/alpine:latest
+RUN adduser -D -s /bin/sh vscode
+LABEL devcontainer.metadata='` + label + `'
+`
+	if err := os.WriteFile(filepath.Join(dir, "Dockerfile"), []byte(df), 0o644); err != nil {
+		t.Fatal(err)
+	}
+
+	tag := "dc-it-ac-metadata-" + strings.ReplaceAll(strings.ToLower(t.Name()), "/", "-") + ":latest"
+
+	if _, err := rt.BuildImage(ctx, runtime.BuildSpec{
+		ContextPath: dir,
+		Dockerfile:  "Dockerfile",
+		Tag:         tag,
+	}, nil); err != nil {
+		var unavail *runtime.BuilderUnavailableError
+		if isUnavailErr(err, &unavail) {
+			t.Skipf("builder not running (run `container builder start`): %v", err)
+		}
+		t.Fatalf("BuildImage: %v", err)
+	}
+	return tag
+}
+
+// isUnavailErr reports whether err wraps a *BuilderUnavailableError,
+// storing the match in dst. A thin errors.As wrapper that keeps the
+// per-test skip boilerplate compact.
+func isUnavailErr(err error, dst **runtime.BuilderUnavailableError) bool {
+	return errors.As(err, dst)
+}
+
+func TestAppleContainer_ImageMetadata_RemoteUserHonored(t *testing.T) {
+	if testing.Short() {
+		t.Skip("integration tests skipped with -short")
+	}
+
+	eng, rt := newAppleContainerEngine(t)
+
+	tag := buildAppleLabeledImage(t, rt,
+		`[{"id":"common-utils","version":"2"},{"remoteUser":"vscode","containerUser":"vscode"}]`)
+
+	ws := writeWorkspace(t, `{"image":"`+tag+`"}`)
+
+	ctx, cancel := context.WithTimeout(context.Background(), 5*time.Minute)
+	defer cancel()
+
+	wsObj, err := eng.Up(ctx, devcontainer.UpOptions{
+		LocalWorkspaceFolder: ws,
+		Recreate:             true,
+	})
+	if err != nil {
+		t.Fatalf("Up: %v", err)
+	}
+	defer func() {
+		_ = eng.Down(context.Background(), wsObj, devcontainer.DownOptions{Remove: true})
+	}()
+
+	res, err := eng.Exec(ctx, wsObj, devcontainer.ExecOptions{
+		Cmd: []string{"whoami"},
+	})
+	if err != nil {
+		t.Fatalf("Exec whoami: %v", err)
+	}
+	if res.ExitCode != 0 {
+		t.Fatalf("whoami exit=%d stderr=%q", res.ExitCode, res.Stderr)
+	}
+	if got := strings.TrimSpace(res.Stdout); got != "vscode" {
+		t.Errorf("whoami = %q, want %q (image-metadata remoteUser must reach Engine.Exec)", got, "vscode")
+	}
+}
+
+func TestAppleContainer_ImageMetadata_UserOverrideWins(t *testing.T) {
+	if testing.Short() {
+		t.Skip("integration tests skipped with -short")
+	}
+
+	eng, rt := newAppleContainerEngine(t)
+
+	tag := buildAppleLabeledImage(t, rt,
+		`[{"remoteUser":"vscode","containerUser":"vscode"}]`)
+
+	ws := writeWorkspace(t, `{"image":"`+tag+`","remoteUser":"root"}`)
+
+	ctx, cancel := context.WithTimeout(context.Background(), 5*time.Minute)
+	defer cancel()
+
+	wsObj, err := eng.Up(ctx, devcontainer.UpOptions{
+		LocalWorkspaceFolder: ws,
+		Recreate:             true,
+	})
+	if err != nil {
+		t.Fatalf("Up: %v", err)
+	}
+	defer func() {
+		_ = eng.Down(context.Background(), wsObj, devcontainer.DownOptions{Remove: true})
+	}()
+
+	res, err := eng.Exec(ctx, wsObj, devcontainer.ExecOptions{
+		Cmd: []string{"whoami"},
+	})
+	if err != nil {
+		t.Fatalf("Exec whoami: %v", err)
+	}
+	if got := strings.TrimSpace(res.Stdout); got != "root" {
+		t.Errorf("whoami = %q, want %q (devcontainer.json remoteUser must beat image metadata)", got, "root")
+	}
+}
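// Illustrative sketch — not part of this patch. The precedence
// contract the two tests above pin: entries in the
// devcontainer.metadata label array apply in order (later wins), and
// devcontainer.json overrides the merged result. The struct and
// helper are hypothetical stand-ins for the engine's real merge code.
//
//	type metadataEntry struct {
//		RemoteUser string `json:"remoteUser"`
//	}
//
//	func effectiveRemoteUser(label []metadataEntry, configUser string) string {
//		user := ""
//		for _, e := range label { // later entries win
//			if e.RemoteUser != "" {
//				user = e.RemoteUser
//			}
//		}
//		if configUser != "" { // devcontainer.json beats image metadata
//			user = configUser
//		}
//		return user
//	}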
diff --git a/test/integration/applecontainer_image_source_test.go b/test/integration/applecontainer_image_source_test.go
new file mode 100644
index 0000000..f7d28a7
--- /dev/null
+++ b/test/integration/applecontainer_image_source_test.go
@@ -0,0 +1,341 @@
+//go:build integration && darwin && arm64
+
+// Apple-container backend integration tests. PR-H shipped the
+// image-source full-lifecycle test — Up, Exec, Down — and documented
+// the subset of M2/M3 fixtures expected to pass on this backend.
+// Build (§8) and features coverage now land alongside PR-G2 in the
+// sibling applecontainer_*_test.go files; compose stays out of scope
+// (§9), and the deliberate absence of UID reconciliation is pinned in
+// applecontainer_uid_reconcile_test.go (§13.8). Section references
+// are to design/runtime-applecontainer.md.
+//
+// To run: `make bridge && go test -tags=integration ./test/integration/...`
+// Daemon prerequisite: `brew install container && container system start`.
+
+package integration
+
+import (
+	"context"
+	"errors"
+	"os"
+	"strings"
+	"testing"
+	"time"
+
+	devcontainer "github.com/crunchloop/devcontainer"
+	"github.com/crunchloop/devcontainer/runtime"
+	"github.com/crunchloop/devcontainer/runtime/applecontainer"
+)
+
+func newAppleContainerEngine(t *testing.T) (*devcontainer.Engine, *applecontainer.Runtime) {
+	t.Helper()
+	ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
+	defer cancel()
+	rt, err := applecontainer.New(ctx, applecontainer.Options{PingTimeoutSeconds: 5})
+	if err != nil {
+		var unavail *runtime.DaemonUnavailableError
+		if errors.As(err, &unavail) {
+			t.Skipf("apple-container daemon unreachable (`container system start` required): %v", err)
+		}
+		t.Fatalf("applecontainer.New: %v", err)
+	}
+	eng, err := devcontainer.New(devcontainer.EngineOptions{Runtime: rt})
+	if err != nil {
+		t.Fatalf("devcontainer.New: %v", err)
+	}
+	return eng, rt
+}
+
+// TestAppleContainer_ImageSource_FullLifecycle proves the engine
+// integration: Up an `image:` devcontainer through the apple-container
+// backend, Exec, Down. Mirrors the M2 image-source test against
+// runtime/docker (image_source_test.go).
+func TestAppleContainer_ImageSource_FullLifecycle(t *testing.T) {
+	if testing.Short() {
+		t.Skip("integration tests skipped with -short")
+	}
+
+	eng, _ := newAppleContainerEngine(t)
+	ws := writeWorkspace(t, `{
+		"image": "docker.io/library/alpine:latest",
+		"containerEnv": {
+			"CUSTOM_VAR": "hello-from-apple-container"
+		}
+	}`)
+
+	ctx, cancel := context.WithTimeout(context.Background(), 5*time.Minute)
+	defer cancel()
+
+	t.Logf("Up: %s", ws)
+	wsObj, err := eng.Up(ctx, devcontainer.UpOptions{
+		LocalWorkspaceFolder: ws,
+		Recreate:             true,
+	})
+	if err != nil {
+		t.Fatalf("Up: %v", err)
+	}
+	defer func() {
+		downCtx, downCancel := context.WithTimeout(context.Background(), 30*time.Second)
+		defer downCancel()
+		if err := eng.Down(downCtx, wsObj, devcontainer.DownOptions{Remove: true}); err != nil {
+			t.Errorf("Down: %v", err)
+		}
+	}()
+
+	// Exec a simple command and assert stdout + the engine-injected
+	// containerEnv variable both make it through.
+	execCtx, execCancel := context.WithTimeout(context.Background(), 30*time.Second)
+	defer execCancel()
+	res, err := eng.Exec(execCtx, wsObj, devcontainer.ExecOptions{
+		Cmd: []string{"/bin/sh", "-c", "echo marker; echo CV=$CUSTOM_VAR"},
+	})
+	if err != nil {
+		t.Fatalf("Exec: %v", err)
+	}
+	if res.ExitCode != 0 {
+		t.Errorf("Exec exit: want 0 got %d (stderr=%q)", res.ExitCode, res.Stderr)
+	}
+	if !strings.Contains(res.Stdout, "marker") {
+		t.Errorf("Exec stdout missing marker; got %q", res.Stdout)
+	}
+	if !strings.Contains(res.Stdout, "CV=hello-from-apple-container") {
+		t.Errorf("Exec stdout missing containerEnv injection; got %q", res.Stdout)
+	}
+}
+
+// (removed) TestAppleContainer_BuildSource_DocumentsLimitation — the
+// PR-H stub that asserted build-source devcontainers failed with our
+// typed BuilderUnavailableError or PR-G's "not yet implemented" error,
+// whichever applied. PR-G2 implemented the real build path, so the
+// happy-path replacement is TestAppleContainer_BuildSource_FullLifecycle
+// in applecontainer_build_source_test.go. The builder-down typed-error
+// path is still covered by TestBuildImage_BuilderDownTypedError in
+// the runtime package's unit tests.
+
+// TestAppleContainer_ReattachStopped covers the Up → Down(no remove)
+// → Up flow: the second Up should find the stopped container by
+// label and restart it rather than creating a fresh one. Exercises
+// FindContainerByLabel + StartContainer idempotency through the
+// engine.
+func TestAppleContainer_ReattachStopped(t *testing.T) {
+	if testing.Short() {
+		t.Skip("integration tests skipped with -short")
+	}
+
+	eng, _ := newAppleContainerEngine(t)
+	ws := writeWorkspace(t, `{"image":"docker.io/library/alpine:latest"}`)
+	ctx, cancel := context.WithTimeout(context.Background(), 5*time.Minute)
+	defer cancel()
+
+	first, err := eng.Up(ctx, devcontainer.UpOptions{
+		LocalWorkspaceFolder: ws,
+		Recreate:             true,
+	})
+	if err != nil {
+		t.Fatalf("first Up: %v", err)
+	}
+	defer func() {
+		_ = eng.Down(context.Background(), first, devcontainer.DownOptions{Remove: true})
+	}()
+
+	if err := eng.Down(ctx, first, devcontainer.DownOptions{}); err != nil {
+		t.Fatalf("Down (no remove): %v", err)
+	}
+
+	second, err := eng.Up(ctx, devcontainer.UpOptions{LocalWorkspaceFolder: ws})
+	if err != nil {
+		t.Fatalf("second Up: %v", err)
+	}
+	// If reattach worked, `second` and `first` are the same container
+	// and the existing defer on `first` will clean up. But if the ID
+	// changed (the very bug this test is pinning), `first`'s teardown
+	// leaves `second` running and contaminates later tests — so
+	// schedule its teardown unconditionally.
+	defer func() {
+		_ = eng.Down(context.Background(), second, devcontainer.DownOptions{Remove: true})
+	}()
+	if second.Container.ID != first.Container.ID {
+		t.Errorf("container id changed across reattach: first=%q second=%q",
+			first.Container.ID, second.Container.ID)
+	}
+	if second.Container.State != runtime.StateRunning {
+		t.Errorf("state after reattach = %q (want %q)",
+			second.Container.State, runtime.StateRunning)
+	}
+}
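// Illustrative sketch — not part of this patch. The reattach decision
// the test above pins, in rough strokes; the label key and snapshot
// fields are stand-ins for whatever the engine records on Up, while
// FindContainerByLabel and StartContainer mirror the shim exports.
//
//	snap, err := rt.FindContainerByLabel(ctx, workspaceLabelKey, ws)
//	switch {
//	case err == nil && snap.State == runtime.StateRunning:
//		// reuse the running container as-is
//	case err == nil:
//		err = rt.StartContainer(ctx, snap.ID) // restart the stopped one
//	default:
//		// no labeled container found: create a fresh one
//	}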
+
+// TestAppleContainer_LifecycleAndIdempotency runs an image-source
+// devcontainer with postCreate/postStart/postAttach commands and
+// verifies the engine's marker-based idempotency works through the
+// apple-container backend. Exercises eng.Exec extensively (each
+// phase runs a shell command through the runtime's ExecContainer).
+func TestAppleContainer_LifecycleAndIdempotency(t *testing.T) {
+	if testing.Short() {
+		t.Skip("integration tests skipped with -short")
+	}
+
+	eng, _ := newAppleContainerEngine(t)
+	ws := writeWorkspace(t, `{
+		"image": "docker.io/library/alpine:latest",
+		"postCreateCommand": "echo create >> /tmp/dc-counter",
+		"postStartCommand": "echo start >> /tmp/dc-counter",
+		"postAttachCommand": "echo attach >> /tmp/dc-counter"
+	}`)
+
+	ctx, cancel := context.WithTimeout(context.Background(), 5*time.Minute)
+	defer cancel()
+
+	wsObj, err := eng.Up(ctx, devcontainer.UpOptions{
+		LocalWorkspaceFolder: ws,
+		Recreate:             true,
+	})
+	if err != nil {
+		t.Fatalf("Up: %v", err)
+	}
+	defer func() {
+		_ = eng.Down(context.Background(), wsObj, devcontainer.DownOptions{Remove: true})
+	}()
+
+	out := mustRead(t, ctx, eng, wsObj, "/tmp/dc-counter")
+	if got := strings.Count(out, "create"); got != 1 {
+		t.Errorf("after first Up: create count = %d, want 1\n%s", got, out)
+	}
+	if got := strings.Count(out, "start"); got != 1 {
+		t.Errorf("after first Up: start count = %d, want 1\n%s", got, out)
+	}
+	if got := strings.Count(out, "attach"); got != 1 {
+		t.Errorf("after first Up: attach count = %d, want 1\n%s", got, out)
+	}
+
+	if err := eng.Down(ctx, wsObj, devcontainer.DownOptions{}); err != nil {
+		t.Fatalf("Down (no remove): %v", err)
+	}
+	wsObj2, err := eng.Up(ctx, devcontainer.UpOptions{LocalWorkspaceFolder: ws})
+	if err != nil {
+		t.Fatalf("second Up: %v", err)
+	}
+	defer func() {
+		_ = eng.Down(context.Background(), wsObj2, devcontainer.DownOptions{Remove: true})
+	}()
+
+	out = mustRead(t, ctx, eng, wsObj2, "/tmp/dc-counter")
+	if got := strings.Count(out, "create"); got != 1 {
+		t.Errorf("after restart: create count = %d, want 1 (idempotent)\n%s", got, out)
+	}
+	if got := strings.Count(out, "start"); got != 2 {
+		t.Errorf("after restart: start count = %d, want 2\n%s", got, out)
+	}
+	if got := strings.Count(out, "attach"); got != 2 {
+		t.Errorf("after restart: attach count = %d, want 2\n%s", got, out)
+	}
+}
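// Illustrative sketch — not part of this patch. The once-per-create
// vs every-start semantics the counts above assume; the marker store
// and phase names are hypothetical, only the semantics are the pinned
// contract.
//
//	func shouldRun(phase, createdAt string, lastRun map[string]string) bool {
//		if phase == "postCreate" {
//			return lastRun[phase] != createdAt // once per container creation
//		}
//		return true // postStart / postAttach: every start and attach
//	}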
+
+// TestAppleContainer_WorkspaceMount_Writable proves the design §13.8
+// finding holds end-to-end through the engine: a host workspace
+// directory bind-mounted into the container is writable from
+// inside, even though we don't run updateRemoteUserUID on this
+// backend. Virtiofs is identity-permissive (every container user
+// appears to own the mount); write attempts succeed regardless of
+// UID matching.
+func TestAppleContainer_WorkspaceMount_Writable(t *testing.T) {
+	if testing.Short() {
+		t.Skip("integration tests skipped with -short")
+	}
+
+	eng, _ := newAppleContainerEngine(t)
+	ws := writeWorkspace(t, `{"image":"docker.io/library/alpine:latest"}`)
+
+	ctx, cancel := context.WithTimeout(context.Background(), 5*time.Minute)
+	defer cancel()
+
+	wsObj, err := eng.Up(ctx, devcontainer.UpOptions{
+		LocalWorkspaceFolder: ws,
+		Recreate:             true,
+	})
+	if err != nil {
+		t.Fatalf("Up: %v", err)
+	}
+	defer func() {
+		_ = eng.Down(context.Background(), wsObj, devcontainer.DownOptions{Remove: true})
+	}()
+
+	// The engine binds the workspace folder into the container at
+	// /workspaces/. Write a marker from inside.
+	const marker = "writable-marker-from-vm-99"
+	cmd := "echo " + marker + " > /workspaces/$(basename " + ws + ")/.test-write && cat /workspaces/$(basename " + ws + ")/.test-write"
+	res, err := eng.Exec(ctx, wsObj, devcontainer.ExecOptions{
+		Cmd: []string{"/bin/sh", "-c", cmd},
+	})
+	if err != nil {
+		t.Fatalf("Exec: %v", err)
+	}
+	if res.ExitCode != 0 {
+		t.Fatalf("write failed: exit=%d stderr=%q stdout=%q", res.ExitCode, res.Stderr, res.Stdout)
+	}
+	if !strings.Contains(res.Stdout, marker) {
+		t.Errorf("marker readback mismatch: want contains %q, got %q", marker, res.Stdout)
+	}
+
+	// Confirm the file appears on the host with our content. This
+	// also confirms the host-side mount semantics survive the cycle.
+	data, err := os.ReadFile(ws + "/.test-write")
+	if err != nil {
+		t.Fatalf("host readback: %v", err)
+	}
+	if !strings.Contains(string(data), marker) {
+		t.Errorf("host readback content: want contains %q, got %q", marker, string(data))
+	}
+}
+
+// TestAppleContainer_ComposeSource_DocumentsLimitation asserts the
+// design §9 contract: ComposeRuntime isn't implemented for this
+// backend; the engine returns a clean error rather than crashing.
+func TestAppleContainer_ComposeSource_DocumentsLimitation(t *testing.T) {
+	if testing.Short() {
+		t.Skip("integration tests skipped with -short")
+	}
+
+	eng, _ := newAppleContainerEngine(t)
+	ws := writeWorkspace(t, `{
+		"dockerComposeFile": "docker-compose.yml",
+		"service": "app"
+	}`)
+	composeYAML := "services:\n  app:\n    image: docker.io/library/alpine:latest\n    command: ['sleep','60']\n"
+	if err := os.WriteFile(ws+"/.devcontainer/docker-compose.yml", []byte(composeYAML), 0o644); err != nil {
+		t.Fatalf("write compose: %v", err)
+	}
+
+	ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
+	defer cancel()
+
+	_, err := eng.Up(ctx, devcontainer.UpOptions{
+		LocalWorkspaceFolder: ws,
+		Recreate:             true,
+	})
+	if err == nil {
+		t.Fatal("Up: want error for compose source on apple-container, got nil")
+	}
+	if !errors.Is(err, runtime.ErrNotImplemented) &&
+		!strings.Contains(err.Error(), "not implemented") &&
+		!strings.Contains(err.Error(), "compose") {
+		t.Errorf("error should mention compose or 'not implemented'; got %v", err)
+	}
+	t.Logf("expected compose-source rejection: %v", err)
+}
+
+// writeFile is a small helper used across applecontainer_*_test.go
+// files. Inlining os.WriteFile in every test body bloats the tests
+// without adding clarity.
+func writeFile(path, content string) error {
+	return os.WriteFile(path, []byte(content), 0o644)
+}
+
+// runtimeBuildSpec keeps BuildSpec construction terse in tests that
+// only need ContextPath/Dockerfile/Tag.
+func runtimeBuildSpec(contextDir, tag string) runtime.BuildSpec {
+	return runtime.BuildSpec{ContextPath: contextDir, Dockerfile: "Dockerfile", Tag: tag}
+}
+
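// Illustrative sketch — not part of this patch. The shutdownAction
// dispatch the next file pins; method and field names are stand-ins,
// only the "none" no-op vs "stopContainer" stop semantics are the
// contract under test.
//
//	func (e *Engine) Shutdown(ctx context.Context, ws *Workspace) error {
//		switch ws.Config.ShutdownAction {
//		case "stopContainer":
//			return e.rt.StopContainer(ctx, ws.Container.ID, 10 /* timeout seconds */)
//		default: // "none" and unset: leave the container running
//			return nil
//		}
//	}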
diff --git a/test/integration/applecontainer_shutdown_action_test.go b/test/integration/applecontainer_shutdown_action_test.go
new file mode 100644
index 0000000..a414b23
--- /dev/null
+++ b/test/integration/applecontainer_shutdown_action_test.go
@@ -0,0 +1,94 @@
+//go:build integration && darwin && arm64
+
+// Apple-container backend: shutdownAction semantics. Mirrors
+// shutdown_action_test.go for the docker backend.
+
+package integration
+
+import (
+	"context"
+	"testing"
+	"time"
+
+	devcontainer "github.com/crunchloop/devcontainer"
+	"github.com/crunchloop/devcontainer/runtime"
+)
+
+func TestAppleContainer_ShutdownAction_NoneLeavesRunning(t *testing.T) {
+	if testing.Short() {
+		t.Skip("integration tests skipped with -short")
+	}
+
+	eng, rt := newAppleContainerEngine(t)
+	ws := writeWorkspace(t, `{
+		"image": "docker.io/library/alpine:latest",
+		"shutdownAction": "none"
+	}`)
+
+	ctx, cancel := context.WithTimeout(context.Background(), 5*time.Minute)
+	defer cancel()
+
+	wsObj, err := eng.Up(ctx, devcontainer.UpOptions{
+		LocalWorkspaceFolder: ws,
+		Recreate:             true,
+	})
+	if err != nil {
+		t.Fatalf("Up: %v", err)
+	}
+	defer func() {
+		// Force teardown — Shutdown is a no-op on "none".
+		_ = eng.Down(context.Background(), wsObj, devcontainer.DownOptions{Remove: true})
+	}()
+
+	if err := eng.Shutdown(ctx, wsObj); err != nil {
+		t.Fatalf("Shutdown: %v", err)
+	}
+
+	details, err := rt.InspectContainer(ctx, wsObj.Container.ID)
+	if err != nil {
+		t.Fatalf("InspectContainer: %v", err)
+	}
+	if details.State != runtime.StateRunning {
+		t.Errorf("container state after Shutdown(none) = %q, want %q", details.State, runtime.StateRunning)
+	}
+}
+
+func TestAppleContainer_ShutdownAction_StopContainerStops(t *testing.T) {
+	if testing.Short() {
+		t.Skip("integration tests skipped with -short")
+	}
+
+	eng, rt := newAppleContainerEngine(t)
+	ws := writeWorkspace(t, `{
+		"image": "docker.io/library/alpine:latest",
+		"shutdownAction": "stopContainer"
+	}`)
+
+	ctx, cancel := context.WithTimeout(context.Background(), 5*time.Minute)
+	defer cancel()
+
+	wsObj, err := eng.Up(ctx, devcontainer.UpOptions{
+		LocalWorkspaceFolder: ws,
+		Recreate:             true,
+	})
+	if err != nil {
+		t.Fatalf("Up: %v", err)
+	}
+	defer func() {
+		_ = eng.Down(context.Background(), wsObj, devcontainer.DownOptions{Remove: true})
+	}()
+
+	if err := eng.Shutdown(ctx, wsObj); err != nil {
+		t.Fatalf("Shutdown: %v", err)
+	}
+
+	// Allow the apiserver a moment to flip status.
+	for i := 0; i < 40; i++ {
+		details, err := rt.InspectContainer(ctx, wsObj.Container.ID)
+		if err == nil && details.State != runtime.StateRunning {
+			return
+		}
+		time.Sleep(100 * time.Millisecond)
+	}
+	t.Errorf("container still running 4s after Shutdown(stopContainer)")
+}
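// Illustrative sketch — not part of this patch. What "we DO NOT run
// updateRemoteUserUID here" (next file, design §13.8) means at the
// engine seam; the capability gate is a hypothetical rendering of the
// decision, not the repo's actual code.
//
//	if rt.Capabilities().ReconcileUID { // docker: true, apple-container: false
//		if err := updateRemoteUserUID(ctx, rt, ctr, hostUID, hostGID); err != nil {
//			return err
//		}
//	}
//	// apple-container: skipped — virtiofs already presents the mount
//	// as writable to the container user, so rewriting /etc/passwd
//	// would buy nothing.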
diff --git a/test/integration/applecontainer_uid_reconcile_test.go b/test/integration/applecontainer_uid_reconcile_test.go
new file mode 100644
index 0000000..8caf164
--- /dev/null
+++ b/test/integration/applecontainer_uid_reconcile_test.go
@@ -0,0 +1,119 @@
+//go:build integration && darwin && arm64
+
+// Apple-container backend: the inverted UID-reconcile contract.
+// Design §13.8 declares we DO NOT run updateRemoteUserUID on this
+// backend — virtiofs is identity-permissive, so the docker-style
+// dance of rewriting /etc/passwd to match the host UID would be
+// harmful without buying anything. This test pins that behavior:
+//   - Workspace folder lives at the host user's UID.
+//   - Container has a baked vscode user at a different UID.
+//   - After Up + Exec as vscode, vscode's UID in /etc/passwd must be
+//     UNCHANGED (the baked image-default UID), AND the workspace
+//     mount must still be writable as vscode.
+
+package integration
+
+import (
+	"context"
+	"errors"
+	"os"
+	"path/filepath"
+	"strings"
+	"testing"
+	"time"
+
+	devcontainer "github.com/crunchloop/devcontainer"
+	"github.com/crunchloop/devcontainer/runtime"
+)
+
+func TestAppleContainer_UID_NotReconciled_MountStillWritable(t *testing.T) {
+	if testing.Short() {
+		t.Skip("integration tests skipped with -short")
+	}
+
+	eng, rt := newAppleContainerEngine(t)
+
+	// Build a base image with vscode at UID 4321 (intentionally
+	// unusual so it won't collide with the host's actual UID).
+	dir := t.TempDir()
+	df := `FROM docker.io/library/alpine:latest
+RUN addgroup -g 4321 vscode \
+    && adduser -D -u 4321 -G vscode -s /bin/sh vscode
+`
+	if err := writeFile(filepath.Join(dir, "Dockerfile"), df); err != nil {
+		t.Fatal(err)
+	}
+	tag := "dc-it-ac-uid-baked:latest"
+	ctx, cancel := context.WithTimeout(context.Background(), 5*time.Minute)
+	defer cancel()
+	if _, err := rt.BuildImage(ctx, runtimeBuildSpec(dir, tag), nil); err != nil {
+		var unavail *runtime.BuilderUnavailableError
+		if errors.As(err, &unavail) {
+			t.Skipf("BuildImage (builder not running): %v", err)
+		}
+		t.Fatalf("BuildImage: %v", err)
+	}
+
+	ws := writeWorkspace(t, `{
+		"image": "`+tag+`",
+		"remoteUser": "vscode",
+		"containerUser": "vscode"
+	}`)
+
+	wsObj, err := eng.Up(ctx, devcontainer.UpOptions{
+		LocalWorkspaceFolder: ws,
+		Recreate:             true,
+	})
+	if err != nil {
+		t.Fatalf("Up: %v", err)
+	}
+	defer func() {
+		_ = eng.Down(context.Background(), wsObj, devcontainer.DownOptions{Remove: true})
+	}()
+
+	// Read vscode's UID from inside the container. Apple-container
+	// behavior we're pinning: this MUST still be 4321 — the baked
+	// image-default — not the host UID. If a future engine change
+	// adds UID reconciliation to the apple-container path, this
+	// assertion fails.
+	res, err := eng.Exec(ctx, wsObj, devcontainer.ExecOptions{
+		Cmd: []string{"id", "-u", "vscode"},
+	})
+	if err != nil {
+		t.Fatalf("Exec id -u vscode: %v", err)
+	}
+	if got := strings.TrimSpace(res.Stdout); got != "4321" {
+		t.Errorf("vscode UID inside container = %q, want %q (design §13.8 says we don't reconcile UIDs on apple-container)",
+			got, "4321")
+	}
+
+	// Verify the workspace mount is writable as vscode (virtiofs
+	// identity-permissive contract). Probe-3 from the M6 design
+	// validation already proved this at the runtime layer; here we
+	// re-prove it through the full Engine.Up bind-mount path.
+	const marker = "ac-uid-test-writable-99"
+	// The engine binds the workspace at /workspaces/.
+	cmd := "echo " + marker + " > /workspaces/$(basename " + ws + ")/.uid-test && cat /workspaces/$(basename " + ws + ")/.uid-test"
+	res, err = eng.Exec(ctx, wsObj, devcontainer.ExecOptions{
+		Cmd: []string{"/bin/sh", "-c", cmd},
+	})
+	if err != nil {
+		t.Fatalf("Exec write probe: %v", err)
+	}
+	if res.ExitCode != 0 {
+		t.Fatalf("write probe failed: exit=%d stderr=%q stdout=%q", res.ExitCode, res.Stderr, res.Stdout)
+	}
+	if !strings.Contains(res.Stdout, marker) {
+		t.Errorf("marker not readable; got %q", res.Stdout)
+	}
+
+	// Host-side readback confirms the file made it through virtiofs —
+	// and, from the host's perspective, it shows up under our own UID.
+	data, err := os.ReadFile(ws + "/.uid-test")
+	if err != nil {
+		t.Fatalf("host readback: %v", err)
+	}
+	if !strings.Contains(string(data), marker) {
+		t.Errorf("host readback content mismatch: %q", string(data))
+	}
+}
diff --git a/test/integration/applecontainer_userenvprobe_test.go b/test/integration/applecontainer_userenvprobe_test.go
new file mode 100644
index 0000000..0ae9f37
--- /dev/null
+++ b/test/integration/applecontainer_userenvprobe_test.go
@@ -0,0 +1,159 @@
+//go:build integration && darwin && arm64
+
+// Apple-container backend: userEnvProbe behavior through the engine.
+// Mirrors a representative subset of userenvprobe_test.go for the
+// docker backend (PathFromBashrc + LifecycleSeesBashrcPath + None).
+// The full 6-variant matrix on docker is overkill here; these three
+// hit the load-bearing engine paths.
+
+package integration
+
+import (
+	"context"
+	"errors"
+	"strings"
+	"testing"
+	"time"
+
+	devcontainer "github.com/crunchloop/devcontainer"
+	"github.com/crunchloop/devcontainer/runtime"
+)
+
+const appleBashImage = "docker.io/library/bash:5.2-alpine3.20"
+
+func TestAppleContainer_UserEnvProbe_PathFromBashrc(t *testing.T) {
+	if testing.Short() {
+		t.Skip("integration tests skipped with -short")
+	}
+	eng, _ := newAppleContainerEngine(t)
+
+	ws := writeWorkspace(t, `{
+		"image": "`+appleBashImage+`",
+		"postCreateCommand": "echo 'export EXTRA_PATH=/from/bashrc' > /etc/profile.d/dc-go-test.sh"
+	}`)
+
+	ctx, cancel := context.WithTimeout(context.Background(), 5*time.Minute)
+	defer cancel()
+
+	wsObj, err := eng.Up(ctx, devcontainer.UpOptions{
+		LocalWorkspaceFolder: ws,
+		Recreate:             true,
+	})
+	if err != nil {
+		t.Fatalf("Up: %v", err)
+	}
+	defer func() {
+		_ = eng.Down(context.Background(), wsObj, devcontainer.DownOptions{Remove: true})
+	}()
+
+	res, err := eng.Exec(ctx, wsObj, devcontainer.ExecOptions{
+		Cmd: []string{"printenv", "EXTRA_PATH"},
+	})
+	if err != nil {
+		t.Fatalf("Exec: %v", err)
+	}
+	if res.ExitCode != 0 {
+		t.Fatalf("printenv exit=%d stderr=%q", res.ExitCode, res.Stderr)
+	}
+	if got := strings.TrimSpace(res.Stdout); got != "/from/bashrc" {
+		t.Errorf("EXTRA_PATH = %q, want %q (probedEnv didn't inject from rc files)", got, "/from/bashrc")
+	}
+}
+
+func TestAppleContainer_UserEnvProbe_None(t *testing.T) {
+	if testing.Short() {
+		t.Skip("integration tests skipped with -short")
+	}
+	eng, _ := newAppleContainerEngine(t)
+
+	ws := writeWorkspace(t, `{
+		"image": "`+appleBashImage+`",
+		"userEnvProbe": "none",
+		"postCreateCommand": "echo 'export EXTRA_PATH=/from/bashrc' > /etc/profile.d/dc-go-test.sh"
+	}`)
+
+	ctx, cancel := context.WithTimeout(context.Background(), 5*time.Minute)
+	defer cancel()
+
+	wsObj, err := eng.Up(ctx, devcontainer.UpOptions{
+		LocalWorkspaceFolder: ws,
+		Recreate:             true,
+	})
+	if err != nil {
+		t.Fatalf("Up: %v", err)
+	}
+	defer func() {
+		_ = eng.Down(context.Background(), wsObj, devcontainer.DownOptions{Remove: true})
+	}()
+
+	res, err := eng.Exec(ctx, wsObj, devcontainer.ExecOptions{
+		Cmd: []string{"printenv", "EXTRA_PATH"},
+	})
+	// printenv exits non-zero when the var is unset; that's the
+	// success signal here.
+	if err != nil {
+		t.Fatalf("Exec: %v", err)
+	}
+	if res.ExitCode == 0 {
+		t.Errorf("EXTRA_PATH leaked with userEnvProbe=none (stdout=%q)", res.Stdout)
+	}
+}
+
+// TestAppleContainer_UserEnvProbe_LifecycleSeesBashrcPath is the
+// stricter variant: postCreate itself must see the probed env so a
+// tool installed only via rc files is resolvable during the hook. Uses
+// PR-G2 BuildImage to bake the fake tool + rc snippet into a base
+// image. Skips if the builder isn't running.
+func TestAppleContainer_UserEnvProbe_LifecycleSeesBashrcPath(t *testing.T) {
+	if testing.Short() {
+		t.Skip("integration tests skipped with -short")
+	}
+	eng, rt := newAppleContainerEngine(t)
+
+	dir := t.TempDir()
+	df := `FROM ` + appleBashImage + `
+RUN mkdir -p /opt/mytool/bin /etc/profile.d \
+    && printf '#!/bin/sh\necho hello-from-mytool\n' > /opt/mytool/bin/mytool \
+    && chmod +x /opt/mytool/bin/mytool \
+    && printf 'export PATH=/opt/mytool/bin:$PATH\n' > /etc/profile.d/mytool.sh
+`
+	if err := writeFile(dir+"/Dockerfile", df); err != nil {
+		t.Fatal(err)
+	}
+	tag := "dc-it-ac-userenv:latest"
+	ctx, cancel := context.WithTimeout(context.Background(), 5*time.Minute)
+	defer cancel()
+	if _, err := rt.BuildImage(ctx, runtimeBuildSpec(dir, tag), nil); err != nil {
+		var unavail *runtime.BuilderUnavailableError
+		if errors.As(err, &unavail) {
+			t.Skipf("BuildImage (builder not running): %v", err)
+		}
+		t.Fatalf("BuildImage: %v", err)
+	}
+
+	ws := writeWorkspace(t, `{
+		"image": "`+tag+`",
+		"postCreateCommand": "command -v mytool && mytool > /tmp/lifecycle-out"
+	}`)
+
+	wsObj, err := eng.Up(ctx, devcontainer.UpOptions{
+		LocalWorkspaceFolder: ws,
+		Recreate:             true,
+	})
+	if err != nil {
+		t.Fatalf("Up: %v (postCreate likely failed to resolve mytool — probe not merged into lifecycle env)", err)
+	}
+	defer func() {
+		_ = eng.Down(context.Background(), wsObj, devcontainer.DownOptions{Remove: true})
+	}()
+
+	res, err := eng.Exec(ctx, wsObj, devcontainer.ExecOptions{
+		Cmd: []string{"cat", "/tmp/lifecycle-out"},
+	})
+	if err != nil {
+		t.Fatalf("read lifecycle output: %v", err)
+	}
+	if got := strings.TrimSpace(res.Stdout); got != "hello-from-mytool" {
+		t.Errorf("/tmp/lifecycle-out = %q, want %q", got, "hello-from-mytool")
+	}
+}
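// Illustrative sketch — not part of this patch. The general shape of
// the login-shell env probe the three tests above exercise: run a
// login shell, capture printenv, and merge the result into the exec
// and lifecycle environment. ExecSpec and probedEnv are stand-ins;
// strings.Cut is stdlib (Go 1.18+).
//
//	res, err := rt.ExecContainer(ctx, id, runtime.ExecSpec{
//		Cmd: []string{"/bin/sh", "-lc", "printenv"}, // -l sources /etc/profile and profile.d
//	})
//	if err == nil {
//		for _, line := range strings.Split(res.Stdout, "\n") {
//			if k, v, ok := strings.Cut(line, "="); ok {
//				probedEnv[k] = v // merged beneath config containerEnv later
//			}
//		}
//	}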