diff --git a/QUICKSTART-DEPLOY.md b/QUICKSTART-DEPLOY.md index 5e34c271ad..ad09a2e3bd 100644 --- a/QUICKSTART-DEPLOY.md +++ b/QUICKSTART-DEPLOY.md @@ -5,6 +5,7 @@ This guide will help you get the Unkey deployment platform up and running locall ## Prerequisites - Docker and Docker Compose +- Go 1.24 or later - A terminal/command line ## Step 1: Start the Platform @@ -12,21 +13,12 @@ This guide will help you get the Unkey deployment platform up and running locall 1. Start all services using Docker Compose: ```bash -docker-compose up -d +docker-compose up metald-aio dashboard ctrl -d ``` -This will start: +2. Wait for all services to be healthy -- MySQL database (port 3306) -- Dashboard (port 3000) -- Control plane services -- Supporting infrastructure - -2. Wait for all services to be healthy (this may take 1-2 minutes): - -```bash -docker-compose ps -``` +The platform now uses a Docker backend that creates containers instead of VMs, making it much faster and easier to run locally. ## Step 2: Set Up Your Workspace @@ -36,31 +28,17 @@ docker-compose ps http://localhost:3000 ``` -2. Sign in or create an account through the authentication flow +2. Create a workspace and copy its id -3. Once logged in, you'll automatically have a workspace created. Navigate to: +3. Create a new project by filling out the form: -``` -http://localhost:3000/projects -``` +Go to http://localhost:3000/projects -4. Create a new project by filling out the form: +- **Name**: Choose any name (e.g., "My Test App") +- **Slug**: This will auto-generate based on the name +- **Git URL**: Optional, leave blank for testing - - **Name**: Choose any name (e.g., "My Test App") - - **Slug**: This will auto-generate based on the name - - **Git URL**: Optional, leave blank for testing - -5. After creating the project, **copy the Project ID** from the project details. It will look like: - -``` -proj_xxxxxxxxxxxxxxxxxx -``` - -6. Also note your **Workspace ID** (you can find this settings). 
It will look like: - -``` -ws_xxxxxxxxxxxxxxxxxx -``` +4. After creating the project, **copy the Project ID** from the project details. It will look like `proj_xxxxxxxxxxxxxxxxxx`. ## Step 3: Deploy a Version @@ -82,21 +60,71 @@ go run . version create \ Keep the context as shown, there's a demo api in that folder. Replace `YOUR_WORKSPACE_ID` and `YOUR_PROJECT_ID` with the actual values you copied from the dashboard. -3. The CLI will show real-time progress as your deployment goes through these stages: - - Downloading Docker image - - Building rootfs - - Uploading rootfs - - Creating VM - - Booting VM - - Assigning domains - - Completed +3. The CLI will: + - Always build a fresh Docker image from your code + - Set the PORT environment variable to 8080 in the container + - Use the Docker backend to create a container instead of a VM + - Automatically allocate a random host port (e.g., 35432) to avoid conflicts + - Show real-time progress as your deployment goes through the stages ## Step 4: View Your Deployment -1. Return to the dashboard and navigate to: +1. Once the deployment completes, the CLI will show you the available domains: + +``` +Deployment Complete + Version ID: v_xxxxxxxxxxxxxxxxxx + Status: Ready + Environment: Production + +Domains + https://main-commit-workspace.unkey.app + http://localhost:35432 +``` + +2. If you're using the `demo_api` you can curl the `/v1/liveness` endpoint +3. Return to the dashboard and navigate to: ``` http://localhost:3000/versions http://localhost:3000/deployments ``` +### Important: Your Application Must Listen on the PORT Environment Variable + +**Your deployed application MUST read the `PORT` environment variable and listen on that port.** The platform sets `PORT=8080` in the container, and your code needs to use this value. 
+ +**Example for different languages:** + +```javascript +// Node.js +const port = process.env.PORT || 3000; +app.listen(port, () => { + console.log(`Server running on port ${port}`); +}); +``` + +```python +# Python +import os +port = int(os.environ.get('PORT', 3000)) +app.run(host='0.0.0.0', port=port) +``` + +```go +// Go +port := os.Getenv("PORT") +if port == "" { + port = "3000" +} +http.ListenAndServe(":"+port, handler) +``` + +The demo_api already follows this pattern and listens on the PORT environment variable. + +## Troubleshooting + +- If you see "port is already allocated" errors, the system will automatically retry with a new random port +- Check container logs: `docker logs <container-name>` +- Verify the demo_api is listening on the PORT environment variable (should be 8080) +- Make sure your Dockerfile exposes the correct port (8080 in the demo_api example) diff --git a/go/cmd/version/main.go b/go/cmd/version/main.go index 3df75e423f..85cdbaa7c1 100644 --- a/go/cmd/version/main.go +++ b/go/cmd/version/main.go @@ -55,6 +55,10 @@ var createCmd = &cli.Command{ Usage: "Docker image tag (e.g., ghcr.io/user/app:tag). 
If not provided, builds from current directory", Required: false, }, + &cli.BoolFlag{ + Name: "force-build", + Usage: "Force build Docker image even if --docker-image is provided", + }, &cli.StringFlag{ Name: "dockerfile", Usage: "Path to Dockerfile", @@ -123,29 +127,34 @@ func createAction(ctx context.Context, cmd *cli.Command) error { dockerfile := cmd.String("dockerfile") buildContext := cmd.String("context") + // Always build the image, ignoring any provided docker-image + dockerImage = "" + return runDeploymentSteps(ctx, cmd, workspaceID, projectID, branch, dockerImage, dockerfile, buildContext, commit, logger) } -func printDeploymentComplete(versionID, workspace, branch string) { - // Use actual Git info for hostname generation - gitInfo := git.GetInfo() - identifier := versionID - if gitInfo.IsRepo && gitInfo.CommitSHA != "" { - identifier = gitInfo.CommitSHA - } - +func printDeploymentComplete(version *ctrlv1.Version) { fmt.Println() fmt.Println("Deployment Complete") - fmt.Printf(" Version ID: %s\n", versionID) + fmt.Printf(" Version ID: %s\n", version.GetId()) fmt.Printf(" Status: Ready\n") fmt.Printf(" Environment: Production\n") fmt.Println() fmt.Println("Domains") - // Replace underscores with dashes for valid hostname format - cleanIdentifier := strings.ReplaceAll(identifier, "_", "-") - fmt.Printf(" https://%s-%s-%s.unkey.app\n", branch, cleanIdentifier, workspace) - fmt.Printf(" https://api.acme.com\n") + hostnames := version.GetHostnames() + if len(hostnames) > 0 { + for _, hostname := range hostnames { + // Check if it's a localhost hostname (don't add https://) + if strings.HasPrefix(hostname, "localhost:") { + fmt.Printf(" http://%s\n", hostname) + } else { + fmt.Printf(" https://%s\n", hostname) + } + } + } else { + fmt.Printf(" No hostnames assigned\n") + } } func runDeploymentSteps(ctx context.Context, cmd *cli.Command, workspace, project, branch, dockerImage, dockerfile, buildContext, commit string, logger logging.Logger) error { @@ -324,17 
+333,18 @@ func runDeploymentSteps(ctx context.Context, cmd *cli.Command, workspace, projec fmt.Printf(" Version ID: %s\n", versionID) // Poll for version status updates - if err := pollVersionStatus(ctx, logger, client, versionID); err != nil { + finalVersion, err := pollVersionStatus(ctx, logger, client, versionID) + if err != nil { return fmt.Errorf("deployment failed: %w", err) } - printDeploymentComplete(versionID, workspace, branch) + printDeploymentComplete(finalVersion) return nil } // pollVersionStatus polls the control plane API and displays deployment steps as they occur -func pollVersionStatus(ctx context.Context, logger logging.Logger, client ctrlv1connect.VersionServiceClient, versionID string) error { +func pollVersionStatus(ctx context.Context, logger logging.Logger, client ctrlv1connect.VersionServiceClient, versionID string) (*ctrlv1.Version, error) { ticker := time.NewTicker(2 * time.Second) defer ticker.Stop() @@ -346,10 +356,10 @@ func pollVersionStatus(ctx context.Context, logger logging.Logger, client ctrlv1 for { select { case <-ctx.Done(): - return ctx.Err() + return nil, ctx.Err() case <-timeout.C: fmt.Printf("Error: Deployment timeout after 5 minutes\n") - return fmt.Errorf("deployment timeout") + return nil, fmt.Errorf("deployment timeout") case <-ticker.C: // Always poll version status getReq := connect.NewRequest(&ctrlv1.GetVersionRequest{ @@ -377,12 +387,12 @@ func pollVersionStatus(ctx context.Context, logger logging.Logger, client ctrlv1 // Check if deployment is complete if version.GetStatus() == ctrlv1.VersionStatus_VERSION_STATUS_ACTIVE { - return nil + return version, nil } // Check if deployment failed if version.GetStatus() == ctrlv1.VersionStatus_VERSION_STATUS_FAILED { - return fmt.Errorf("deployment failed") + return nil, fmt.Errorf("deployment failed") } } } diff --git a/go/demo_api/Dockerfile b/go/demo_api/Dockerfile index 2e069edfb7..bbb1b96c87 100644 --- a/go/demo_api/Dockerfile +++ b/go/demo_api/Dockerfile @@ -17,6 
+17,6 @@ WORKDIR /root/ COPY --from=builder /app/main . -EXPOSE 8080 +ENV PORT=8080 CMD ["./main"] diff --git a/go/deploy/Dockerfile.dev b/go/deploy/Dockerfile.dev index b5deff6e8d..c867950f72 100644 --- a/go/deploy/Dockerfile.dev +++ b/go/deploy/Dockerfile.dev @@ -1,61 +1,53 @@ # Dockerfile.dev - Development environment for all Unkey deploy services # Based on LOCAL_DEPLOYMENT_GUIDE.md for maximum production parity -# Build stage - compile all services -FROM fedora:42 AS builder +# Install stage - install all dependencies once +FROM fedora:42 AS install -# Install development tools (following LOCAL_DEPLOYMENT_GUIDE.md) +# Install all dependencies (dev tools + runtime deps + Docker CLI) RUN dnf install -y dnf-plugins-core && \ dnf group install -y development-tools && \ - dnf install -y git make golang curl wget iptables-legacy && \ + dnf install -y git make golang curl wget iptables-legacy \ + systemd systemd-devel procps-ng util-linux && \ + dnf config-manager addrepo --from-repofile=https://download.docker.com/linux/fedora/docker-ce.repo && \ + dnf install -y docker-ce-cli && \ dnf clean all - # Set up Go environment ENV GOPATH=/go ENV PATH=$PATH:/go/bin:/usr/local/go/bin +# Base build stage with source code +FROM install AS build-base + # Copy source code COPY . 
/src/go WORKDIR /src/go # Protobuf files are already generated in go/proto/ - no need to generate them again -# Build all services directly using go build (protobufs already generated) -# Go will download dependencies as needed during build +# Build assetmanagerd +FROM build-base AS build-assetmanagerd WORKDIR /src/go/deploy/assetmanagerd RUN go build -o assetmanagerd ./cmd/assetmanagerd +# Build billaged +FROM build-base AS build-billaged WORKDIR /src/go/deploy/billaged RUN go build -o billaged ./cmd/billaged +# Build builderd +FROM build-base AS build-builderd WORKDIR /src/go/deploy/builderd RUN go build -o builderd ./cmd/builderd +# Build metald +FROM build-base AS build-metald WORKDIR /src/go/deploy/metald RUN go build -o metald ./cmd/metald -# Runtime stage - Fedora with systemd -FROM fedora:42 - -# Install runtime dependencies -RUN dnf update -y && \ - dnf install -y \ - systemd \ - systemd-devel \ - iptables-legacy \ - curl \ - wget \ - procps-ng \ - util-linux \ - && \ - dnf clean all - -# Install Docker CLI for metald Docker backend -RUN dnf install -y dnf-plugins-core && \ - dnf config-manager addrepo --from-repofile=https://download.docker.com/linux/fedora/docker-ce.repo && \ - dnf install -y docker-ce-cli && \ - dnf clean all +# Runtime stage - reuse install stage (all deps already installed) +FROM install AS runtime # Create billaged user (following systemd service requirements) RUN useradd -r -s /bin/false billaged @@ -72,11 +64,11 @@ RUN mkdir -p /opt/assetmanagerd/{cache,data} && \ # Set ownership for service directories RUN chown -R billaged:billaged /opt/billaged /var/log/billaged -# Copy built binaries from builder stage -COPY --from=builder /src/go/deploy/assetmanagerd/assetmanagerd /usr/local/bin/ -COPY --from=builder /src/go/deploy/billaged/billaged /usr/local/bin/ -COPY --from=builder /src/go/deploy/builderd/builderd /usr/local/bin/ -COPY --from=builder /src/go/deploy/metald/metald /usr/local/bin/ +# Copy built binaries from respective build 
stages +COPY --from=build-assetmanagerd /src/go/deploy/assetmanagerd/assetmanagerd /usr/local/bin/ +COPY --from=build-billaged /src/go/deploy/billaged/billaged /usr/local/bin/ +COPY --from=build-builderd /src/go/deploy/builderd/builderd /usr/local/bin/ +COPY --from=build-metald /src/go/deploy/metald/metald /usr/local/bin/ # Make binaries executable @@ -217,7 +209,7 @@ LABEL org.unkey.component="deploy-services" \ # AIDEV-NOTE: This Dockerfile follows the LOCAL_DEPLOYMENT_GUIDE.md as closely as possible # Key features: # 1. Uses Fedora 42 (production parity) -# 2. Multi-stage build with development tools +# 2. Multi-stage build with parallel service compilation for faster builds # 3. systemd as process manager # 4. All services built using existing Makefiles # 5. TLS disabled for development diff --git a/go/deploy/metald/internal/backend/docker/client.go b/go/deploy/metald/internal/backend/docker/client.go index 7e03174298..ba43700fde 100644 --- a/go/deploy/metald/internal/backend/docker/client.go +++ b/go/deploy/metald/internal/backend/docker/client.go @@ -4,14 +4,16 @@ import ( "context" "encoding/json" "fmt" + "io" "log/slog" - "net" + "math/rand" "strconv" "strings" "sync" "time" "github.com/docker/docker/api/types/container" + "github.com/docker/docker/api/types/image" "github.com/docker/docker/client" "github.com/docker/go-connections/nat" backendtypes "github.com/unkeyed/unkey/go/deploy/metald/internal/backend/types" @@ -64,7 +66,7 @@ func NewDockerBackend(logger *slog.Logger, config *DockerBackendConfig) (*Docker // Verify Docker connection ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second) defer cancel() - + if _, err := dockerClient.Ping(ctx); err != nil { return nil, fmt.Errorf("failed to connect to Docker daemon: %w", err) } @@ -126,6 +128,7 @@ func NewDockerBackend(logger *slog.Logger, config *DockerBackendConfig) (*Docker vmErrorCounter: vmErrorCounter, } + return backend, nil } @@ -222,6 +225,17 @@ func (d *DockerBackend) 
BootVM(ctx context.Context, vmID string) error { slog.String("container_id", vm.ContainerID), ) + // Check if container still exists before starting + _, err := d.dockerClient.ContainerInspect(ctx, vm.ContainerID) + if err != nil { + span.RecordError(err) + d.vmErrorCounter.Add(ctx, 1, metric.WithAttributes( + attribute.String("operation", "boot"), + attribute.String("error", "container_not_found"), + )) + return fmt.Errorf("container not found before start: %w", err) + } + // Start container if err := d.dockerClient.ContainerStart(ctx, vm.ContainerID, container.StartOptions{}); err != nil { span.RecordError(err) @@ -235,7 +249,7 @@ func (d *DockerBackend) BootVM(ctx context.Context, vmID string) error { // Update VM state and network info d.mutex.Lock() vm.State = metaldv1.VmState_VM_STATE_RUNNING - + // Get container network info networkInfo, err := d.getContainerNetworkInfo(ctx, vm.ContainerID) if err != nil { @@ -549,10 +563,10 @@ func (d *DockerBackend) GetVMMetrics(ctx context.Context, vmID string) (*backend Timestamp: time.Now(), CpuTimeNanos: int64(dockerStats.CPUStats.CPUUsage.TotalUsage), MemoryUsageBytes: int64(dockerStats.MemoryStats.Usage), - DiskReadBytes: 0, // TODO: Calculate from BlkioStats - DiskWriteBytes: 0, // TODO: Calculate from BlkioStats - NetworkRxBytes: 0, // TODO: Calculate from NetworkStats - NetworkTxBytes: 0, // TODO: Calculate from NetworkStats + DiskReadBytes: 0, // TODO: Calculate from BlkioStats + DiskWriteBytes: 0, // TODO: Calculate from BlkioStats + NetworkRxBytes: 0, // TODO: Calculate from NetworkStats + NetworkTxBytes: 0, // TODO: Calculate from NetworkStats } // Calculate disk I/O @@ -609,12 +623,12 @@ func (d *DockerBackend) vmConfigToContainerSpec(ctx context.Context, vmID string CPUs: float64(config.GetCpu().GetVcpuCount()), } - // Determine image from metadata or use default - if dockerImage, ok := config.Metadata["docker_image"]; ok { - spec.Image = dockerImage - } else { - spec.Image = d.config.DefaultImage + // 
Docker image must be specified in metadata + dockerImage, ok := config.Metadata["docker_image"] + if !ok || dockerImage == "" { + return nil, fmt.Errorf("docker_image must be specified in VM config metadata") } + spec.Image = dockerImage // Extract exposed ports from metadata if exposedPorts, ok := config.Metadata["exposed_ports"]; ok { @@ -626,6 +640,16 @@ func (d *DockerBackend) vmConfigToContainerSpec(ctx context.Context, vmID string } } + // Extract environment variables from metadata + if envVars, ok := config.Metadata["env_vars"]; ok { + vars := strings.Split(envVars, ",") + for _, envVar := range vars { + if envVar = strings.TrimSpace(envVar); envVar != "" { + spec.Env = append(spec.Env, envVar) + } + } + } + // Allocate host ports for exposed ports for _, exposedPort := range spec.ExposedPorts { containerPort, err := strconv.Atoi(strings.Split(exposedPort, "/")[0]) @@ -633,19 +657,15 @@ func (d *DockerBackend) vmConfigToContainerSpec(ctx context.Context, vmID string continue } - hostPort, err := d.portAllocator.allocatePort(vmID) - if err != nil { - return nil, fmt.Errorf("failed to allocate port: %w", err) - } - protocol := "tcp" if strings.Contains(exposedPort, "/udp") { protocol = "udp" } + // We'll allocate the port during container creation with retry logic spec.PortMappings = append(spec.PortMappings, PortMapping{ ContainerPort: containerPort, - HostPort: hostPort, + HostPort: 0, // Will be allocated during creation Protocol: protocol, }) } @@ -660,6 +680,27 @@ func (d *DockerBackend) createContainer(ctx context.Context, spec *ContainerSpec ) defer span.End() + d.logger.Info("checking if image exists locally", "image", spec.Image) + _, err := d.dockerClient.ImageInspect(ctx, spec.Image) + if err != nil { + d.logger.Info("image not found locally, pulling image", "image", spec.Image, "error", err.Error()) + pullResponse, err := d.dockerClient.ImagePull(ctx, spec.Image, image.PullOptions{}) + if err != nil { + return "", fmt.Errorf("failed to pull image 
%s: %w", spec.Image, err) + } + defer pullResponse.Close() + + // Read the pull response to completion to ensure pull finishes + _, err = io.ReadAll(pullResponse) + if err != nil { + return "", fmt.Errorf("failed to read pull response for image %s: %w", spec.Image, err) + } + + d.logger.Info("image pulled successfully", "image", spec.Image) + } else { + d.logger.Info("image found locally, skipping pull", "image", spec.Image) + } + // Build container configuration config := &container.Config{ Image: spec.Image, @@ -670,6 +711,9 @@ func (d *DockerBackend) createContainer(ctx context.Context, spec *ContainerSpec WorkingDir: spec.WorkingDir, } + // Log the container command for debugging + d.logger.Info("container configuration", "image", spec.Image, "cmd", config.Cmd, "env", config.Env) + // Set up exposed ports for _, mapping := range spec.PortMappings { port := nat.Port(fmt.Sprintf("%d/%s", mapping.ContainerPort, mapping.Protocol)) @@ -679,35 +723,77 @@ func (d *DockerBackend) createContainer(ctx context.Context, spec *ContainerSpec // Build host configuration hostConfig := &container.HostConfig{ PortBindings: make(nat.PortMap), - AutoRemove: d.config.AutoRemove, + AutoRemove: false, // Don't auto-remove containers for debugging Privileged: d.config.Privileged, Resources: container.Resources{ - Memory: spec.Memory, + Memory: spec.Memory, NanoCPUs: int64(spec.CPUs * 1e9), }, } - // Set up port bindings - for _, mapping := range spec.PortMappings { - containerPort := nat.Port(fmt.Sprintf("%d/%s", mapping.ContainerPort, mapping.Protocol)) - hostConfig.PortBindings[containerPort] = []nat.PortBinding{ - { - HostIP: "0.0.0.0", - HostPort: strconv.Itoa(mapping.HostPort), - }, + // Set up port bindings with retry logic + maxRetries := 5 + for retry := 0; retry < maxRetries; retry++ { + // Clear previous port bindings + hostConfig.PortBindings = make(nat.PortMap) + + // Allocate ports for this attempt + var allocatedPorts []int + portAllocationFailed := false + + for i, 
mapping := range spec.PortMappings { + if mapping.HostPort == 0 { + // Allocate a new port + hostPort, err := d.portAllocator.allocatePort(spec.Labels["unkey.vm.id"]) + if err != nil { + // Release any ports allocated in this attempt + for _, port := range allocatedPorts { + d.portAllocator.releasePort(port, spec.Labels["unkey.vm.id"]) + } + portAllocationFailed = true + break + } + spec.PortMappings[i].HostPort = hostPort + allocatedPorts = append(allocatedPorts, hostPort) + } + + containerPort := nat.Port(fmt.Sprintf("%d/%s", mapping.ContainerPort, mapping.Protocol)) + hostConfig.PortBindings[containerPort] = []nat.PortBinding{ + { + HostIP: "0.0.0.0", + HostPort: strconv.Itoa(spec.PortMappings[i].HostPort), + }, + } + } + + if portAllocationFailed { + continue // Try again with new ports } - } - // Create container - containerName := d.config.ContainerPrefix + spec.Labels["unkey.vm.id"] - resp, err := d.dockerClient.ContainerCreate(ctx, config, hostConfig, nil, nil, containerName) - if err != nil { - span.RecordError(err) - return "", fmt.Errorf("failed to create container: %w", err) + // Create container + containerName := d.config.ContainerPrefix + spec.Labels["unkey.vm.id"] + resp, err := d.dockerClient.ContainerCreate(ctx, config, hostConfig, nil, nil, containerName) + if err != nil { + // If it's a port binding error, release ports and try again + if strings.Contains(err.Error(), "port is already allocated") || strings.Contains(err.Error(), "bind") { + for _, port := range allocatedPorts { + d.portAllocator.releasePort(port, spec.Labels["unkey.vm.id"]) + } + d.logger.Warn("port binding failed, retrying with new ports", "error", err, "retry", retry+1) + continue + } + // Other errors are not retryable + span.RecordError(err) + return "", fmt.Errorf("failed to create container: %w", err) + } + + // Success! 
+ span.SetAttributes(attribute.String("container_id", resp.ID)) + return resp.ID, nil } - span.SetAttributes(attribute.String("container_id", resp.ID)) - return resp.ID, nil + // If we get here, all retries failed + return "", fmt.Errorf("failed to create container after %d retries due to port conflicts", maxRetries) } // getContainerNetworkInfo gets network information for a container @@ -739,6 +825,47 @@ func (d *DockerBackend) getContainerNetworkInfo(ctx context.Context, containerID } } + // Add port mappings from container inspect + if inspect.NetworkSettings != nil && inspect.NetworkSettings.Ports != nil { + var portMappings []*metaldv1.PortMapping + for containerPort, hostBindings := range inspect.NetworkSettings.Ports { + if len(hostBindings) > 0 { + // Parse container port (e.g., "3000/tcp" -> 3000) + portStr := strings.Split(string(containerPort), "/")[0] + containerPortNum, err := strconv.Atoi(portStr) + if err != nil { + continue + } + + // Get protocol (tcp/udp) + protocol := "tcp" + if strings.Contains(string(containerPort), "/udp") { + protocol = "udp" + } + + // Add mapping for each host binding + for _, hostBinding := range hostBindings { + hostPortNum, err := strconv.Atoi(hostBinding.HostPort) + if err != nil { + continue + } + + portMappings = append(portMappings, &metaldv1.PortMapping{ + ContainerPort: int32(containerPortNum), + HostPort: int32(hostPortNum), + Protocol: protocol, + }) + } + } + } + + // Initialize networkInfo if it doesn't exist + if networkInfo == nil { + networkInfo = &metaldv1.VmNetworkInfo{} + } + networkInfo.PortMappings = portMappings + } + return networkInfo, nil } @@ -749,18 +876,17 @@ func (pa *portAllocator) allocatePort(vmID string) (int, error) { pa.mutex.Lock() defer pa.mutex.Unlock() - // Find available port - for port := pa.minPort; port <= pa.maxPort; port++ { + // Try random ports to avoid conflicts + maxAttempts := 100 + for attempt := 0; attempt < maxAttempts; attempt++ { + port := 
rand.Intn(pa.maxPort-pa.minPort+1) + pa.minPort if _, exists := pa.allocated[port]; !exists { - // Check if port is actually available - if pa.isPortAvailable(port) { - pa.allocated[port] = vmID - return port, nil - } + pa.allocated[port] = vmID + return port, nil } } - return 0, fmt.Errorf("no available ports in range %d-%d", pa.minPort, pa.maxPort) + return 0, fmt.Errorf("no available ports in range %d-%d after %d attempts", pa.minPort, pa.maxPort, maxAttempts) } // releasePort releases a port from a VM @@ -773,15 +899,6 @@ func (pa *portAllocator) releasePort(port int, vmID string) { } } -// isPortAvailable checks if a port is available on the host -func (pa *portAllocator) isPortAvailable(port int) bool { - conn, err := net.Listen("tcp", fmt.Sprintf(":%d", port)) - if err != nil { - return false - } - conn.Close() - return true -} // Ensure DockerBackend implements Backend interface var _ backendtypes.Backend = (*DockerBackend)(nil) @@ -793,4 +910,4 @@ var _ backendtypes.Backend = (*DockerBackend)(nil) // 2. No privileged operations - Docker daemon handles isolation // 3. Familiar container semantics - easier debugging and monitoring // 4. Fast startup times - containers start instantly vs VM boot time -// 5. Resource efficiency - shared kernel, no VM overhead \ No newline at end of file +// 5. 
Resource efficiency - shared kernel, no VM overhead diff --git a/go/deploy/metald/internal/backend/docker/types.go b/go/deploy/metald/internal/backend/docker/types.go index 2022890f8b..87d26876ed 100644 --- a/go/deploy/metald/internal/backend/docker/types.go +++ b/go/deploy/metald/internal/backend/docker/types.go @@ -50,8 +50,6 @@ type DockerBackendConfig struct { // ContainerPrefix is the prefix for container names (defaults to unkey-vm-) ContainerPrefix string `json:"container_prefix,omitempty"` - // DefaultImage is the default image to use if none specified - DefaultImage string `json:"default_image,omitempty"` // PortRange defines the range of host ports to allocate PortRange struct { @@ -72,7 +70,6 @@ func DefaultDockerBackendConfig() *DockerBackendConfig { DockerHost: "", // Use default Docker socket NetworkName: "bridge", ContainerPrefix: "unkey-vm-", - DefaultImage: "alpine:latest", PortRange: struct { Min int `json:"min"` Max int `json:"max"`