add supplemental container support

This commit is contained in:
Zoe
2025-04-10 16:18:42 +00:00
parent 2bd953dcb6
commit d501775ae6
21 changed files with 460 additions and 215 deletions

internal/server/app.go Normal file

@@ -0,0 +1,268 @@
package server
import (
"context"
"fmt"
"os"
"path/filepath"
"sync"
"github.com/juls0730/flux/pkg"
"go.uber.org/zap"
)
type App struct {
ID int64 `json:"id,omitempty"`
Deployment *Deployment `json:"-"`
Name string `json:"name,omitempty"`
DeploymentID int64 `json:"deployment_id,omitempty"`
}
// Create the initial app row in the database and create and start the deployment. The app is the overarching data
// structure that contains all of the data for a project
func CreateApp(ctx context.Context, imageName string, projectPath string, projectConfig *pkg.ProjectConfig) (*App, error) {
app := &App{
Name: projectConfig.Name,
}
logger.Debugw("Creating deployment", zap.String("name", app.Name))
deployment, err := CreateDeployment(projectConfig.Port, projectConfig.Url, Flux.db)
if err != nil {
logger.Errorw("Failed to create deployment", zap.Error(err))
return nil, err
}
app.Deployment = deployment
for _, container := range projectConfig.Containers {
container := container // copy: CreateContainer takes a pointer, and pre-Go 1.22 the loop variable is reused
c, err := CreateContainer(ctx, &container, projectConfig.Name, false, deployment)
if err != nil {
return nil, fmt.Errorf("failed to create container: %v", err)
}
if err := c.Start(ctx, true); err != nil {
return nil, fmt.Errorf("failed to start container: %v", err)
}
}
headContainer := &pkg.Container{
Name: projectConfig.Name,
ImageName: imageName,
Volumes: projectConfig.Volumes,
Environment: projectConfig.Environment,
}
// this call does a lot for us, see its documentation for more info
_, err = CreateContainer(ctx, headContainer, projectConfig.Name, true, deployment)
if err != nil {
return nil, fmt.Errorf("failed to create container: %v", err)
}
// create app in the database
err = appInsertStmt.QueryRow(projectConfig.Name, deployment.ID).Scan(&app.ID, &app.Name, &app.DeploymentID)
if err != nil {
return nil, fmt.Errorf("failed to insert app: %v", err)
}
err = deployment.Start(ctx)
if err != nil {
return nil, fmt.Errorf("failed to start deployment: %v", err)
}
Flux.appManager.AddApp(app.Name, app)
return app, nil
}
func (app *App) Upgrade(ctx context.Context, imageName string, projectPath string, projectConfig *pkg.ProjectConfig) error {
logger.Debugw("Upgrading deployment", zap.String("name", app.Name))
// if deploy is not started, start it
deploymentStatus, err := app.Deployment.Status(ctx)
if err != nil {
return fmt.Errorf("failed to get deployment status: %v", err)
}
if deploymentStatus != "running" {
err = app.Deployment.Start(ctx)
if err != nil {
return fmt.Errorf("failed to start deployment: %v", err)
}
}
err = app.Deployment.Upgrade(ctx, projectConfig, imageName, projectPath)
if err != nil {
return fmt.Errorf("failed to upgrade deployment: %v", err)
}
return nil
}
// delete an app and deployment from the database, and its project files from disk.
func (app *App) Remove(ctx context.Context) error {
Flux.appManager.RemoveApp(app.Name)
err := app.Deployment.Remove(ctx)
if err != nil {
logger.Errorw("Failed to remove deployment", zap.Error(err))
return err
}
_, err = Flux.db.Exec("DELETE FROM apps WHERE id = ?", app.ID)
if err != nil {
logger.Errorw("Failed to delete app", zap.Error(err))
return err
}
projectPath := filepath.Join(Flux.rootDir, "apps", app.Name)
err = os.RemoveAll(projectPath)
if err != nil {
return fmt.Errorf("failed to remove project directory: %v", err)
}
return nil
}
type AppManager struct {
sync.Map
}
func (am *AppManager) GetApp(name string) *App {
app, exists := am.Load(name)
if !exists {
return nil
}
return app.(*App)
}
func (am *AppManager) GetAllApps() []*App {
var apps []*App
am.Range(func(key, value interface{}) bool {
if app, ok := value.(*App); ok {
apps = append(apps, app)
}
return true
})
return apps
}
// removes an app from the app manager
func (am *AppManager) RemoveApp(name string) {
am.Delete(name)
}
// add a given app to the app manager
func (am *AppManager) AddApp(name string, app *App) {
if app.Deployment.Head == nil || len(app.Deployment.Containers) == 0 {
panic("nil containers")
}
am.Store(name, app)
}
// nukes an app completely
func (am *AppManager) DeleteApp(name string) error {
app := am.GetApp(name)
if app == nil {
return fmt.Errorf("app not found")
}
err := app.Remove(context.Background())
if err != nil {
return err
}
am.Delete(name)
return nil
}
// Scan every app in the database, rebuild the in-memory structures, and re-register the proxy for deployments that are already running
func (am *AppManager) Init() {
logger.Info("Initializing deployments")
if Flux.db == nil {
logger.Panic("DB is nil")
}
rows, err := Flux.db.Query("SELECT id, name, deployment_id FROM apps")
if err != nil {
logger.Warnw("Failed to query apps", zap.Error(err))
return
}
defer rows.Close()
var apps []App
for rows.Next() {
var app App
if err := rows.Scan(&app.ID, &app.Name, &app.DeploymentID); err != nil {
logger.Warnw("Failed to scan app", zap.Error(err))
return
}
apps = append(apps, app)
}
for i := range apps {
app := &apps[i] // point at the slice element, not a reused loop variable
deployment := &Deployment{}
var headContainer *Container
if err := Flux.db.QueryRow("SELECT id, url, port FROM deployments WHERE id = ?", app.DeploymentID).Scan(&deployment.ID, &deployment.URL, &deployment.Port); err != nil {
logger.Warnw("Failed to scan deployment", zap.Error(err))
return
}
deployment.Containers = make([]*Container, 0)
containerRows, err := Flux.db.Query("SELECT id, container_id, deployment_id, head FROM containers WHERE deployment_id = ?", app.DeploymentID)
if err != nil {
logger.Warnw("Failed to query containers", zap.Error(err))
return
}
for containerRows.Next() {
container := new(Container) // fresh allocation so the pointers stored below stay distinct
var containerIDString string
if err := containerRows.Scan(&container.ID, &containerIDString, &container.DeploymentID, &container.Head); err != nil {
logger.Warnw("Failed to scan container", zap.Error(err))
containerRows.Close()
return
}
container.Deployment = deployment
copy(container.ContainerID[:], containerIDString)
if container.Head {
if headContainer != nil {
logger.Fatal("Several containers are marked as head")
}
headContainer = container
}
volumeRows, err := Flux.db.Query("SELECT id, volume_id, container_id, mountpoint FROM volumes WHERE container_id = ?", container.ContainerID[:])
if err != nil {
logger.Warnw("Failed to query volumes", zap.Error(err))
containerRows.Close()
return
}
for volumeRows.Next() {
volume := new(Volume)
if err := volumeRows.Scan(&volume.ID, &volume.VolumeID, &volume.ContainerID, &volume.Mountpoint); err != nil {
logger.Warnw("Failed to scan volume", zap.Error(err))
}
container.Volumes = append(container.Volumes, volume)
}
// close the per-container rows here rather than deferring inside the loop
volumeRows.Close()
deployment.Containers = append(deployment.Containers, container)
}
containerRows.Close()
if headContainer == nil {
logger.Fatal("head container is nil!")
}
deployment.Head = headContainer
app.Deployment = deployment
am.AddApp(app.Name, app)
status, err := deployment.Status(context.Background())
if err != nil {
logger.Warnw("Failed to get deployment status", zap.Error(err))
continue
}
if status != "running" {
continue
}
deployment.Proxy, _ = deployment.NewDeploymentProxy()
Flux.proxy.AddDeployment(deployment)
}
}
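
For reference, a minimal sketch of how the AppManager lookup path might be used from a request handler in this package (the handler name and route are hypothetical; net/http is assumed to be imported):

// appStatusHandler is a hypothetical sketch: look an app up by name in the
// AppManager and report its deployment status.
func (s *FluxServer) appStatusHandler(w http.ResponseWriter, r *http.Request) {
    app := Flux.appManager.GetApp(r.PathValue("name"))
    if app == nil {
        http.Error(w, "App not found", http.StatusNotFound)
        return
    }
    status, err := app.Deployment.Status(r.Context())
    if err != nil {
        http.Error(w, err.Error(), http.StatusInternalServerError)
        return
    }
    fmt.Fprintf(w, "%s: %s\n", app.Name, status)
}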


@@ -0,0 +1,541 @@
package server
import (
"context"
"database/sql"
"fmt"
"io"
"net/http"
"strings"
"time"
"github.com/docker/docker/api/types/container"
"github.com/docker/docker/api/types/image"
"github.com/docker/docker/api/types/mount"
"github.com/docker/docker/api/types/volume"
"github.com/juls0730/flux/pkg"
"go.uber.org/zap"
)
var (
containerInsertStmt *sql.Stmt
)
type Volume struct {
ID int64 `json:"id"`
VolumeID string `json:"volume_id"`
Mountpoint string `json:"mountpoint"`
ContainerID string `json:"container_id"`
}
type Container struct {
ID int64 `json:"id"`
Head bool `json:"head"` // if the container is the head of the deployment
Name string `json:"name"`
Deployment *Deployment `json:"-"`
Volumes []*Volume `json:"volumes"`
ContainerID [64]byte `json:"container_id"`
DeploymentID int64 `json:"deployment_id"`
}
// Creates a volume in the docker daemon and returns the descriptor for the volume
func CreateDockerVolume(ctx context.Context) (vol *Volume, err error) {
dockerVolume, err := Flux.dockerClient.VolumeCreate(ctx, volume.CreateOptions{
Driver: "local",
DriverOpts: map[string]string{},
})
if err != nil {
return nil, fmt.Errorf("failed to create volume: %v", err)
}
logger.Debugw("Volume created", zap.String("volume_id", dockerVolume.Name), zap.String("mountpoint", dockerVolume.Mountpoint))
vol = &Volume{
VolumeID: dockerVolume.Name,
}
return vol, nil
}
// Creates a container in the docker daemon and returns the descriptor for the container
func CreateDockerContainer(ctx context.Context, imageName string, projectName string, vols []*Volume, environment []string, hosts []string) (*Container, error) {
for _, host := range hosts {
if host == ":" {
return nil, fmt.Errorf("invalid host %s", host)
}
}
safeImageName := strings.ReplaceAll(imageName, "/", "_")
containerName := fmt.Sprintf("flux_%s-%s-%s", safeImageName, projectName, time.Now().Format("20060102-150405"))
logger.Debugw("Creating container", zap.String("container_id", containerName))
mounts := make([]mount.Mount, len(vols))
volumes := make(map[string]struct{}, len(vols))
for i, volume := range vols {
volumes[volume.VolumeID] = struct{}{}
mounts[i] = mount.Mount{
Type: mount.TypeVolume,
Source: volume.VolumeID,
Target: volume.Mountpoint,
ReadOnly: false,
}
}
resp, err := Flux.dockerClient.ContainerCreate(ctx, &container.Config{
Image: imageName,
Env: environment,
Volumes: volumes,
},
&container.HostConfig{
RestartPolicy: container.RestartPolicy{Name: container.RestartPolicyUnlessStopped},
NetworkMode: "bridge",
Mounts: mounts,
ExtraHosts: hosts,
},
nil,
nil,
containerName,
)
if err != nil {
return nil, err
}
c := &Container{
ContainerID: [64]byte([]byte(resp.ID)),
Volumes: vols,
}
return c, nil
}
// Create a container given a container configuration and a deployment. This will do a few things:
// 1. Create the container in the docker daemon
// 2. Create the volumes for the container
// 3. Insert the container and volumes into the database
func CreateContainer(ctx context.Context, container *pkg.Container, projectName string, head bool, deployment *Deployment) (c *Container, err error) {
if container.Name == "" {
return nil, fmt.Errorf("container name is empty")
}
if container.ImageName == "" {
return nil, fmt.Errorf("container image name is empty")
}
logger.Debugw("Creating container with image", zap.String("image", container.ImageName))
var volumes []*Volume
// the head container gets a default volume where the project is mounted; this matters because if the
// project uses sqlite, for example, its data is not lost the moment the container shuts down.
if head {
vol, err := CreateDockerVolume(ctx)
if err != nil {
return nil, err
}
vol.Mountpoint = "/workspace"
volumes = append(volumes, vol)
}
for _, containerVolume := range container.Volumes {
vol, err := CreateDockerVolume(ctx)
if err != nil {
return nil, err
}
if containerVolume.Mountpoint == "" {
return nil, fmt.Errorf("mountpoint is empty")
}
if containerVolume.Mountpoint == "/workspace" || containerVolume.Mountpoint == "/" {
return nil, fmt.Errorf("invalid mountpoint")
}
vol.Mountpoint = containerVolume.Mountpoint
volumes = append(volumes, vol)
}
// if the container is the head, build a list of hostnames that the container can reach by name for this deployment
// TODO: this host list should be consistent across all containers in the deployment, not just the head
var hosts []string
if head {
for _, container := range deployment.Containers {
containerIP, err := container.GetIp()
if err != nil {
return nil, err
}
hosts = append(hosts, fmt.Sprintf("%s:%s", container.Name, containerIP))
}
}
// if the container is not the head, pull the image from docker hub
if !head {
pull, err := Flux.dockerClient.ImagePull(ctx, container.ImageName, image.PullOptions{})
if err != nil {
logger.Errorw("Failed to pull image", zap.Error(err))
return nil, err
}
// block until the image is pulled, then release the stream
io.Copy(io.Discard, pull)
pull.Close()
}
c, err = CreateDockerContainer(ctx, container.ImageName, projectName, volumes, container.Environment, hosts)
if err != nil {
return nil, err
}
c.Name = container.Name
var containerIDString string
err = containerInsertStmt.QueryRow(c.ContainerID[:], head, deployment.ID).Scan(&c.ID, &containerIDString, &c.Head, &c.DeploymentID)
if err != nil {
return nil, err
}
copy(c.ContainerID[:], containerIDString)
tx, err := Flux.db.Begin()
if err != nil {
return nil, err
}
volumeInsertStmt, err := tx.Prepare("INSERT INTO volumes (volume_id, mountpoint, container_id) VALUES (?, ?, ?) RETURNING id, volume_id, mountpoint, container_id")
if err != nil {
logger.Errorw("Failed to prepare statement", zap.Error(err))
tx.Rollback()
return nil, err
}
for _, vol := range c.Volumes {
err = volumeInsertStmt.QueryRow(vol.VolumeID, vol.Mountpoint, c.ContainerID[:]).Scan(&vol.ID, &vol.VolumeID, &vol.Mountpoint, &vol.ContainerID)
if err != nil {
tx.Rollback()
return nil, err
}
}
err = tx.Commit()
if err != nil {
tx.Rollback()
return nil, err
}
c.Deployment = deployment
if head {
deployment.Head = c
}
deployment.Containers = append(deployment.Containers, c)
return c, nil
}
func (c *Container) Upgrade(ctx context.Context, imageName, projectPath string, projectConfig *pkg.ProjectConfig) (*Container, error) {
// Create new container with new image
logger.Debugw("Upgrading container", zap.ByteString("container_id", c.ContainerID[:12]))
if c.Volumes == nil {
return nil, fmt.Errorf("no volumes found for container %s", c.ContainerID[:12])
}
var hosts []string
for _, container := range c.Deployment.Containers {
containerJSON, err := Flux.dockerClient.ContainerInspect(context.Background(), string(container.ContainerID[:]))
if err != nil {
return nil, err
}
hosts = containerJSON.HostConfig.ExtraHosts
}
newContainer, err := CreateDockerContainer(ctx, imageName, projectConfig.Name, c.Volumes, projectConfig.Environment, hosts)
if err != nil {
return nil, err
}
newContainer.Deployment = c.Deployment
var containerIDString string
err = containerInsertStmt.QueryRow(newContainer.ContainerID[:], c.Head, c.Deployment.ID).Scan(&newContainer.ID, &containerIDString, &newContainer.Head, &newContainer.DeploymentID)
if err != nil {
logger.Errorw("Failed to insert container", zap.Error(err))
return nil, err
}
copy(newContainer.ContainerID[:], containerIDString)
tx, err := Flux.db.Begin()
if err != nil {
logger.Errorw("Failed to begin transaction", zap.Error(err))
return nil, err
}
volumeUpdateStmt, err := tx.Prepare("UPDATE volumes SET container_id = ? WHERE id = ? RETURNING id, volume_id, mountpoint, container_id")
if err != nil {
tx.Rollback()
return nil, err
}
for _, vol := range newContainer.Volumes {
err = volumeUpdateStmt.QueryRow(newContainer.ContainerID[:], vol.ID).Scan(&vol.ID, &vol.VolumeID, &vol.Mountpoint, &vol.ContainerID)
if err != nil {
tx.Rollback()
logger.Error("Failed to update volume", zap.Error(err))
return nil, err
}
}
err = tx.Commit()
if err != nil {
tx.Rollback()
return nil, err
}
logger.Debug("Upgraded container")
return newContainer, nil
}
// initial indicates that the container was just created; if it was not, we need to rebuild the extra hosts,
// since the supplemental containers are not guaranteed to have the same IPs they had when the deployment was last running
func (c *Container) Start(ctx context.Context, initial bool) error {
if !initial && c.Head {
containerJSON, err := Flux.dockerClient.ContainerInspect(ctx, string(c.ContainerID[:]))
if err != nil {
return err
}
// remove the stale container so it can be recreated with refreshed hosts
if err := Flux.dockerClient.ContainerRemove(ctx, string(c.ContainerID[:]), container.RemoveOptions{}); err != nil {
return err
}
var volumes map[string]struct{} = make(map[string]struct{})
var hosts []string
var mounts []mount.Mount
for _, volume := range c.Volumes {
volumes[volume.VolumeID] = struct{}{}
mounts = append(mounts, mount.Mount{
Type: mount.TypeVolume,
Source: volume.VolumeID,
Target: volume.Mountpoint,
ReadOnly: false,
})
}
for _, supplementalContainer := range c.Deployment.Containers {
if supplementalContainer.Head {
continue
}
ip, err := supplementalContainer.GetIp()
if err != nil {
return err
}
hosts = append(hosts, fmt.Sprintf("%s:%s", supplementalContainer.Name, ip))
}
// recreate yourself
resp, err := Flux.dockerClient.ContainerCreate(ctx, &container.Config{
Image: containerJSON.Image,
Env: containerJSON.Config.Env,
Volumes: volumes,
},
&container.HostConfig{
RestartPolicy: container.RestartPolicy{Name: container.RestartPolicyUnlessStopped},
NetworkMode: "bridge",
Mounts: mounts,
ExtraHosts: hosts,
},
nil,
nil,
c.Name,
)
if err != nil {
return err
}
c.ContainerID = [64]byte([]byte(resp.ID))
if _, err := Flux.db.Exec("UPDATE containers SET container_id = ? WHERE id = ?", c.ContainerID[:], c.ID); err != nil {
return err
}
}
return Flux.dockerClient.ContainerStart(ctx, string(c.ContainerID[:]), container.StartOptions{})
}
func (c *Container) Stop(ctx context.Context) error {
return Flux.dockerClient.ContainerStop(ctx, string(c.ContainerID[:]), container.StopOptions{})
}
// Stop and remove a container and all of its volumes
func (c *Container) Remove(ctx context.Context) error {
err := RemoveDockerContainer(ctx, string(c.ContainerID[:]))
if err != nil {
return fmt.Errorf("failed to remove container (%s): %v", c.ContainerID[:12], err)
}
tx, err := Flux.db.Begin()
if err != nil {
logger.Errorw("Failed to begin transaction", zap.Error(err))
return err
}
_, err = tx.Exec("DELETE FROM containers WHERE container_id = ?", c.ContainerID[:])
if err != nil {
tx.Rollback()
return err
}
for _, volume := range c.Volumes {
if err := RemoveVolume(ctx, volume.VolumeID); err != nil {
tx.Rollback()
return fmt.Errorf("failed to remove volume (%s): %v", volume.VolumeID, err)
}
_, err = tx.Exec("DELETE FROM volumes WHERE volume_id = ?", volume.VolumeID)
if err != nil {
tx.Rollback()
return err
}
}
if err := tx.Commit(); err != nil {
logger.Errorw("Failed to commit transaction", zap.Error(err))
return err
}
return nil
}
func (c *Container) Wait(ctx context.Context, port uint16) error {
return WaitForDockerContainer(ctx, string(c.ContainerID[:]), port)
}
type ContainerStatus struct {
Status string
ExitCode int
}
func (c *Container) Status(ctx context.Context) (*ContainerStatus, error) {
containerJSON, err := Flux.dockerClient.ContainerInspect(ctx, string(c.ContainerID[:]))
if err != nil {
return nil, err
}
containerStatus := &ContainerStatus{
Status: containerJSON.State.Status,
ExitCode: containerJSON.State.ExitCode,
}
return containerStatus, nil
}
func (c *Container) GetIp() (string, error) {
containerJSON, err := Flux.dockerClient.ContainerInspect(context.Background(), string(c.ContainerID[:]))
if err != nil {
return "", err
}
ip := containerJSON.NetworkSettings.IPAddress
return ip, nil
}
// Stops and deletes a container from the docker daemon
func RemoveDockerContainer(ctx context.Context, containerID string) error {
if err := Flux.dockerClient.ContainerStop(ctx, containerID, container.StopOptions{}); err != nil {
return fmt.Errorf("failed to stop container (%s): %v", containerID[:12], err)
}
if err := Flux.dockerClient.ContainerRemove(ctx, containerID, container.RemoveOptions{}); err != nil {
return fmt.Errorf("failed to remove container (%s): %v", containerID[:12], err)
}
return nil
}
// scuffed af "health check" for docker containers
func WaitForDockerContainer(ctx context.Context, containerID string, containerPort uint16) error {
ctx, cancel := context.WithTimeout(ctx, 30*time.Second)
defer cancel()
for {
select {
case <-ctx.Done():
return fmt.Errorf("container failed to become ready in time")
default:
containerJSON, err := Flux.dockerClient.ContainerInspect(ctx, containerID)
if err != nil {
return err
}
if containerJSON.State.Running {
resp, err := http.Get(fmt.Sprintf("http://%s:%d/", containerJSON.NetworkSettings.IPAddress, containerPort))
if err == nil && resp.StatusCode == http.StatusOK {
return nil
}
}
time.Sleep(time.Second)
}
}
}
func GracefullyRemoveDockerContainer(ctx context.Context, containerID string) error {
timeout := 30
err := Flux.dockerClient.ContainerStop(ctx, containerID, container.StopOptions{
Timeout: &timeout,
})
if err != nil {
return fmt.Errorf("failed to stop container: %v", err)
}
ctx, cancel := context.WithTimeout(ctx, time.Duration(timeout)*time.Second)
defer cancel()
for {
select {
case <-ctx.Done():
return Flux.dockerClient.ContainerRemove(ctx, containerID, container.RemoveOptions{})
default:
containerJSON, err := Flux.dockerClient.ContainerInspect(ctx, containerID)
if err != nil {
return err
}
if !containerJSON.State.Running {
return Flux.dockerClient.ContainerRemove(ctx, containerID, container.RemoveOptions{})
}
time.Sleep(time.Second)
}
}
}
func RemoveVolume(ctx context.Context, volumeID string) error {
logger.Debugw("Removed volume", zap.String("volume_id", volumeID))
if err := Flux.dockerClient.VolumeRemove(ctx, volumeID, true); err != nil {
return fmt.Errorf("failed to remove volume (%s): %v", volumeID, err)
}
return nil
}
func findExistingDockerContainers(ctx context.Context, containerPrefix string) (map[string]bool, error) {
containers, err := Flux.dockerClient.ContainerList(ctx, container.ListOptions{
All: true,
})
if err != nil {
return nil, err
}
existingContainers := make(map[string]bool)
for _, container := range containers {
if strings.HasPrefix(container.Names[0], fmt.Sprintf("/%s-", containerPrefix)) {
existingContainers[container.ID] = true
}
}
return existingContainers, nil
}
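
As a usage sketch of the CreateContainer flow above: a supplemental (non-head) container is pulled from Docker Hub, created against an existing deployment, and then started. The "redis" names are hypothetical placeholders:

// Sketch: attach one supplemental container to an existing deployment.
func addSupplementalContainer(ctx context.Context, deployment *Deployment, projectName string) error {
    cfg := &pkg.Container{
        Name:      "redis",   // hypothetical; becomes a host entry the head container can reach
        ImageName: "redis:7", // pulled from Docker Hub because head == false
    }
    c, err := CreateContainer(ctx, cfg, projectName, false, deployment)
    if err != nil {
        return fmt.Errorf("failed to create container: %v", err)
    }
    // initial == true: a freshly created container's extra hosts are already correct
    return c.Start(ctx, true)
}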

internal/server/deploy.go Normal file

@@ -0,0 +1,518 @@
package server
import (
"bufio"
"context"
"database/sql"
"encoding/json"
"fmt"
"io"
"mime/multipart"
"net/http"
"os"
"os/exec"
"path/filepath"
"sync"
"github.com/joho/godotenv"
"github.com/juls0730/flux/pkg"
"go.uber.org/zap"
)
var (
appInsertStmt *sql.Stmt
)
type DeployRequest struct {
Config multipart.File `form:"config"`
Code multipart.File `form:"code"`
}
type DeployResponse struct {
App App `json:"app"`
}
type DeploymentLock struct {
mu sync.Mutex
deployed map[string]context.CancelFunc
}
func NewDeploymentLock() *DeploymentLock {
return &DeploymentLock{
deployed: make(map[string]context.CancelFunc),
}
}
func (dt *DeploymentLock) StartDeployment(appName string, ctx context.Context) (context.Context, error) {
dt.mu.Lock()
defer dt.mu.Unlock()
// Check if the app is already being deployed
if _, exists := dt.deployed[appName]; exists {
return nil, fmt.Errorf("app %s is already being deployed", appName)
}
// Create a context that can be cancelled
ctx, cancel := context.WithCancel(ctx)
// Store the cancel function
dt.deployed[appName] = cancel
return ctx, nil
}
func (dt *DeploymentLock) CompleteDeployment(appName string) {
dt.mu.Lock()
defer dt.mu.Unlock()
// Remove the app from deployed tracking
if cancel, exists := dt.deployed[appName]; exists {
// Cancel the context
cancel()
// Remove from map
delete(dt.deployed, appName)
}
}
var deploymentLock = NewDeploymentLock()
type DeploymentEvent struct {
Stage string `json:"stage"`
Message interface{} `json:"message"`
StatusCode int `json:"status,omitempty"`
}
func (s *FluxServer) DeployHandler(w http.ResponseWriter, r *http.Request) {
if Flux.appManager == nil {
panic("App manager is nil")
}
w.Header().Set("Content-Type", "test/event-stream")
w.Header().Set("Cache-Control", "no-cache")
w.Header().Set("Connection", "keep-alive")
err := r.ParseMultipartForm(10 << 30) // 10 GiB
if err != nil {
logger.Errorw("Failed to parse multipart form", zap.Error(err))
http.Error(w, err.Error(), http.StatusBadRequest)
return
}
var deployRequest DeployRequest
deployRequest.Config, _, err = r.FormFile("config")
if err != nil {
http.Error(w, "No flux.json found", http.StatusBadRequest)
return
}
defer deployRequest.Config.Close()
projectConfig := new(pkg.ProjectConfig)
if err := json.NewDecoder(deployRequest.Config).Decode(&projectConfig); err != nil {
logger.Errorw("Failed to decode config", zap.Error(err))
http.Error(w, "Invalid flux.json", http.StatusBadRequest)
return
}
ctx, err := deploymentLock.StartDeployment(projectConfig.Name, r.Context())
if err != nil {
// This will happen if the app is already being deployed
http.Error(w, err.Error(), http.StatusConflict)
return
}
go func() {
<-ctx.Done()
deploymentLock.CompleteDeployment(projectConfig.Name)
}()
flusher, ok := w.(http.Flusher)
if !ok {
http.Error(w, "Streaming unsupported!", http.StatusInternalServerError)
return
}
w.WriteHeader(http.StatusMultiStatus)
eventChannel := make(chan DeploymentEvent, 10)
defer close(eventChannel)
var wg sync.WaitGroup
defer wg.Wait()
wg.Add(1)
go func(w http.ResponseWriter, flusher http.Flusher) {
defer wg.Done()
for {
select {
case <-ctx.Done():
return
case event, ok := <-eventChannel:
if !ok {
return
}
ev := pkg.DeploymentEvent{
Message: event.Message,
}
eventJSON, err := json.Marshal(ev)
if err != nil {
// Write error directly to ResponseWriter
jsonErr := json.NewEncoder(w).Encode(err)
if jsonErr != nil {
fmt.Fprint(w, "data: {\"message\": \"Error encoding error\"}\n\n")
return
}
fmt.Fprintf(w, "data: %s\n\n", err.Error())
if flusher != nil {
flusher.Flush()
}
return
}
fmt.Fprintf(w, "event: %s\n", event.Stage)
fmt.Fprintf(w, "data: %s\n\n", eventJSON)
if flusher != nil {
flusher.Flush()
}
if event.Stage == "error" || event.Stage == "complete" {
return
}
}
}
}(w, flusher)
eventChannel <- DeploymentEvent{
Stage: "start",
Message: "Uploading code",
}
deployRequest.Code, _, err = r.FormFile("code")
if err != nil {
eventChannel <- DeploymentEvent{
Stage: "error",
Message: "No code archive found",
StatusCode: http.StatusBadRequest,
}
return
}
defer deployRequest.Code.Close()
if projectConfig.Name == "" || projectConfig.Url == "" || projectConfig.Port == 0 {
eventChannel <- DeploymentEvent{
Stage: "error",
Message: "Invalid flux.json, a name, url, and port must be specified",
StatusCode: http.StatusBadRequest,
}
return
}
logger.Infow("Deploying project", zap.String("name", projectConfig.Name), zap.String("url", projectConfig.Url))
projectPath, err := s.UploadAppCode(deployRequest.Code, projectConfig)
if err != nil {
logger.Infow("Failed to upload code", zap.Error(err))
eventChannel <- DeploymentEvent{
Stage: "error",
Message: fmt.Sprintf("Failed to upload code: %s", err),
StatusCode: http.StatusInternalServerError,
}
return
}
// We need to pre-process EnvFile since docker has no concept of where the file is; read it
// and fold its contents into the environment list so that docker can find the variables later
if projectConfig.EnvFile != "" {
envBytes, err := os.Open(filepath.Join(projectPath, projectConfig.EnvFile))
if err != nil {
logger.Errorw("Failed to open env file", zap.Error(err))
eventChannel <- DeploymentEvent{
Stage: "error",
Message: fmt.Sprintf("Failed to open env file: %v", err),
StatusCode: http.StatusInternalServerError,
}
return
}
defer envBytes.Close()
envVars, err := godotenv.Parse(envBytes)
if err != nil {
logger.Errorw("Failed to parse env file", zap.Error(err))
eventChannel <- DeploymentEvent{
Stage: "error",
Message: fmt.Sprintf("Failed to parse env file: %v", err),
StatusCode: http.StatusInternalServerError,
}
return
}
for key, value := range envVars {
projectConfig.Environment = append(projectConfig.Environment, fmt.Sprintf("%s=%s", key, value))
}
}
pipeGroup := sync.WaitGroup{}
streamPipe := func(pipe io.ReadCloser) {
// the caller does pipeGroup.Add(1) before starting this goroutine; doing it here would race with pipeGroup.Wait()
defer pipeGroup.Done()
defer pipe.Close()
scanner := bufio.NewScanner(pipe)
for scanner.Scan() {
line := scanner.Text()
eventChannel <- DeploymentEvent{
Stage: "cmd_output",
Message: line,
}
}
if err := scanner.Err(); err != nil {
eventChannel <- DeploymentEvent{
Stage: "error",
Message: fmt.Sprintf("Failed to read pipe: %s", err),
}
logger.Errorw("Error reading pipe", zap.Error(err))
}
}
logger.Debugw("Preparing project", zap.String("name", projectConfig.Name))
eventChannel <- DeploymentEvent{
Stage: "preparing",
Message: "Preparing project",
}
reader, writer := io.Pipe()
prepareCmd := exec.Command("go", "generate")
prepareCmd.Dir = projectPath
prepareCmd.Stdout = writer
prepareCmd.Stderr = writer
err = prepareCmd.Start()
if err != nil {
logger.Errorw("Failed to prepare project", zap.Error(err))
eventChannel <- DeploymentEvent{
Stage: "error",
Message: fmt.Sprintf("Failed to prepare project: %s", err),
StatusCode: http.StatusInternalServerError,
}
return
}
pipeGroup.Add(1)
go streamPipe(reader)
err = prepareCmd.Wait()
// close the write end so the scanner sees EOF, then wait for the stream goroutine to drain
writer.Close()
pipeGroup.Wait()
if err != nil {
logger.Errorw("Failed to prepare project", zap.Error(err))
eventChannel <- DeploymentEvent{
Stage: "error",
Message: fmt.Sprintf("Failed to prepare project: %s", err),
StatusCode: http.StatusInternalServerError,
}
return
}
eventChannel <- DeploymentEvent{
Stage: "building",
Message: "Building project image",
}
reader, writer = io.Pipe()
logger.Debugw("Building image for project", zap.String("name", projectConfig.Name))
imageName := fmt.Sprintf("flux_%s-image", projectConfig.Name)
buildCmd := exec.Command("pack", "build", imageName, "--builder", s.config.Builder)
buildCmd.Dir = projectPath
buildCmd.Stdout = writer
buildCmd.Stderr = writer
err = buildCmd.Start()
if err != nil {
logger.Errorw("Failed to build image", zap.Error(err))
eventChannel <- DeploymentEvent{
Stage: "error",
Message: fmt.Sprintf("Failed to build image: %s", err),
StatusCode: http.StatusInternalServerError,
}
return
}
pipeGroup.Add(1)
go streamPipe(reader)
err = buildCmd.Wait()
writer.Close()
pipeGroup.Wait()
if err != nil {
logger.Errorw("Failed to build image", zap.Error(err))
eventChannel <- DeploymentEvent{
Stage: "error",
Message: fmt.Sprintf("Failed to build image: %s", err),
StatusCode: http.StatusInternalServerError,
}
return
}
app := Flux.appManager.GetApp(projectConfig.Name)
eventChannel <- DeploymentEvent{
Stage: "creating",
Message: "Creating deployment",
}
if app == nil {
app, err = CreateApp(ctx, imageName, projectPath, projectConfig)
} else {
err = app.Upgrade(ctx, imageName, projectPath, projectConfig)
}
if err != nil {
logger.Errorw("Failed to deploy app", zap.Error(err))
eventChannel <- DeploymentEvent{
Stage: "error",
Message: fmt.Sprintf("Failed to upgrade app: %s", err),
StatusCode: http.StatusInternalServerError,
}
return
}
eventChannel <- DeploymentEvent{
Stage: "complete",
Message: app,
}
logger.Infow("App deployed successfully", zap.String("name", app.Name))
}
func (s *FluxServer) StartDeployHandler(w http.ResponseWriter, r *http.Request) {
name := r.PathValue("name")
app := Flux.appManager.GetApp(name)
if app == nil {
http.Error(w, "App not found", http.StatusNotFound)
return
}
status, err := app.Deployment.Status(r.Context())
if err != nil {
http.Error(w, err.Error(), http.StatusInternalServerError)
return
}
if status == "running" {
http.Error(w, "App is already running", http.StatusBadRequest)
return
}
err = app.Deployment.Start(r.Context())
if err != nil {
http.Error(w, err.Error(), http.StatusInternalServerError)
return
}
if app.Deployment.Proxy == nil {
app.Deployment.Proxy, _ = app.Deployment.NewDeploymentProxy()
}
w.WriteHeader(http.StatusOK)
}
func (s *FluxServer) StopDeployHandler(w http.ResponseWriter, r *http.Request) {
name := r.PathValue("name")
app := Flux.appManager.GetApp(name)
if app == nil {
http.Error(w, "App not found", http.StatusNotFound)
return
}
status, err := app.Deployment.Status(r.Context())
if err != nil {
http.Error(w, err.Error(), http.StatusInternalServerError)
return
}
if status == "stopped" || status == "failed" {
http.Error(w, "App is already stopped", http.StatusBadRequest)
return
}
err = app.Deployment.Stop(r.Context())
if err != nil {
http.Error(w, err.Error(), http.StatusInternalServerError)
return
}
w.WriteHeader(http.StatusOK)
}
func (s *FluxServer) DeleteDeployHandler(w http.ResponseWriter, r *http.Request) {
name := r.PathValue("name")
logger.Debugw("Deleting deployment", zap.String("name", name))
err := Flux.appManager.DeleteApp(name)
if err != nil {
logger.Errorw("Failed to delete app", zap.Error(err))
http.Error(w, err.Error(), http.StatusNotFound)
return
}
w.WriteHeader(http.StatusOK)
}
func (s *FluxServer) DeleteAllDeploymentsHandler(w http.ResponseWriter, r *http.Request) {
for _, app := range Flux.appManager.GetAllApps() {
err := Flux.appManager.DeleteApp(app.Name)
if err != nil {
logger.Errorw("Failed to remove app", zap.Error(err))
http.Error(w, err.Error(), http.StatusInternalServerError)
return
}
}
w.WriteHeader(http.StatusOK)
}
func (s *FluxServer) ListAppsHandler(w http.ResponseWriter, r *http.Request) {
// for each app, get the deployment status
var apps []pkg.App
for _, app := range Flux.appManager.GetAllApps() {
var extApp pkg.App
deploymentStatus, err := app.Deployment.Status(r.Context())
if err != nil {
logger.Errorw("Failed to get deployment status", zap.Error(err))
http.Error(w, err.Error(), http.StatusInternalServerError)
return
}
extApp.ID = app.ID
extApp.Name = app.Name
extApp.DeploymentID = app.DeploymentID
extApp.DeploymentStatus = deploymentStatus
apps = append(apps, extApp)
}
w.Header().Set("Content-Type", "application/json")
json.NewEncoder(w).Encode(apps)
}
func (s *FluxServer) DaemonInfoHandler(w http.ResponseWriter, r *http.Request) {
w.Header().Set("Content-Type", "application/json")
json.NewEncoder(w).Encode(pkg.Info{
Compression: s.config.Compression,
Version: pkg.Version,
})
}
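
For illustration, a rough client-side sketch of consuming the deploy stream. The transport details are assumptions; the "event:"/"data:" framing matches what DeployHandler writes above (strings is assumed to be imported):

// Sketch: read SSE events from the body of an already-issued deploy request.
func watchDeploy(body io.Reader) error {
    scanner := bufio.NewScanner(body)
    for scanner.Scan() {
        line := scanner.Text()
        switch {
        case strings.HasPrefix(line, "event: "):
            fmt.Println("stage:", strings.TrimPrefix(line, "event: "))
        case strings.HasPrefix(line, "data: "):
            fmt.Println("payload:", strings.TrimPrefix(line, "data: "))
        }
    }
    return scanner.Err()
}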


@@ -0,0 +1,224 @@
package server
import (
"context"
"database/sql"
"fmt"
"github.com/juls0730/flux/pkg"
"go.uber.org/zap"
)
var (
deploymentInsertStmt *sql.Stmt
)
type Deployment struct {
ID int64 `json:"id"`
Head *Container `json:"head,omitempty"`
Containers []*Container `json:"containers,omitempty"`
Proxy *DeploymentProxy `json:"-"`
URL string `json:"url"`
Port uint16 `json:"port"`
}
// Creates a deployment row in the database, containing the URL the app should be hosted on (its public hostname)
// and the port that the web server is listening on
func CreateDeployment(port uint16, appUrl string, db *sql.DB) (*Deployment, error) {
var deployment Deployment
err := deploymentInsertStmt.QueryRow(appUrl, port).Scan(&deployment.ID, &deployment.URL, &deployment.Port)
if err != nil {
logger.Errorw("Failed to insert deployment", zap.Error(err))
return nil, err
}
return &deployment, nil
}
// Takes an existing deployment, and gracefully upgrades the app to a new image
func (deployment *Deployment) Upgrade(ctx context.Context, projectConfig *pkg.ProjectConfig, imageName string, projectPath string) error {
existingContainers, err := findExistingDockerContainers(ctx, projectConfig.Name)
if err != nil {
return fmt.Errorf("failed to find existing containers: %v", err)
}
// we only upgrade the head container, in the future we might want to allow upgrading supplemental containers, but this should work just fine for now.
container, err := deployment.Head.Upgrade(ctx, imageName, projectPath, projectConfig)
if err != nil {
logger.Errorw("Failed to upgrade container", zap.Error(err))
return err
}
deployment.Head = container
deployment.Containers = append(deployment.Containers, container)
logger.Debugw("Starting container", zap.ByteString("container_id", container.ContainerID[:12]))
err = container.Start(ctx, true)
if err != nil {
logger.Errorw("Failed to start container", zap.Error(err))
return err
}
if err := container.Wait(ctx, projectConfig.Port); err != nil {
logger.Errorw("Failed to wait for container", zap.Error(err))
return err
}
if _, err := Flux.db.Exec("UPDATE deployments SET url = ?, port = ? WHERE id = ?", projectConfig.Url, projectConfig.Port, deployment.ID); err != nil {
logger.Errorw("Failed to update deployment", zap.Error(err))
return err
}
// Create a new proxy that points to the new head, and replace the old one, but ensure that the old one is gracefully drained of connections
oldProxy := deployment.Proxy
deployment.Proxy, err = deployment.NewDeploymentProxy()
if err != nil {
logger.Errorw("Failed to create deployment proxy", zap.Error(err))
return err
}
tx, err := Flux.db.Begin()
if err != nil {
logger.Errorw("Failed to begin transaction", zap.Error(err))
return err
}
var containers []*Container
var oldContainers []*Container
// delete the old head container from the database, and update the deployment's container list
for _, container := range deployment.Containers {
if existingContainers[string(container.ContainerID[:])] {
logger.Debugw("Deleting container from db", zap.ByteString("container_id", container.ContainerID[:12]))
_, err = tx.Exec("DELETE FROM containers WHERE id = ?", container.ID)
oldContainers = append(oldContainers, container)
if err != nil {
logger.Errorw("Failed to delete container", zap.Error(err))
tx.Rollback()
return err
}
continue
}
containers = append(containers, container)
}
if err := tx.Commit(); err != nil {
logger.Errorw("Failed to commit transaction", zap.Error(err))
return err
}
// gracefully shut down the old proxy, or if it doesn't exist, just remove the containers
if oldProxy != nil {
go oldProxy.GracefulShutdown(oldContainers)
} else {
for _, container := range oldContainers {
err := RemoveDockerContainer(context.Background(), string(container.ContainerID[:]))
if err != nil {
logger.Errorw("Failed to remove container", zap.Error(err))
}
}
}
deployment.Containers = containers
return nil
}
// Remove a deployment and all of its containers
func (d *Deployment) Remove(ctx context.Context) error {
for _, container := range d.Containers {
err := container.Remove(ctx)
if err != nil {
logger.Errorf("Failed to remove container (%s): %v\n", container.ContainerID[:12], err)
return err
}
}
Flux.proxy.RemoveDeployment(d)
_, err := Flux.db.Exec("DELETE FROM deployments WHERE id = ?", d.ID)
if err != nil {
logger.Errorw("Failed to delete deployment", zap.Error(err))
return err
}
return nil
}
func (d *Deployment) Start(ctx context.Context) error {
for _, container := range d.Containers {
err := container.Start(ctx, false)
if err != nil {
logger.Errorf("Failed to start container (%s): %v\n", container.ContainerID[:12], err)
return err
}
}
if d.Proxy == nil {
d.Proxy, _ = d.NewDeploymentProxy()
Flux.proxy.AddDeployment(d)
}
return nil
}
func (d *Deployment) Stop(ctx context.Context) error {
for _, container := range d.Containers {
err := container.Stop(ctx)
if err != nil {
logger.Errorf("Failed to start container (%s): %v\n", container.ContainerID[:12], err)
return err
}
}
Flux.proxy.RemoveDeployment(d)
d.Proxy = nil
return nil
}
// Returns the status of a deployment: "running", "failed", "stopped", or "pending"; errors if the
// containers are not all in the same state
func (d *Deployment) Status(ctx context.Context) (string, error) {
var status *ContainerStatus
if d == nil {
return "", fmt.Errorf("deployment is nil")
}
if len(d.Containers) == 0 {
return "", fmt.Errorf("deployment has no containers")
}
for _, container := range d.Containers {
containerStatus, err := container.Status(ctx)
if err != nil {
logger.Errorw("Failed to get container status", zap.Error(err))
return "", err
}
// if not all containers are in the same state
if status != nil && status.Status != containerStatus.Status {
return "", fmt.Errorf("malformed deployment")
}
status = containerStatus
}
switch status.Status {
case "running":
return "running", nil
case "exited":
if status.ExitCode != 0 {
// a non-zero exit code in unix terminology means the program did not complete successfully
return "failed", nil
}
return "stopped", nil
default:
return "pending", nil
}
}
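
Status reports a point-in-time state, so callers that need a deployment to be up have to poll. A minimal sketch, assuming the caller supplies a context with a deadline and that time is imported:

// Sketch: poll a deployment until Status reports "running" or ctx expires.
func waitUntilRunning(ctx context.Context, d *Deployment) error {
    for {
        status, err := d.Status(ctx)
        if err != nil {
            return err
        }
        switch status {
        case "running":
            return nil
        case "failed":
            return fmt.Errorf("deployment failed")
        }
        select {
        case <-ctx.Done():
            return ctx.Err()
        case <-time.After(time.Second):
        }
    }
}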

internal/server/proxy.go Normal file

@@ -0,0 +1,128 @@
package server
import (
"context"
"fmt"
"net/http"
"net/http/httputil"
"net/url"
"sync"
"sync/atomic"
"time"
"go.uber.org/zap"
)
type Proxy struct {
// map[string]*Deployment
deployments sync.Map
}
// Stops forwarding traffic to a deployment
func (p *Proxy) RemoveDeployment(deployment *Deployment) {
p.deployments.Delete(deployment.URL)
}
// Starts forwarding traffic to a deployment. The deployment must be ready to receive requests before this is called.
func (p *Proxy) AddDeployment(deployment *Deployment) {
logger.Debugw("Adding deployment", zap.String("url", deployment.URL))
p.deployments.Store(deployment.URL, deployment)
}
// This function is responsible for taking an http request and forwarding it to the correct deployment
func (p *Proxy) ServeHTTP(w http.ResponseWriter, r *http.Request) {
host := r.Host
deployment, ok := p.deployments.Load(host)
if !ok {
http.Error(w, "Not found", http.StatusNotFound)
return
}
d := deployment.(*Deployment)
// incremented here, decremented by the proxy's ModifyResponse hook once the backend responds
atomic.AddInt64(&d.Proxy.activeRequests, 1)
d.Proxy.proxy.ServeHTTP(w, r)
}
type DeploymentProxy struct {
deployment *Deployment
proxy *httputil.ReverseProxy
gracePeriod time.Duration
activeRequests int64
}
// Creates a proxy for a given deployment
func (deployment *Deployment) NewDeploymentProxy() (*DeploymentProxy, error) {
if deployment == nil {
return nil, fmt.Errorf("deployment is nil")
}
containerJSON, err := Flux.dockerClient.ContainerInspect(context.Background(), string(deployment.Head.ContainerID[:]))
if err != nil {
return nil, err
}
if containerJSON.NetworkSettings.IPAddress == "" {
return nil, fmt.Errorf("no IP address found for container %s", deployment.Head.ContainerID[:12])
}
containerUrl, err := url.Parse(fmt.Sprintf("http://%s:%d", containerJSON.NetworkSettings.IPAddress, deployment.Port))
if err != nil {
return nil, err
}
proxy := &httputil.ReverseProxy{
Director: func(req *http.Request) {
req.URL = &url.URL{
Scheme: containerUrl.Scheme,
Host: containerUrl.Host,
Path: req.URL.Path,
}
req.Host = containerUrl.Host
},
Transport: &http.Transport{
MaxIdleConns: 100,
IdleConnTimeout: 90 * time.Second,
MaxIdleConnsPerHost: 100,
},
ModifyResponse: func(resp *http.Response) error {
atomic.AddInt64(&deployment.Proxy.activeRequests, -1)
return nil
},
}
return &DeploymentProxy{
deployment: deployment,
proxy: proxy,
gracePeriod: time.Second * 30,
activeRequests: 0,
}, nil
}
// Drains connections from a proxy
func (dp *DeploymentProxy) GracefulShutdown(oldContainers []*Container) {
ctx, cancel := context.WithTimeout(context.Background(), dp.gracePeriod)
defer cancel()
done := false
for !done {
select {
case <-ctx.Done():
done = true
default:
if atomic.LoadInt64(&dp.activeRequests) == 0 {
done = true
}
time.Sleep(time.Second)
}
}
for _, container := range oldContainers {
err := RemoveDockerContainer(context.Background(), string(container.ContainerID[:]))
if err != nil {
logger.Errorw("Failed to remove container", zap.Error(err))
}
}
}
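
The proxy routes purely on the request's Host header, keyed by deployment.URL. A test-style sketch of the routing path (net/http/httptest is an assumption; the deployment is assumed to be started, with a live head container):

// Sketch: route one request through the proxy by Host header.
func exerciseProxy(p *Proxy, d *Deployment) *httptest.ResponseRecorder {
    p.AddDeployment(d) // requests whose Host equals d.URL now reach this deployment
    req := httptest.NewRequest(http.MethodGet, "http://"+d.URL+"/", nil)
    rec := httptest.NewRecorder()
    p.ServeHTTP(rec, req) // forwarded to the head container's IP and port
    return rec
}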


@@ -0,0 +1,28 @@
CREATE TABLE IF NOT EXISTS deployments (
id INTEGER PRIMARY KEY AUTOINCREMENT UNIQUE,
url TEXT NOT NULL UNIQUE,
port INTEGER NOT NULL
);
CREATE TABLE IF NOT EXISTS apps (
id INTEGER PRIMARY KEY AUTOINCREMENT UNIQUE,
name TEXT NOT NULL UNIQUE,
deployment_id INTEGER,
FOREIGN KEY(deployment_id) REFERENCES deployments(id)
);
CREATE TABLE IF NOT EXISTS containers (
id INTEGER PRIMARY KEY AUTOINCREMENT UNIQUE,
container_id TEXT NOT NULL,
head BOOLEAN NOT NULL,
deployment_id INTEGER NOT NULL,
FOREIGN KEY(deployment_id) REFERENCES deployments(id)
);
CREATE TABLE IF NOT EXISTS volumes (
id INTEGER PRIMARY KEY AUTOINCREMENT UNIQUE,
volume_id TEXT NOT NULL,
mountpoint TEXT NOT NULL,
container_id INTEGER NOT NULL,
FOREIGN KEY(container_id) REFERENCES containers(id)
);
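
The deployment_id and container_id foreign keys are what AppManager.Init walks with its per-row queries; for illustration only, the same shape expressed as a single join (a sketch, not what the code above runs):

// Sketch: fetch every app with its deployment URL and container rows in one query.
func listAppContainers(db *sql.DB) (*sql.Rows, error) {
    return db.Query(`
        SELECT a.name, d.url, c.container_id, c.head
        FROM apps a
        JOIN deployments d ON d.id = a.deployment_id
        JOIN containers c ON c.deployment_id = d.id`)
}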

internal/server/server.go Normal file

@@ -0,0 +1,282 @@
package server
import (
"archive/tar"
"compress/gzip"
"context"
"database/sql"
"encoding/json"
"fmt"
"io"
"net/http"
"os"
"path/filepath"
"strconv"
_ "embed"
"github.com/docker/docker/api/types/image"
"github.com/docker/docker/client"
"github.com/juls0730/flux/pkg"
_ "github.com/mattn/go-sqlite3"
"go.uber.org/zap"
"go.uber.org/zap/zapcore"
)
var (
//go:embed schema.sql
schemaBytes []byte
DefaultConfig = FluxServerConfig{
Builder: "paketobuildpacks/builder-jammy-tiny",
Compression: pkg.Compression{
Enabled: false,
Level: 0,
},
}
Flux *FluxServer
logger *zap.SugaredLogger
)
type FluxServerConfig struct {
Builder string `json:"builder"`
Compression pkg.Compression `json:"compression"`
}
type FluxServer struct {
config FluxServerConfig
db *sql.DB
proxy *Proxy
rootDir string
appManager *AppManager
dockerClient *client.Client
Logger *zap.SugaredLogger
}
func NewFluxServer() *FluxServer {
dockerClient, err := client.NewClientWithOpts(client.FromEnv)
if err != nil {
logger.Fatalw("Failed to create docker client", zap.Error(err))
}
rootDir := os.Getenv("FLUXD_ROOT_DIR")
if rootDir == "" {
rootDir = "/var/fluxd"
}
if err := os.MkdirAll(rootDir, 0755); err != nil {
logger.Fatalw("Failed to create fluxd directory", zap.Error(err))
}
db, err := sql.Open("sqlite3", filepath.Join(rootDir, "fluxd.db"))
if err != nil {
logger.Fatalw("Failed to open database", zap.Error(err))
}
_, err = db.Exec(string(schemaBytes))
if err != nil {
logger.Fatalw("Failed to create database schema", zap.Error(err))
}
err = PrepareDBStatements(db)
if err != nil {
logger.Fatalw("Failed to prepare database statements", zap.Error(err))
}
return &FluxServer{
db: db,
proxy: &Proxy{},
appManager: new(AppManager),
rootDir: rootDir,
dockerClient: dockerClient,
}
}
func (s *FluxServer) Stop() {
s.Logger.Sync()
}
func NewServer() *FluxServer {
verbosity, err := strconv.Atoi(os.Getenv("FLUXD_VERBOSITY"))
if err != nil {
verbosity = 0
}
config := zap.NewProductionConfig()
if os.Getenv("DEBUG") == "true" {
config = zap.NewDevelopmentConfig()
verbosity = -1
}
config.Level = zap.NewAtomicLevelAt(zapcore.Level(verbosity))
lameLogger, err := config.Build()
if err != nil {
// the package logger is not initialized yet, so panic instead of logging
panic(fmt.Sprintf("Failed to create logger: %v", err))
}
logger = lameLogger.Sugar()
Flux = NewFluxServer()
Flux.Logger = logger
var serverConfig FluxServerConfig
// parse config, if it doesnt exist, create it and use the default config
configPath := filepath.Join(Flux.rootDir, "config.json")
if _, err := os.Stat(configPath); err != nil {
if err := os.MkdirAll(Flux.rootDir, 0755); err != nil {
logger.Fatalw("Failed to create fluxd directory", zap.Error(err))
}
configBytes, err := json.Marshal(DefaultConfig)
if err != nil {
logger.Fatalw("Failed to marshal default config", zap.Error(err))
}
logger.Debugw("Config file not found creating default config file at", zap.String("path", configPath))
if err := os.WriteFile(configPath, configBytes, 0644); err != nil {
logger.Fatalw("Failed to write config file", zap.Error(err))
}
}
configFile, err := os.ReadFile(configPath)
if err != nil {
logger.Fatalw("Failed to read config file", zap.Error(err))
}
if err := json.Unmarshal(configFile, &serverConfig); err != nil {
logger.Fatalw("Failed to parse config file", zap.Error(err))
}
Flux.config = serverConfig
logger.Infof("Pulling builder image %s this may take a while...", serverConfig.Builder)
events, err := Flux.dockerClient.ImagePull(context.Background(), fmt.Sprintf("%s:latest", serverConfig.Builder), image.PullOptions{})
if err != nil {
logger.Fatalw("Failed to pull builder image", zap.Error(err))
}
// block until the image is pulled, then release the stream
io.Copy(io.Discard, events)
events.Close()
logger.Infow("Successfully pulled builder image", zap.String("image", serverConfig.Builder))
if err := os.MkdirAll(filepath.Join(Flux.rootDir, "apps"), 0755); err != nil {
logger.Fatalw("Failed to create apps directory", zap.Error(err))
}
Flux.appManager.Init()
port := os.Getenv("FLUXD_PROXY_PORT")
if port == "" {
port = "7465"
}
go func() {
logger.Infof("Proxy server starting on http://127.0.0.1:%s", port)
if err := http.ListenAndServe(fmt.Sprintf(":%s", port), Flux.proxy); err != nil {
logger.Fatalw("Failed to start proxy server", zap.Error(err))
}
}()
return Flux
}
// Handler for uploading a project to the server. We have to upload the entire project since we need to build the
// project ourselves to work with the buildpacks
func (s *FluxServer) UploadAppCode(code io.Reader, projectConfig *pkg.ProjectConfig) (string, error) {
var err error
projectPath := filepath.Join(s.rootDir, "apps", projectConfig.Name)
if err = os.MkdirAll(projectPath, 0755); err != nil {
logger.Errorw("Failed to create project directory", zap.Error(err))
return "", err
}
var gzReader *gzip.Reader
defer func() {
if gzReader != nil {
gzReader.Close()
}
}()
if s.config.Compression.Enabled {
gzReader, err = gzip.NewReader(code)
if err != nil {
logger.Infow("Failed to create gzip reader", zap.Error(err))
return "", err
}
}
var tarReader *tar.Reader
if gzReader != nil {
tarReader = tar.NewReader(gzReader)
} else {
tarReader = tar.NewReader(code)
}
logger.Infow("Extracting files for project", zap.String("project", projectPath))
for {
header, err := tarReader.Next()
if err == io.EOF {
break
}
if err != nil {
logger.Debugw("Failed to read tar header", zap.Error(err))
return "", err
}
// Construct the full path, rejecting entries that would escape the project directory
path := filepath.Join(projectPath, header.Name)
if !strings.HasPrefix(path, filepath.Clean(projectPath)+string(os.PathSeparator)) {
return "", fmt.Errorf("invalid path in archive: %s", header.Name)
}
// Handle different file types
switch header.Typeflag {
case tar.TypeDir:
if err = os.MkdirAll(path, 0755); err != nil {
logger.Debugw("Failed to extract directory", zap.Error(err))
return "", err
}
case tar.TypeReg:
if err = os.MkdirAll(filepath.Dir(path), 0755); err != nil {
logger.Debugw("Failed to extract directory", zap.Error(err))
return "", err
}
outFile, err := os.Create(path)
if err != nil {
logger.Debugw("Failed to extract file", zap.Error(err))
return "", err
}
if _, err = io.Copy(outFile, tarReader); err != nil {
outFile.Close()
logger.Debugw("Failed to copy file during extraction", zap.Error(err))
return "", err
}
// close eagerly instead of deferring inside the loop
outFile.Close()
}
}
return projectPath, nil
}
// TODO: split each prepare statement into its corresponding module so the statements are easier to find
func PrepareDBStatements(db *sql.DB) error {
var err error
appInsertStmt, err = db.Prepare("INSERT INTO apps (name, deployment_id) VALUES ($1, $2) RETURNING id, name, deployment_id")
if err != nil {
return fmt.Errorf("failed to prepare statement: %v", err)
}
containerInsertStmt, err = db.Prepare("INSERT INTO containers (container_id, head, deployment_id) VALUES (?, ?, ?) RETURNING id, container_id, head, deployment_id")
if err != nil {
return err
}
deploymentInsertStmt, err = db.Prepare("INSERT INTO deployments (url, port) VALUES ($1, $2) RETURNING id, url, port")
if err != nil {
logger.Errorw("Failed to prepare statement", zap.Error(err))
return err
}
return nil
}
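
The daemon's control API is not wired up in this file; NewServer only starts the proxy listener. A hypothetical entry point, with routes guessed from the handler names and the r.PathValue("name") usage above (Go 1.22 pattern syntax; the paths and port are assumptions):

// Hypothetical main package: mount the handlers and serve the control API.
func main() {
    s := server.NewServer()
    defer s.Stop()
    mux := http.NewServeMux()
    mux.HandleFunc("POST /deploy", s.DeployHandler)
    mux.HandleFunc("POST /apps/{name}/start", s.StartDeployHandler)
    mux.HandleFunc("POST /apps/{name}/stop", s.StopDeployHandler)
    mux.HandleFunc("DELETE /apps/{name}", s.DeleteDeployHandler)
    mux.HandleFunc("GET /apps", s.ListAppsHandler)
    log.Fatal(http.ListenAndServe(":8080", mux)) // control port is an assumption
}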