Massive architectural rework

This commit massively overhauls the project's structure to simplify
development. Most parts are now properly compartmentalized, and
dependencies are passed explicitly instead of through pervasive
global variables.
This commit is contained in:
Zoe
2025-05-02 12:15:40 -05:00
parent f4bf2ff5a1
commit c891c24843
50 changed files with 2684 additions and 2410 deletions

28
internal/models/app.go Normal file
View File

@@ -0,0 +1,28 @@
package models
import (
"context"
"database/sql"
"github.com/google/uuid"
docker "github.com/juls0730/flux/internal/docker"
"go.uber.org/zap"
)
// App is a deployed application and its currently active deployment.
type App struct {
	Id           uuid.UUID   `json:"id,omitempty"`   // primary key; stored as its raw byte slice in SQL (see Remove)
	Name         string      `json:"name,omitempty"` // human-readable application name
	Deployment   *Deployment `json:"-"`              // active deployment; excluded from JSON output
	DeploymentID int64       `json:"deployment_id,omitempty"`
}
// Remove tears down the app's deployment (containers and volumes) and then
// deletes the app row from the database. The app row is only deleted once the
// deployment has been removed successfully, so Docker resources are never
// orphaned without a database record pointing at them.
func (app *App) Remove(ctx context.Context, dockerClient *docker.DockerClient, db *sql.DB, logger *zap.SugaredLogger) error {
	// Previously the deployment-removal error was silently dropped, which
	// could delete the app row while its containers were still running.
	if app.Deployment != nil {
		if err := app.Deployment.Remove(ctx, dockerClient, db, logger); err != nil {
			logger.Errorw("Failed to remove deployment", zap.Error(err))
			return err
		}
	}

	// app.Id[:] passes the UUID as its raw 16-byte slice, matching how it is stored.
	_, err := db.ExecContext(ctx, "DELETE FROM apps WHERE id = ?", app.Id[:])
	if err != nil {
		logger.Errorw("Failed to delete app", zap.Error(err))
		return err
	}

	return nil
}

View File

@@ -0,0 +1,354 @@
package models
import (
"context"
"database/sql"
"fmt"
"io"
"github.com/docker/docker/api/types/container"
"github.com/docker/docker/api/types/image"
docker "github.com/juls0730/flux/internal/docker"
"github.com/juls0730/flux/pkg"
"go.uber.org/zap"
)
// Volume is the database record for a docker volume mounted into a container.
type Volume struct {
	ID          int64  `json:"id"`           // primary key in the volumes table
	Mountpoint  string `json:"mountpoint"`   // path the volume is mounted at inside the container
	VolumeID    string `json:"volume_id"`    // docker daemon volume identifier
	ContainerID string `json:"container_id"` // docker container this volume is attached to
}
// Remove deletes the volume's database row and then removes the backing
// volume from the docker daemon.
func (v *Volume) Remove(ctx context.Context, dockerClient *docker.DockerClient, db *sql.DB, logger *zap.SugaredLogger) error {
	logger.Debugw("Removing volume", zap.String("volume_id", v.VolumeID))

	if _, err := db.ExecContext(ctx, "DELETE FROM volumes WHERE volume_id = ?", v.VolumeID); err != nil {
		logger.Errorw("Failed to delete volume", zap.Error(err))
		return err
	}

	return dockerClient.DeleteDockerVolume(ctx, v.VolumeID)
}
// Container is a single docker container belonging to a deployment, together
// with its persistent volumes.
type Container struct {
	ID           int64           `json:"id"`   // primary key in the containers table
	Name         string          `json:"name"` // name of the container in the docker daemon
	ContainerID  docker.DockerID `json:"container_id"`
	Head         bool            `json:"head"`          // if the container is the head of the deployment
	FriendlyName string          `json:"friendly_name"` // name used by other containers to reach this container
	Volumes      []*Volume       `json:"volumes"`
	Deployment   *Deployment     `json:"-"` // back-reference to the owning deployment; not serialized
	DeploymentID int64           `json:"deployment_id"`
}
// CreateContainer creates a container given a container configuration and a
// deployment. This will do a few things:
//
//  1. Create the container in the docker daemon
//
//  2. Create the volumes for the container (the head additionally gets an
//     implicit /workspace volume for persistent project data)
//
//  3. Insert the container and volumes into the database
//
// This will not mess with containers already in the Deployment object; it is
// expected that this function will only be called when the app is initially
// created.
func CreateContainer(ctx context.Context, imageName string, friendlyName string, head bool, environment []string, containerVols []pkg.Volume, deployment *Deployment, logger *zap.SugaredLogger, dockerClient *docker.DockerClient, db *sql.DB) (c *Container, err error) {
	if friendlyName == "" {
		return nil, fmt.Errorf("container friendly name is empty")
	}
	if imageName == "" {
		return nil, fmt.Errorf("container image name is empty")
	}

	logger.Debugw("Creating container with image", zap.String("image", imageName))

	var volumes []*docker.DockerVolume

	// In the head container we have a default volume where the project is
	// mounted. This is important so that, if the project uses sqlite for
	// example, the data is not lost the second the container turns off.
	if head {
		vol, err := dockerClient.CreateDockerVolume(ctx)
		if err != nil {
			logger.Errorw("Failed to create head's workspace volume", zap.Error(err))
			return nil, err
		}

		vol.Mountpoint = "/workspace"
		volumes = append(volumes, vol)
	}

	for _, containerVolume := range containerVols {
		// Validate the mountpoint before creating the docker volume so that an
		// invalid config does not leak an unused volume in the daemon.
		if containerVolume.Mountpoint == "" {
			return nil, fmt.Errorf("mountpoint is empty")
		}
		if containerVolume.Mountpoint == "/workspace" || containerVolume.Mountpoint == "/" {
			return nil, fmt.Errorf("invalid mountpoint")
		}

		vol, err := dockerClient.CreateDockerVolume(ctx)
		if err != nil {
			logger.Errorw("Failed to create volume", zap.Error(err))
			return nil, err
		}

		vol.Mountpoint = containerVolume.Mountpoint
		volumes = append(volumes, vol)
	}

	// if the container is the head, build a list of hostnames that the container can reach by name for this deployment
	// TODO: this host list should be consistent across all containers in the deployment, not just the head
	var hosts []string
	if head {
		logger.Debug("Building host list")
		for _, container := range deployment.Containers() {
			ip, err := container.GetIp(dockerClient, logger)
			if err != nil {
				logger.Errorw("Failed to get container ip", zap.Error(err))
				return nil, err
			}

			hosts = append(hosts, fmt.Sprintf("%s:%s", container.FriendlyName, ip))
		}
	}

	// if the container is not the head, pull the image from the registry
	if !head {
		logger.Debugw("Pulling image", zap.String("image", imageName))
		pullResp, err := dockerClient.ImagePull(ctx, imageName, image.PullOptions{})
		if err != nil {
			logger.Errorw("Failed to pull image", zap.Error(err))
			return nil, err
		}

		// ImagePull is asynchronous: drain the progress stream to block until
		// the pull completes, and close it so the connection can be reused.
		_, copyErr := io.Copy(io.Discard, pullResp)
		pullResp.Close()
		if copyErr != nil {
			logger.Errorw("Failed to pull image", zap.Error(copyErr))
			return nil, copyErr
		}
	}

	logger.Debugw("Creating container", zap.String("image", imageName))
	dockerContainer, err := dockerClient.CreateDockerContainer(ctx, imageName, volumes, environment, hosts, nil)
	if err != nil {
		logger.Errorw("Failed to create container", zap.Error(err))
		return nil, err
	}

	c = &Container{
		ContainerID:  dockerContainer.ID,
		Name:         dockerContainer.Name,
		FriendlyName: friendlyName,
	}

	// BUG FIX: attach the created docker volumes to the container so they are
	// persisted below. Previously c.Volumes was never populated, so the insert
	// loop iterated an empty slice and volume rows were silently never written.
	for _, vol := range volumes {
		c.Volumes = append(c.Volumes, &Volume{
			VolumeID:   vol.VolumeID,
			Mountpoint: vol.Mountpoint,
		})
	}

	err = db.QueryRowContext(ctx, "INSERT INTO containers (container_id, head, deployment_id) VALUES (?, ?, ?) RETURNING id, container_id, head, deployment_id", string(c.ContainerID), head, deployment.ID).Scan(&c.ID, &c.ContainerID, &c.Head, &c.DeploymentID)
	if err != nil {
		logger.Errorw("Failed to insert container", zap.Error(err))
		return nil, err
	}

	tx, err := db.Begin()
	if err != nil {
		logger.Errorw("Failed to begin transaction", zap.Error(err))
		return nil, err
	}

	volumeInsertStmt, err := tx.Prepare("INSERT INTO volumes (volume_id, mountpoint, container_id) VALUES (?, ?, ?) RETURNING id, volume_id, mountpoint, container_id")
	if err != nil {
		logger.Errorw("Failed to prepare statement", zap.Error(err))
		tx.Rollback()
		return nil, err
	}
	defer volumeInsertStmt.Close()

	for _, vol := range c.Volumes {
		logger.Debugw("Inserting volume", zap.String("volume_id", vol.VolumeID), zap.String("mountpoint", vol.Mountpoint), zap.String("container_id", string(c.ContainerID)))
		err = volumeInsertStmt.QueryRow(vol.VolumeID, vol.Mountpoint, c.ContainerID).Scan(&vol.ID, &vol.VolumeID, &vol.Mountpoint, &vol.ContainerID)
		if err != nil {
			logger.Errorw("Failed to insert volume", zap.Error(err))
			tx.Rollback()
			return nil, err
		}
	}

	if err = tx.Commit(); err != nil {
		logger.Errorw("Failed to commit transaction", zap.Error(err))
		return nil, err
	}

	c.Deployment = deployment
	deployment.AppendContainer(c)

	return c, nil
}
// Upgrade replaces the container with a new one running imageName, updating
// the Container in place (ID, ContainerID, Head, DeploymentID are refreshed
// from the new database row). The replacement reuses the old container's
// volumes and extra hosts; disposing of the old docker container is the
// caller's responsibility (see Deployment.Upgrade).
func (c *Container) Upgrade(ctx context.Context, imageName string, environment []string, dockerClient *docker.DockerClient, db *sql.DB, logger *zap.SugaredLogger) error {
	logger.Debugw("Upgrading container", zap.String("container_id", string(c.ContainerID[:12])))

	if c.Volumes == nil {
		return fmt.Errorf("no volumes found for container %s", c.ContainerID[:12])
	}

	// Inspect the running container so the replacement inherits its extra
	// hosts. Uses the caller's ctx (previously context.Background(), which
	// ignored cancellation).
	containerJSON, err := dockerClient.ContainerInspect(ctx, c.ContainerID)
	if err != nil {
		return err
	}
	hosts := containerJSON.HostConfig.ExtraHosts

	var dockerVolumes []*docker.DockerVolume
	for _, volume := range c.Volumes {
		dockerVolumes = append(dockerVolumes, &docker.DockerVolume{
			VolumeID:   volume.VolumeID,
			Mountpoint: volume.Mountpoint,
		})
	}

	newDockerContainer, err := dockerClient.CreateDockerContainer(ctx, imageName, dockerVolumes, environment, hosts, nil)
	if err != nil {
		return err
	}

	// Insert the replacement container row and refresh c's identity fields.
	err = db.QueryRowContext(ctx, "INSERT INTO containers (container_id, head, deployment_id) VALUES (?, ?, ?) RETURNING id, container_id, head, deployment_id", newDockerContainer.ID, c.Head, c.Deployment.ID).Scan(&c.ID, &c.ContainerID, &c.Head, &c.DeploymentID)
	if err != nil {
		logger.Errorw("Failed to insert container", zap.Error(err))
		return err
	}

	tx, err := db.Begin()
	if err != nil {
		logger.Errorw("Failed to begin transaction", zap.Error(err))
		return err
	}

	volumeUpdateStmt, err := tx.Prepare("UPDATE volumes SET container_id = ? WHERE id = ? RETURNING id, volume_id, mountpoint, container_id")
	if err != nil {
		tx.Rollback()
		logger.Errorw("Failed to prepare statement", zap.Error(err))
		return err
	}
	defer volumeUpdateStmt.Close()

	// Re-point every volume row at the new container row.
	for _, vol := range c.Volumes {
		err = volumeUpdateStmt.QueryRow(c.ID, vol.ID).Scan(&vol.ID, &vol.VolumeID, &vol.Mountpoint, &vol.ContainerID)
		if err != nil {
			tx.Rollback()
			logger.Errorw("Failed to update volume", zap.Error(err))
			return err
		}
	}

	if err = tx.Commit(); err != nil {
		logger.Errorw("Failed to commit transaction", zap.Error(err))
		return err
	}

	logger.Debug("Upgraded container")
	return nil
}
// Remove stops the container, removes its volumes, deletes its database row,
// and finally removes the container from the docker daemon.
func (c *Container) Remove(ctx context.Context, dockerClient *docker.DockerClient, db *sql.DB, logger *zap.SugaredLogger) error {
	logger.Debugw("Removing container", zap.String("container_id", string(c.ContainerID)))

	if err := dockerClient.StopContainer(ctx, c.ContainerID); err != nil {
		logger.Errorw("Failed to stop container", zap.Error(err))
		return err
	}

	for _, volume := range c.Volumes {
		logger.Debugw("Removing volume", zap.String("volume_id", volume.VolumeID))
		if err := volume.Remove(ctx, dockerClient, db, logger); err != nil {
			return err
		}
	}

	if _, err := db.ExecContext(ctx, "DELETE FROM containers WHERE container_id = ?", c.ContainerID); err != nil {
		logger.Errorw("Failed to delete container", zap.Error(err))
		return err
	}

	return dockerClient.ContainerRemove(ctx, c.ContainerID, container.RemoveOptions{})
}
// Start starts the container in the docker daemon. When starting a head
// container after the initial deployment (initial == false), the container is
// first recreated so that its extra-hosts list reflects the current IPs of
// the deployment's supplemental containers (container IPs change across
// restarts), and the new container ID is persisted to the database.
func (c *Container) Start(ctx context.Context, initial bool, db *sql.DB, dockerClient *docker.DockerClient, logger *zap.SugaredLogger) error {
	logger.Debugf("Starting container %+v", c)
	logger.Infow("Starting container", zap.String("container_id", string(c.ContainerID)[:12]))

	if !initial && c.Head {
		logger.Debug("Starting and repairing head container")

		containerJSON, err := dockerClient.ContainerInspect(ctx, c.ContainerID)
		if err != nil {
			return err
		}

		// Remove the stale container. Best-effort: the recreation below is
		// what matters, so only log a failure here.
		if err := dockerClient.ContainerRemove(ctx, c.ContainerID, container.RemoveOptions{}); err != nil {
			logger.Warnw("Failed to remove stale container", zap.Error(err))
		}

		var volumes []*docker.DockerVolume
		var hosts []string
		for _, volume := range c.Volumes {
			volumes = append(volumes, &docker.DockerVolume{
				VolumeID:   volume.VolumeID,
				Mountpoint: volume.Mountpoint,
			})
		}

		// Rebuild the host list from the supplemental containers' current IPs.
		for _, supplementalContainer := range c.Deployment.Containers() {
			if supplementalContainer.Head {
				continue
			}

			ip, err := supplementalContainer.GetIp(dockerClient, logger)
			if err != nil {
				return err
			}

			hosts = append(hosts, fmt.Sprintf("%s:%s", supplementalContainer.FriendlyName, ip))
		}

		// Recreate the container with the same name, image, env and volumes.
		resp, err := dockerClient.CreateDockerContainer(ctx,
			containerJSON.Image,
			volumes,
			containerJSON.Config.Env,
			hosts,
			&c.Name,
		)
		if err != nil {
			return err
		}

		c.ContainerID = resp.ID
		// BUG FIX: this update error was silently dropped, which could leave
		// the database pointing at a container that no longer exists.
		if _, err := db.ExecContext(ctx, "UPDATE containers SET container_id = ? WHERE id = ?", string(c.ContainerID), c.ID); err != nil {
			logger.Errorw("Failed to persist new container id", zap.Error(err))
			return err
		}
	}

	return dockerClient.ContainerStart(ctx, string(c.ContainerID), container.StartOptions{})
}
// Wait blocks until the docker client reports the container as reachable on
// port, or ctx is cancelled. It delegates entirely to the docker client.
func (c *Container) Wait(ctx context.Context, port uint16, dockerClient *docker.DockerClient) error {
	return dockerClient.ContainerWait(ctx, c.ContainerID, port)
}
// GetIp returns the container's IP address as reported by the docker daemon's
// container inspection (NetworkSettings.IPAddress).
func (c *Container) GetIp(dockerClient *docker.DockerClient, logger *zap.SugaredLogger) (string, error) {
	containerJSON, err := dockerClient.ContainerInspect(context.Background(), c.ContainerID)
	if err != nil {
		logger.Errorw("Failed to inspect container", zap.Error(err), zap.String("container_id", string(c.ContainerID[:12])))
		return "", err
	}

	return containerJSON.NetworkSettings.IPAddress, nil
}

View File

@@ -0,0 +1,243 @@
package models
import (
"context"
"database/sql"
"fmt"
"net/url"
"github.com/juls0730/flux/internal/docker"
proxyManagerService "github.com/juls0730/flux/internal/services/proxy"
"github.com/juls0730/flux/pkg"
"go.uber.org/zap"
)
// Deployment is a group of containers (one head plus zero or more
// supplemental containers) served behind a single URL.
type Deployment struct {
	ID         int64        `json:"id"`
	containers []*Container `json:"-"` // internal list; access via Containers(), which copies
	URL        string       `json:"url"`
	Port       uint16       `json:"port"` // port the head container listens on
	headCache  *Container   // memoized result of Head(); invalidated by AppendContainer
}
// NewDeployment returns an empty Deployment ready to have containers appended.
func NewDeployment() *Deployment {
	d := &Deployment{}
	d.containers = make([]*Container, 0)
	return d
}
// Remove removes every container in the deployment (along with their volumes)
// and then deletes the deployment row from the database.
func (d *Deployment) Remove(ctx context.Context, dockerClient *docker.DockerClient, db *sql.DB, logger *zap.SugaredLogger) error {
	logger.Debugw("Removing deployment", zap.Int64("id", d.ID))

	for _, container := range d.containers {
		if err := container.Remove(ctx, dockerClient, db, logger); err != nil {
			return err
		}
	}

	// BUG FIX: this delete's error was silently dropped, which could leave an
	// orphaned deployment row behind.
	if _, err := db.ExecContext(ctx, "DELETE FROM deployments WHERE id = ?", d.ID); err != nil {
		logger.Errorw("Failed to delete deployment", zap.Error(err))
		return err
	}

	return nil
}
// Head returns the deployment's head container, memoizing the lookup in
// headCache. Returns nil when the deployment has no head container.
func (d *Deployment) Head() *Container {
	if d.headCache == nil {
		for _, c := range d.containers {
			if c.Head {
				d.headCache = c
				break
			}
		}
	}
	return d.headCache
}
// Containers returns a copy of the deployment's container list so callers
// cannot mutate the internal slice. Returns nil when the list is unset.
func (d *Deployment) Containers() []*Container {
	if d.containers == nil {
		return nil
	}
	return append([]*Container(nil), d.containers...)
}
// AppendContainer adds container to the deployment and invalidates the cached
// head, since the new container may itself be the head.
func (d *Deployment) AppendContainer(container *Container) {
	d.containers = append(d.containers, container)
	d.headCache = nil
}
// Start starts every container in the deployment via the docker daemon,
// stopping at the first failure.
func (d *Deployment) Start(ctx context.Context, dockerClient *docker.DockerClient) error {
	for _, c := range d.containers {
		if err := dockerClient.StartContainer(ctx, c.ContainerID); err != nil {
			return fmt.Errorf("failed to start container (%s): %v", c.ContainerID[:12], err)
		}
	}
	return nil
}
// GetInternalUrl returns the URL at which the head container can be reached
// inside the docker network (http://<head-ip>:<port>). Returns an error when
// the deployment has no head container or the head has no IP address yet.
func (d *Deployment) GetInternalUrl(dockerClient *docker.DockerClient) (*url.URL, error) {
	// Guard against a missing head; previously this would panic on a nil
	// dereference.
	head := d.Head()
	if head == nil {
		return nil, fmt.Errorf("deployment %d has no head container", d.ID)
	}

	containerJSON, err := dockerClient.ContainerInspect(context.Background(), head.ContainerID)
	if err != nil {
		return nil, err
	}

	if containerJSON.NetworkSettings.IPAddress == "" {
		return nil, fmt.Errorf("no IP address found for container %s", head.ContainerID[:12])
	}

	containerUrl, err := url.Parse(fmt.Sprintf("http://%s:%d", containerJSON.NetworkSettings.IPAddress, d.Port))
	if err != nil {
		return nil, err
	}

	return containerUrl, nil
}
// Stop stops every container in the deployment via the docker daemon,
// stopping at the first failure.
func (d *Deployment) Stop(ctx context.Context, dockerClient *docker.DockerClient) error {
	for _, c := range d.containers {
		if err := dockerClient.StopContainer(ctx, c.ContainerID); err != nil {
			return fmt.Errorf("failed to stop container (%s): %v", c.ContainerID[:12], err)
		}
	}
	return nil
}
// Status reports the deployment's status based on its head container
// ("running", "stopped", or "failed"), and attempts to bring the supplemental
// containers into a state aligned with the head along the way: running
// supplementals are stopped when the head is down, and the whole deployment
// is stopped and reported as "failed" when a supplemental died under a
// running head.
func (deployment *Deployment) Status(ctx context.Context, dockerClient *docker.DockerClient, logger *zap.SugaredLogger) (string, error) {
	// Guard against a missing head; previously this would panic on a nil
	// dereference.
	head := deployment.Head()
	if head == nil {
		return "", fmt.Errorf("deployment %d has no head container", deployment.ID)
	}

	// first, get the status of the head container
	headStatus, err := dockerClient.GetContainerStatus(head.ContainerID)
	if err != nil {
		return "", err
	}

	// then, check the status of all supplemental containers
	for _, container := range deployment.containers {
		if container.Head {
			continue
		}

		containerStatus, err := dockerClient.GetContainerStatus(container.ContainerID)
		if err != nil {
			return "", err
		}

		// if the head is stopped, but the supplemental container is running, stop the supplemental container
		if headStatus.Status != "running" && containerStatus.Status == "running" {
			if err := dockerClient.StopContainer(ctx, container.ContainerID); err != nil {
				return "", err
			}
		}

		// if the head is running, but the supplemental container is stopped, return "failed"
		if headStatus.Status == "running" && containerStatus.Status != "running" {
			logger.Debugw("Supplemental container is not running but head is, returning to failed state", zap.String("container_id", string(container.ContainerID[:12])))
			for _, supplementalContainer := range deployment.containers {
				if err := dockerClient.StopContainer(ctx, supplementalContainer.ContainerID); err != nil {
					return "", err
				}
			}

			return "failed", nil
		}
	}

	switch headStatus.Status {
	case "running":
		return "running", nil
	case "exited", "dead":
		// non-zero exit code in unix terminology means the program did not complete successfully
		if headStatus.ExitCode != 0 {
			return "failed", nil
		}
		return "stopped", nil
	default:
		return "stopped", nil
	}
}
// Upgrade takes an existing deployment and gracefully upgrades the app to a
// new image: the head container is replaced, started, and waited on; the
// deployment row and proxy are switched over to the new head; and the old
// head's proxy (if any) is drained of connections before the old container is
// removed.
func (deployment *Deployment) Upgrade(ctx context.Context, projectConfig *pkg.ProjectConfig, imageName string, dockerClient *docker.DockerClient, proxyManager *proxyManagerService.ProxyManager, db *sql.DB, logger *zap.SugaredLogger) error {
	// copy the old head container since Container.Upgrade updates it in place
	oldHeadContainer := *deployment.Head()

	oldDeploymentInternalUrl, err := deployment.GetInternalUrl(dockerClient)
	if err != nil {
		logger.Errorw("Failed to get internal url", zap.Error(err))
		return err
	}

	// we only upgrade the head container; in the future we might want to allow
	// upgrading supplemental containers, but this works just fine for now.
	err = deployment.Head().Upgrade(ctx, imageName, projectConfig.Environment, dockerClient, db, logger)
	if err != nil {
		logger.Errorw("Failed to upgrade container", zap.Error(err))
		return err
	}

	// Best-effort: the old row is superseded either way, so only log failures.
	if _, err := db.Exec("DELETE FROM containers WHERE id = ?", oldHeadContainer.ID); err != nil {
		logger.Errorw("Failed to delete old container row", zap.Error(err))
	}

	newHeadContainer := deployment.Head()

	logger.Debugw("Starting container", zap.String("container_id", string(newHeadContainer.ContainerID[:12])))
	err = newHeadContainer.Start(ctx, true, db, dockerClient, logger)
	if err != nil {
		logger.Errorw("Failed to start container", zap.Error(err))
		return err
	}

	if err := newHeadContainer.Wait(ctx, projectConfig.Port, dockerClient); err != nil {
		logger.Errorw("Failed to wait for container", zap.Error(err))
		return err
	}

	if _, err := db.Exec("UPDATE deployments SET url = ?, port = ? WHERE id = ?", projectConfig.Url, projectConfig.Port, deployment.ID); err != nil {
		logger.Errorw("Failed to update deployment", zap.Error(err))
		return err
	}

	// Create a new proxy that points to the new head and replace the old one,
	// ensuring the old one is gracefully drained of connections.
	oldProxy, ok := proxyManager.Load(oldDeploymentInternalUrl.String())

	newDeploymentInternalUrl, err := deployment.GetInternalUrl(dockerClient)
	if err != nil {
		logger.Errorw("Failed to get internal url", zap.Error(err))
		return err
	}

	newProxy, err := proxyManagerService.NewDeploymentProxy(*newDeploymentInternalUrl)
	if err != nil {
		logger.Errorw("Failed to create deployment proxy", zap.Error(err))
		return err
	}

	proxyManager.RemoveDeployment(deployment.URL)
	proxyManager.AddProxy(projectConfig.Url, newProxy)
	deployment.URL = projectConfig.Url

	removeOldContainer := func() {
		if err := dockerClient.DeleteDockerContainer(context.Background(), oldHeadContainer.ContainerID); err != nil {
			logger.Errorw("Failed to remove container", zap.Error(err))
		}
	}

	// BUG FIX: the condition was inverted — GracefulShutdown was called on
	// oldProxy exactly when Load reported it MISSING (a nil dereference), while
	// an existing proxy was skipped and its container deleted with connections
	// still in flight. Drain the old proxy when it exists; otherwise remove the
	// old container immediately.
	if ok {
		go oldProxy.GracefulShutdown(removeOldContainer)
	} else {
		removeOldContainer()
	}

	return nil
}