fix streaming, stale data, proxy bugs and more

Zoe
2024-12-13 03:33:04 -06:00
parent e46bb05b39
commit 7689999413
13 changed files with 798 additions and 658 deletions


@@ -11,21 +11,19 @@ import (
var (
deploymentInsertStmt *sql.Stmt
containerInsertStmt *sql.Stmt
volumeInsertStmt *sql.Stmt
updateVolumeStmt *sql.Stmt
)
type Deployment struct {
ID int64 `json:"id"`
Containers []Container `json:"containers,omitempty"`
Head *Container `json:"head,omitempty"`
Containers []*Container `json:"containers,omitempty"`
Proxy *DeploymentProxy `json:"-"`
URL string `json:"url"`
Port uint16 `json:"port"`
}
// Creates a deployment and containers in the database
func CreateDeployment(container Container, port uint16, appUrl string, db *sql.DB) (Deployment, error) {
func CreateDeployment(port uint16, appUrl string, db *sql.DB) (*Deployment, error) {
var deployment Deployment
var err error
@@ -33,115 +31,33 @@ func CreateDeployment(container Container, port uint16, appUrl string, db *sql.D
deploymentInsertStmt, err = db.Prepare("INSERT INTO deployments (url, port) VALUES ($1, $2) RETURNING id, url, port")
if err != nil {
log.Printf("Failed to prepare statement: %v\n", err)
return Deployment{}, err
return nil, err
}
}
err = deploymentInsertStmt.QueryRow(appUrl, port).Scan(&deployment.ID, &deployment.URL, &deployment.Port)
if err != nil {
log.Printf("Failed to insert deployment: %v\n", err)
return Deployment{}, err
return nil, err
}
if containerInsertStmt == nil {
containerInsertStmt, err = db.Prepare("INSERT INTO containers (container_id, deployment_id, head) VALUES ($1, $2, $3) RETURNING id, container_id, deployment_id, head")
if err != nil {
log.Printf("Failed to prepare statement: %v\n", err)
return Deployment{}, err
}
}
var containerIDString string
err = containerInsertStmt.QueryRow(container.ContainerID[:], deployment.ID, true).Scan(&container.ID, &containerIDString, &container.DeploymentID, &container.Head)
if err != nil {
log.Printf("Failed to get container id: %v\n", err)
return Deployment{}, err
}
copy(container.ContainerID[:], containerIDString)
for i, volume := range container.Volumes {
if volumeInsertStmt == nil {
volumeInsertStmt, err = db.Prepare("INSERT INTO volumes (volume_id, container_id) VALUES (?, ?) RETURNING id, volume_id, container_id")
if err != nil {
log.Printf("Failed to prepare statement: %v\n", err)
return Deployment{}, err
}
}
if err := volumeInsertStmt.QueryRow(volume.VolumeID, container.ID).Scan(&container.Volumes[i].ID, &container.Volumes[i].VolumeID, &container.Volumes[i].ContainerID); err != nil {
log.Printf("Failed to insert volume: %v\n", err)
return Deployment{}, err
}
}
container.Deployment = &deployment
deployment.Containers = append(deployment.Containers, container)
return deployment, nil
return &deployment, nil
}
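CreateDeployment prepares its SQL statements lazily: each package-level *sql.Stmt is prepared on first use and then reused across calls. Below is a minimal standalone sketch of that prepare-once pattern, assuming an illustrative sketch package and insertDeployment helper that are not part of this commit, and ignoring the locking a concurrent first call would need.

package sketch

import (
	"database/sql"
	"log"
)

// Statement cache: prepared on first use, reused by later calls.
// NOTE: illustrative only; a sync.Once or mutex would be needed if
// several goroutines can race on the first call.
var deploymentInsertStmt *sql.Stmt

// insertDeployment mirrors the prepare-once shape used in CreateDeployment.
// The table layout here is a stand-in, not the repository's schema.
func insertDeployment(db *sql.DB, url string, port uint16) (int64, error) {
	if deploymentInsertStmt == nil {
		var err error
		deploymentInsertStmt, err = db.Prepare(
			"INSERT INTO deployments (url, port) VALUES ($1, $2) RETURNING id")
		if err != nil {
			log.Printf("Failed to prepare statement: %v\n", err)
			return 0, err
		}
	}
	var id int64
	if err := deploymentInsertStmt.QueryRow(url, port).Scan(&id); err != nil {
		return 0, err
	}
	return id, nil
}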
func (deployment *Deployment) Upgrade(ctx context.Context, projectConfig pkg.ProjectConfig, imageName string, projectPath string) error {
existingContainers, err := findExistingDockerContainers(ctx, projectConfig.Name)
if err != nil {
return fmt.Errorf("Failed to find existing containers: %v", err)
return fmt.Errorf("failed to find existing containers: %v", err)
}
// Deploy new container before deleting old one
c, err := CreateDockerContainer(ctx, imageName, projectPath, projectConfig)
if err != nil || c == nil {
log.Printf("Failed to create container: %v\n", err)
return err
}
var container Container = *c
if containerInsertStmt == nil {
containerInsertStmt, err = Flux.db.Prepare("INSERT INTO containers (container_id, deployment_id, head) VALUES ($1, $2, $3) RETURNING id, container_id, deployment_id, head")
if err != nil {
log.Printf("Failed to prepare statement: %v\n", err)
return err
}
}
var containerIDString string
err = containerInsertStmt.QueryRow(container.ContainerID[:], deployment.ID, true).Scan(&container.ID, &containerIDString, &container.DeploymentID, &container.Head)
container, err := deployment.Head.Upgrade(ctx, imageName, projectPath, projectConfig)
if err != nil {
log.Printf("Failed to get container id: %v\n", err)
log.Printf("Failed to upgrade container: %v\n", err)
return err
}
container.Deployment = deployment
// the space time complexity of this is pretty bad, but it works
for _, existingContainer := range deployment.Containers {
if !existingContainer.Head {
continue
}
for _, volume := range existingContainer.Volumes {
var targetVolume *Volume
for i, volume := range container.Volumes {
if volume.VolumeID == volume.VolumeID {
targetVolume = &container.Volumes[i]
break
}
}
if updateVolumeStmt == nil {
updateVolumeStmt, err = Flux.db.Prepare("UPDATE volumes SET container_id = ? WHERE id = ? RETURNING id, volume_id, container_id")
if err != nil {
log.Printf("Failed to prepare statement: %v\n", err)
return err
}
}
err := updateVolumeStmt.QueryRow(container.ID, volume.ID).Scan(&targetVolume.ID, &targetVolume.VolumeID, &targetVolume.ContainerID)
if err != nil {
log.Printf("Failed to update volume: %v\n", err)
return err
}
}
}
copy(container.ContainerID[:], containerIDString)
// copy(container.ContainerID[:], containerIDString)
deployment.Head = container
deployment.Containers = append(deployment.Containers, container)
log.Printf("Starting container %s...\n", container.ContainerID[:12])
@@ -163,7 +79,7 @@ func (deployment *Deployment) Upgrade(ctx context.Context, projectConfig pkg.Pro
// Create a new proxy that points to the new head, and replace the old one, but ensure that the old one is gracefully shutdown
oldProxy := deployment.Proxy
deployment.Proxy, err = NewDeploymentProxy(deployment, &container)
deployment.Proxy, err = deployment.NewDeploymentProxy()
if err != nil {
log.Printf("Failed to create deployment proxy: %v\n", err)
return err
@@ -175,16 +91,17 @@ func (deployment *Deployment) Upgrade(ctx context.Context, projectConfig pkg.Pro
return err
}
var containers []Container
var containers []*Container
var oldContainers []*Container
for _, container := range deployment.Containers {
if existingContainers[string(container.ContainerID[:])] {
log.Printf("Deleting container from db: %s\n", container.ContainerID[:12])
_, err = tx.Exec("DELETE FROM containers WHERE id = ?", container.ID)
oldContainers = append(oldContainers, &container)
oldContainers = append(oldContainers, container)
if err != nil {
log.Printf("Failed to delete container: %v\n", err)
tx.Rollback()
return err
}
@@ -212,9 +129,6 @@ func (deployment *Deployment) Upgrade(ctx context.Context, projectConfig pkg.Pro
}
deployment.Containers = containers
Flux.proxy.AddDeployment(deployment)
return nil
}
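The container cleanup in Upgrade runs its DELETEs inside a transaction and rolls back on the first failure; the matching Begin and Commit fall outside the visible hunks. The following self-contained sketch shows that transactional shape with a hypothetical deleteOldContainers helper, not the repository's code.

package sketch

import "database/sql"

// deleteOldContainers removes rows inside a single transaction: any failure
// rolls back the partial work, and success is committed once at the end.
func deleteOldContainers(db *sql.DB, ids []int64) error {
	tx, err := db.Begin()
	if err != nil {
		return err
	}
	for _, id := range ids {
		if _, err := tx.Exec("DELETE FROM containers WHERE id = ?", id); err != nil {
			tx.Rollback() // discard the partial delete on any error
			return err
		}
	}
	return tx.Commit()
}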
@@ -245,6 +159,11 @@ func (d *Deployment) Start(ctx context.Context) error {
}
}
if d.Proxy == nil {
d.Proxy, _ = d.NewDeploymentProxy()
Flux.proxy.AddDeployment(d)
}
return nil
}
@@ -257,19 +176,20 @@ func (d *Deployment) Stop(ctx context.Context) error {
}
}
Flux.proxy.RemoveDeployment(d)
d.Proxy = nil
return nil
}
func (d *Deployment) Status(ctx context.Context) (string, error) {
var status string
if d == nil {
fmt.Printf("Deployment is nil\n")
return "stopped", nil
return "", fmt.Errorf("deployment is nil")
}
if d.Containers == nil {
fmt.Printf("Containers are nil\n")
return "stopped", nil
return "", fmt.Errorf("containers are nil")
}
for _, container := range d.Containers {
@@ -281,7 +201,7 @@ func (d *Deployment) Status(ctx context.Context) (string, error) {
// if not all containers are in the same state
if status != "" && status != containerStatus {
return "", fmt.Errorf("Malformed deployment")
return "", fmt.Errorf("malformed deployment")
}
status = containerStatus
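The comment in Upgrade spells out the proxy handoff: build a new proxy pointing at the new head container, swap it in, and let the old one shut down gracefully. The DeploymentProxy type itself is not shown in this diff, so the sketch below only illustrates one way such a swap can be done with net/http/httputil and an atomic pointer; it is an assumed stand-in, not the project's implementation.

package sketch

import (
	"net/http"
	"net/http/httputil"
	"net/url"
	"sync/atomic"
)

// swappableProxy is an illustrative stand-in for a deployment proxy: every
// request goes through whatever reverse proxy is currently stored in the
// atomic pointer, so swapping in the new head container is race-free while
// in-flight (including streaming) responses on the old backend finish on
// their own.
type swappableProxy struct {
	current atomic.Pointer[httputil.ReverseProxy]
}

// Swap points the proxy at a new backend, e.g. the freshly started head
// container. It must be called at least once before ServeHTTP.
func (s *swappableProxy) Swap(backend *url.URL) {
	s.current.Store(httputil.NewSingleHostReverseProxy(backend))
}

func (s *swappableProxy) ServeHTTP(w http.ResponseWriter, r *http.Request) {
	s.current.Load().ServeHTTP(w, r)
}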