add tests, fix bugs, and make cli usable without interactivity

Zoe
2025-05-06 11:00:56 -05:00
parent 4ab58f6324
commit 5bb696052a
12 changed files with 216 additions and 47 deletions


@@ -152,7 +152,7 @@ func (d *DockerClient) GetContainerStatus(containerID DockerID) (*ContainerStatu
}
func (d *DockerClient) StopContainer(ctx context.Context, containerID DockerID) error {
d.logger.Debugw("Stopping container", zap.String("container_id", string(containerID[:12])))
d.logger.Debugw("Stopping container", zap.String("container_id", string(containerID)))
return d.client.ContainerStop(ctx, string(containerID), container.StopOptions{})
}
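
The [:12] slice dropped from the log call above would panic whenever a DockerID holds fewer than 12 bytes (an empty or short ID, for example), so logging the full ID is the safer default. A minimal sketch of a defensive truncation helper, assuming DockerID wraps a plain string; shortID is hypothetical and not part of this commit:

package main

import "fmt"

// shortID is a hypothetical helper, not part of this commit: it truncates an
// ID for display only when it is long enough, avoiding the panic a bare
// id[:12] causes on shorter values.
func shortID(id string) string {
	if len(id) < 12 {
		return id
	}
	return id[:12]
}

func main() {
	fmt.Println(shortID("0123456789abcdef0123")) // 0123456789ab
	fmt.Println(shortID("short"))                // short
}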


@@ -557,6 +557,11 @@ func (flux *FluxServer) StopApp(w http.ResponseWriter, r *http.Request) {
}
func (flux *FluxServer) DeleteAllDeploymentsHandler(w http.ResponseWriter, r *http.Request) {
+ if flux.config.DisableDeleteAll {
+ http.Error(w, "Delete all deployments is disabled", http.StatusForbidden)
+ return
+ }
apps := flux.appManager.GetAllApps()
for _, app := range apps {
err := flux.appManager.DeleteApp(app.Id)
@@ -582,13 +587,23 @@ func (flux *FluxServer) DeleteDeployHandler(w http.ResponseWriter, r *http.Reque
return
}
+ status, err := app.Deployment.Status(r.Context(), flux.docker, flux.logger)
+ if err != nil {
+ flux.logger.Errorw("Failed to get deployment status", zap.Error(err))
+ http.Error(w, err.Error(), http.StatusInternalServerError)
+ return
+ }
+ if status != "stopped" {
+ app.Deployment.Stop(r.Context(), flux.docker)
+ flux.proxy.RemoveDeployment(app.Deployment.URL)
+ }
err = flux.appManager.DeleteApp(id)
if err != nil {
http.Error(w, err.Error(), http.StatusInternalServerError)
return
}
flux.proxy.RemoveDeployment(app.Deployment.URL)
w.WriteHeader(http.StatusOK)
}
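
The DisableDeleteAll guard added above lends itself to a handler-level test, in line with the commit's stated goal of adding tests. A minimal httptest sketch of the 403 path, using a stand-in handler because the real FluxServer constructor and config type are not shown in this diff:

package server_test

import (
	"net/http"
	"net/http/httptest"
	"testing"
)

// TestDeleteAllDisabled exercises the guard pattern with a stand-in handler;
// the real FluxServer wiring is assumed, not shown here.
func TestDeleteAllDisabled(t *testing.T) {
	disableDeleteAll := true
	handler := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		if disableDeleteAll {
			http.Error(w, "Delete all deployments is disabled", http.StatusForbidden)
			return
		}
		w.WriteHeader(http.StatusOK)
	})

	req := httptest.NewRequest(http.MethodDelete, "/deployments", nil)
	rec := httptest.NewRecorder()
	handler.ServeHTTP(rec, req)

	if rec.Code != http.StatusForbidden {
		t.Fatalf("expected 403 when delete-all is disabled, got %d", rec.Code)
	}
}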


@@ -62,7 +62,12 @@ func NewServer() *FluxServer {
config := zap.NewProductionConfig()
if os.Getenv("DEBUG") == "true" {
+ debug, err := strconv.ParseBool(os.Getenv("DEBUG"))
+ if err != nil {
+ debug = false
+ }
+ if debug {
config = zap.NewDevelopmentConfig()
verbosity = -1
}
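
strconv.ParseBool accepts "1", "t", "T", "TRUE", "true", and "True" (plus the matching false spellings), so parsing the variable tolerates more inputs than comparing it to the literal "true". A small sketch of the same parse-with-fallback pattern pulled into a helper; envBool is hypothetical and not part of this commit:

package main

import (
	"fmt"
	"os"
	"strconv"
)

// envBool reads an environment variable as a boolean, falling back to a
// default when the value is missing or unparsable, which is the behaviour
// the DEBUG handling above implements inline.
func envBool(key string, fallback bool) bool {
	v, err := strconv.ParseBool(os.Getenv(key))
	if err != nil {
		return fallback
	}
	return v
}

func main() {
	fmt.Println(envBool("DEBUG", false))
}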


@@ -176,9 +176,9 @@ func CreateContainer(ctx context.Context, imageName string, friendlyName string,
// Updates Container in place
func (c *Container) Upgrade(ctx context.Context, imageName string, environment []string, dockerClient *docker.DockerClient, db *sql.DB, logger *zap.SugaredLogger) error {
// Create new container with new image
logger.Debugw("Upgrading container", zap.String("container_id", string(c.ContainerID[:12])))
logger.Debugw("Upgrading container", zap.String("container_id", string(c.ContainerID)))
if c.Volumes == nil {
return fmt.Errorf("no volumes found for container %s", c.ContainerID[:12])
return fmt.Errorf("no volumes found for container %s", c.ContainerID)
}
containerJSON, err := dockerClient.ContainerInspect(context.Background(), c.ContainerID)
@@ -269,7 +269,7 @@ func (c *Container) Remove(ctx context.Context, dockerClient *docker.DockerClien
func (c *Container) Start(ctx context.Context, initial bool, db *sql.DB, dockerClient *docker.DockerClient, logger *zap.SugaredLogger) error {
logger.Debugf("Starting container %+v", c)
logger.Info("Starting container", zap.String("container_id", string(c.ContainerID)[:12]))
logger.Infow("Starting container", zap.String("container_id", string(c.ContainerID)))
if !initial && c.Head {
logger.Debug("Starting and repairing head container")
@@ -330,7 +330,7 @@ func (c *Container) Wait(ctx context.Context, port uint16, dockerClient *docker.
func (c *Container) GetIp(dockerClient *docker.DockerClient, logger *zap.SugaredLogger) (string, error) {
containerJSON, err := dockerClient.ContainerInspect(context.Background(), c.ContainerID)
if err != nil {
logger.Errorw("Failed to inspect container", zap.Error(err), zap.String("container_id", string(c.ContainerID[:12])))
logger.Errorw("Failed to inspect container", zap.Error(err), zap.String("container_id", string(c.ContainerID)))
return "", err
}
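
The Info-to-Infow change above is a real fix: on zap's SugaredLogger, Info uses fmt.Sprint to build the message, so a zap.String field gets flattened into the message text, whereas Infow records it as structured key/value context. A small standalone sketch of the difference, using zap's example logger:

package main

import "go.uber.org/zap"

func main() {
	logger := zap.NewExample().Sugar()
	defer logger.Sync()

	id := "0123456789abcdef"

	// Sprint-style: the zap.String field is stringified into the message.
	logger.Info("Starting container", zap.String("container_id", id))

	// Structured: the field is emitted as a proper key/value pair.
	logger.Infow("Starting container", zap.String("container_id", id))
}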


@@ -77,7 +77,7 @@ func (d *Deployment) Start(ctx context.Context, dockerClient *docker.DockerClien
for _, container := range d.containers {
err := dockerClient.StartContainer(ctx, container.ContainerID)
if err != nil {
return fmt.Errorf("failed to start container (%s): %v", container.ContainerID[:12], err)
return fmt.Errorf("failed to start container (%s): %v", container.ContainerID, err)
}
}
@@ -91,7 +91,7 @@ func (d *Deployment) GetInternalUrl(dockerClient *docker.DockerClient) (*url.URL
}
if containerJSON.NetworkSettings.IPAddress == "" {
return nil, fmt.Errorf("no IP address found for container %s", d.Head().ContainerID[:12])
return nil, fmt.Errorf("no IP address found for container %s", d.Head().ContainerID)
}
containerUrl, err := url.Parse(fmt.Sprintf("http://%s:%d", containerJSON.NetworkSettings.IPAddress, d.Port))
@@ -106,7 +106,7 @@ func (d *Deployment) Stop(ctx context.Context, dockerClient *docker.DockerClient
for _, container := range d.containers {
err := dockerClient.StopContainer(ctx, container.ContainerID)
if err != nil {
return fmt.Errorf("failed to stop container (%s): %v", container.ContainerID[:12], err)
return fmt.Errorf("failed to stop container (%s): %v", container.ContainerID, err)
}
}
return nil
@@ -141,7 +141,7 @@ func (deployment *Deployment) Status(ctx context.Context, dockerClient *docker.D
// if the head is running, but the supplemental container is stopped, return "failed"
if headStatus.Status == "running" && containerStatus.Status != "running" {
logger.Debugw("Supplemental container is not running but head is, returning to failed state", zap.String("container_id", string(container.ContainerID[:12])))
logger.Debugw("Supplemental container is not running but head is, returning to failed state", zap.String("container_id", string(container.ContainerID)))
for _, supplementalContainer := range deployment.containers {
err := dockerClient.StopContainer(ctx, supplementalContainer.ContainerID)
if err != nil {
@@ -183,7 +183,7 @@ func (deployment *Deployment) Upgrade(ctx context.Context, projectConfig *pkg.Pr
db.Exec("DELETE FROM containers WHERE id = ?", oldHeadContainer.ID)
newHeadContainer := deployment.Head()
logger.Debugw("Starting container", zap.String("container_id", string(newHeadContainer.ContainerID[:12])))
logger.Debugw("Starting container", zap.String("container_id", string(newHeadContainer.ContainerID)))
err = newHeadContainer.Start(ctx, true, db, dockerClient, logger)
if err != nil {
logger.Errorw("Failed to start container", zap.Error(err))
@@ -221,7 +221,13 @@ func (deployment *Deployment) Upgrade(ctx context.Context, projectConfig *pkg.Pr
// gracefully shutdown the old proxy, or if it doesnt exist, just remove the containers
if ok {
go oldProxy.GracefulShutdown(func() {
- err := dockerClient.DeleteDockerContainer(context.Background(), oldHeadContainer.ContainerID)
+ err := dockerClient.StopContainer(context.Background(), oldHeadContainer.ContainerID)
+ if err != nil {
+ logger.Errorw("Failed to stop container", zap.Error(err))
+ return
+ }
+ err = dockerClient.DeleteDockerContainer(context.Background(), oldHeadContainer.ContainerID)
if err != nil {
logger.Errorw("Failed to remove container", zap.Error(err))
}
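
The shutdown callback now stops the old head container before deleting it, which avoids the case where the daemon refuses to remove a still-running container. A sketch of the same stop-then-remove ordering against the raw Docker SDK (assuming a recent SDK where RemoveOptions lives in the container package); the project's DockerClient wrapper is not shown here, so these calls are an approximation:

package main

import (
	"context"
	"fmt"

	"github.com/docker/docker/api/types/container"
	"github.com/docker/docker/client"
)

// stopThenRemove mirrors the stop-then-delete ordering used in the graceful
// shutdown callback; removing a running container fails unless Force is set,
// so stopping first keeps the removal unforced.
func stopThenRemove(ctx context.Context, cli *client.Client, id string) error {
	if err := cli.ContainerStop(ctx, id, container.StopOptions{}); err != nil {
		return fmt.Errorf("stop container %s: %w", id, err)
	}
	if err := cli.ContainerRemove(ctx, id, container.RemoveOptions{}); err != nil {
		return fmt.Errorf("remove container %s: %w", id, err)
	}
	return nil
}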