Compare commits

5 commits: main ... refactor/d

| SHA1 |
| --- |
| 4ab58f6324 |
| c891c24843 |
| f4bf2ff5a1 |
| 79322c4c5e |
| d501775ae6 |
.vscode/launch.json (new file, 19 lines, vendored)

@@ -0,0 +1,19 @@
```jsonc
{
    // Use IntelliSense to learn about possible attributes.
    // Hover to view descriptions of existing attributes.
    // For more information, visit: https://go.microsoft.com/fwlink/?linkid=830387
    "version": "0.2.0",
    "configurations": [

        {
            "name": "Debug Daemon",
            "type": "go",
            "request": "launch",
            "env": {
                "FLUXD_ROOT_DIR": "${workspaceFolder}/fluxdd"
            },
            "mode": "auto",
            "program": "${workspaceFolder}/cmd/daemon/main.go",
        }
    ]
}
```
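The debug configuration points the daemon at a workspace-local data directory through `FLUXD_ROOT_DIR`. The daemon-side lookup is not part of this comparison; a minimal sketch of how such an override is typically resolved (the `/var/fluxd` fallback comes from the README's systemd notes, and the helper itself is hypothetical):

```go
package main

import "os"

// rootDir resolves the daemon's data directory, honoring the
// FLUXD_ROOT_DIR override set by the debug launch configuration.
// Illustrative only; the real lookup lives in the daemon and is not
// shown in this diff.
func rootDir() string {
    if dir := os.Getenv("FLUXD_ROOT_DIR"); dir != "" {
        return dir
    }
    return "/var/fluxd"
}
```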
README.md (54 lines changed)

@@ -1,6 +1,20 @@
```diff
 # Flux
 
-Flux is a lightweight self-hosted pseudo-PaaS for hosting Golang web apps with ease. Built on top of [Buildpacks](https://buildpacks.io/) and [Docker](https://docs.docker.com/get-docker/), Flux simplifies the deployment process with a focus on simplicity, speed, and reliability.
+Flux is a lightweight self-hosted micro-PaaS for hosting Golang web apps with ease. Built on top of [Buildpacks](https://buildpacks.io/) and [Docker](https://docs.docker.com/get-docker/), Flux simplifies the deployment process with a focus on simplicity, speed, and reliability.
+
+**Goals**:
+
+- Automatic deployment of Golang web apps: simply run `flux init`, then `flux deploy` to deploy your app!
+- Zero-downtime deployments with blue-green deployments
+- Simple but powerful configuration: flux should be able to handle most use cases, from a micro web app to a fullstack app with databases, caching layers, full-text search, etc.
+
+**What is flux not?**
+
+- Flux is not meant to be used as a multi-tenant PaaS; it is meant to be used by trusted individuals. While flux still has security in mind, certain things are not secure: anyone who has access to your flux server can, for example, delete all your apps, so be careful.
+
+**Limitations**:
+- Theoretically flux is likely limited by the number of containers that can fit in the bridge network, but I haven't tested this
+- Containers are not particularly isolated: if one malicious container wanted to scan all containers, or interact with other containers it technically shouldn't, it totally just can (todo?)
 
 ## Features
 
```
@@ -67,7 +81,6 @@ After=network.target

```ini
ExecStart=/usr/local/bin/fluxd
Restart=always
Environment=GOPATH=/var/fluxd/go
Environment=HOME=/var/fluxd/home

[Install]
WantedBy=multi-user.target
```
@@ -93,11 +106,19 @@ Flux daemon looks for a configuration file in `/var/fluxd/config.json` but can b

````diff
 ```json
 {
-    "builder": "paketobuildpacks/builder-jammy-tiny"
+    "builder": "paketobuildpacks/builder-jammy-tiny",
+    "disable_delete_all": false,
+    "compression": {
+        "enabled": false
+    }
 }
 ```
 
 - `builder`: The buildpack builder to use (default: `paketobuildpacks/builder-jammy-tiny`)
+- `disable_delete_all`: Disable the delete all deployments endpoint (default: `false`)
+- `compression`: Compression settings
+  - `enabled`: Enable compression (default: `false`)
+  - `level`: Compression level
````
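Going by the keys documented above and the `pkg.Compression` fields referenced later in this diff (`Enabled` and `Level` in the old deploy command), a daemon-side struct for this file could look roughly like the following sketch; the actual type is not shown in this comparison and may differ:

```go
package pkg

// Compression mirrors the "compression" block of /var/fluxd/config.json.
type Compression struct {
    Enabled bool `json:"enabled"`
    Level   int  `json:"level"`
}

// DaemonConfig is a sketch of the documented daemon settings; field
// names are inferred from the README, not taken from the real code.
type DaemonConfig struct {
    Builder          string      `json:"builder"`
    DisableDeleteAll bool        `json:"disable_delete_all"`
    Compression      Compression `json:"compression"`
}
```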
#### Daemon Settings
@@ -140,18 +161,33 @@ flux.json is the configuration file in the root of your project that defines dep

```json
    "name": "my-app",
    "url": "myapp.example.com",
    "port": 8080,
    "containers": [
        {
            "name": "redis",
            "image": "redis:latest",
            "volumes": [
                {
                    "mountpoint": "/data"
                }
            ],
        }
    ],
    "env_file": ".env",
    "environment": ["DEBUG=true"]
}
```

#### Configuration Options

The project config file has the following options:

```diff
-- `name`: The name of the project
-- `url`: Domain for the application
-- `port`: Web server's listening port
-- `env_file`: Path to environment variable file
-- `environment`: Additional environment variables
+| field | description | required |
+| ----- | ----------- | -------- |
+| `name` | The name of the project | true |
+| `url` | Domain for the application | true |
+| `port` | Web server's listening port | true |
+| `env_file` | Path to environment variable file | false |
+| `environment` | Additional environment variables | false |
+| `containers` | Supplemental containers to run alongside the app | false |
+| `volumes` | Volumes to mount to the app's containers | false |
```
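Later in this diff the CLI decodes flux.json into a `FluxConfig` that wraps `pkg.ProjectConfig`; as a rough sketch of how the documented fields might map onto Go types (the real definitions in `pkg` are not shown here and may differ):

```go
package pkg

// ProjectConfig sketches the flux.json schema documented above.
type ProjectConfig struct {
    Name        string      `json:"name"`                  // required
    Url         string      `json:"url"`                   // required
    Port        uint16      `json:"port"`                  // required
    Environment []string    `json:"environment,omitempty"` // optional
    Containers  []Container `json:"containers,omitempty"`  // optional
    Volumes     []Volume    `json:"volumes,omitempty"`     // optional
}

// Container describes a supplemental container, like the redis example.
type Container struct {
    Name        string   `json:"name"`
    Image       string   `json:"image"`
    Volumes     []Volume `json:"volumes,omitempty"`
    Environment []string `json:"environment,omitempty"`
}

// Volume describes a mount, matching the "mountpoint" key above.
type Volume struct {
    Mountpoint string `json:"mountpoint"`
}
```

`env_file` is deliberately left out of this sketch: the deploy command in this diff strips it client-side by merging the file's variables into `environment` before uploading the config.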
## Deployment Notes
cmd/cli/commands/command.go (new file, 14 lines)

@@ -0,0 +1,14 @@

```go
package commands

import (
    "github.com/juls0730/flux/pkg"
    "github.com/juls0730/flux/pkg/API"
)

type CommandCtx struct {
    Config      pkg.CLIConfig
    Info        API.Info
    Interactive bool
}

type CommandFunc func(CommandCtx, []string) error
```
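`CommandCtx` is built once in cmd/cli/main.go and handed to every `CommandFunc`. As a small illustration of what a command registered against these types looks like (the `version` command below is hypothetical, not part of this change):

```go
package commands

import "fmt"

// VersionCommand is a hypothetical CommandFunc: it only consumes the
// context handed to it by the dispatcher in cmd/cli/main.go.
func VersionCommand(ctx CommandCtx, args []string) error {
    if !ctx.Interactive {
        fmt.Println(ctx.Info.Version)
        return nil
    }
    fmt.Printf("flux daemon %s at %s\n", ctx.Info.Version, ctx.Config.DaemonURL)
    return nil
}
```

It would then be wired up alongside the existing commands with `cmdHandler.RegisterCmd("version", commands.VersionCommand, "Print the daemon version")`.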
cmd/cli/commands/delete.go (new file, 117 lines)

@@ -0,0 +1,117 @@
package commands
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"flag"
|
||||
"fmt"
|
||||
"os"
|
||||
"strings"
|
||||
|
||||
util "github.com/juls0730/flux/internal/util/cli"
|
||||
)
|
||||
|
||||
var deleteUsage = `Usage:
|
||||
flux delete [project-name | all]
|
||||
|
||||
Options:
|
||||
project-name: The name of the project to delete
|
||||
all: Delete all projects
|
||||
|
||||
Flags:
|
||||
%s
|
||||
|
||||
Flux will delete the deployment of the app in the current directory or the specified project.`
|
||||
|
||||
func deleteAll(ctx CommandCtx, noConfirm *bool) error {
|
||||
if !*noConfirm {
|
||||
if !ctx.Interactive {
|
||||
return fmt.Errorf("delete command cannot be run non-interactively without --no-confirm")
|
||||
}
|
||||
|
||||
var response string
|
||||
fmt.Print("Are you sure you want to delete all projects? this will delete all volumes and containers associated and cannot be undone. [y/N] ")
|
||||
fmt.Scanln(&response)
|
||||
|
||||
if strings.ToLower(response) != "y" {
|
||||
fmt.Println("Aborting...")
|
||||
return nil
|
||||
}
|
||||
|
||||
response = ""
|
||||
|
||||
// since we are deleting **all** projects, I feel better asking for confirmation twice
|
||||
fmt.Printf("Are you really sure you want to delete all projects? [y/N] ")
|
||||
fmt.Scanln(&response)
|
||||
|
||||
if strings.ToLower(response) != "y" {
|
||||
fmt.Println("Aborting...")
|
||||
return nil
|
||||
}
|
||||
}
|
||||
|
||||
util.DeleteRequest(ctx.Config.DaemonURL + "/deployments")
|
||||
|
||||
fmt.Printf("Successfully deleted all projects\n")
|
||||
return nil
|
||||
}
|
||||
|
||||
func DeleteCommand(ctx CommandCtx, args []string) error {
|
||||
fs := flag.NewFlagSet("delete", flag.ExitOnError)
|
||||
fs.Usage = func() {
|
||||
var buf bytes.Buffer
|
||||
// Redirect flagset to print to buffer instead of stdout
|
||||
fs.SetOutput(&buf)
|
||||
fs.PrintDefaults()
|
||||
|
||||
fmt.Println(deleteUsage, strings.TrimRight(buf.String(), "\n"))
|
||||
}
|
||||
|
||||
noConfirm := fs.Bool("no-confirm", false, "Skip confirmation prompt")
|
||||
|
||||
err := fs.Parse(args)
|
||||
if err != nil {
|
||||
fmt.Println(err)
|
||||
os.Exit(1)
|
||||
}
|
||||
|
||||
args = fs.Args()
|
||||
|
||||
if len(args) == 1 && args[0] == "all" {
|
||||
return deleteAll(ctx, noConfirm)
|
||||
}
|
||||
|
||||
project, err := util.GetProject("delete", args, ctx.Config)
|
||||
if err != nil {
|
||||
return fmt.Errorf("\tfailed to get project name: %v.\n\tSee flux delete --help for more information", err)
|
||||
}
|
||||
|
||||
// ask for confirmation if not --no-confirm
|
||||
if !*noConfirm {
|
||||
if !ctx.Interactive {
|
||||
return fmt.Errorf("delete command cannot be run non-interactively without --no-confirm")
|
||||
}
|
||||
|
||||
fmt.Printf("Are you sure you want to delete %s? this will delete all volumes and containers associated with the deployment, and cannot be undone. \n[y/N] ", project.Name)
|
||||
var response string
|
||||
fmt.Scanln(&response)
|
||||
|
||||
if strings.ToLower(response) != "y" {
|
||||
fmt.Println("Aborting...")
|
||||
return nil
|
||||
}
|
||||
}
|
||||
|
||||
err = util.DeleteRequest(ctx.Config.DaemonURL + "/app/" + project.Id)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to delete project: %v", err)
|
||||
}
|
||||
|
||||
if len(args) == 0 {
|
||||
// remove the .fluxid file if it exists
|
||||
os.Remove(".fluxid")
|
||||
}
|
||||
|
||||
fmt.Printf("Successfully deleted %s\n", project.Name)
|
||||
|
||||
return nil
|
||||
}
|
||||
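Both deletion paths above go through `util.DeleteRequest` from `internal/util/cli`, which is not included in this comparison. Judging by the removed handler code further down (which issued the DELETE via `http.NewRequest` and surfaced non-200 response bodies as errors), it plausibly looks something like this sketch:

```go
package util

import (
    "fmt"
    "io"
    "net/http"
    "strings"
)

// DeleteRequest sketches the helper used by the delete command: issue an
// HTTP DELETE and turn any non-200 response body into an error.
// The real helper in internal/util/cli may differ.
func DeleteRequest(url string) error {
    req, err := http.NewRequest(http.MethodDelete, url, nil)
    if err != nil {
        return err
    }

    resp, err := http.DefaultClient.Do(req)
    if err != nil {
        return err
    }
    defer resp.Body.Close()

    if resp.StatusCode != http.StatusOK {
        body, err := io.ReadAll(resp.Body)
        if err != nil {
            return fmt.Errorf("error reading response body: %v", err)
        }
        return fmt.Errorf("delete failed: %s", strings.TrimSuffix(string(body), "\n"))
    }

    return nil
}
```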
@@ -1,4 +1,4 @@
|
||||
package handlers
|
||||
package commands
|
||||
|
||||
import (
|
||||
"archive/tar"
|
||||
@@ -11,13 +11,18 @@ import (
|
||||
"mime/multipart"
|
||||
"net/http"
|
||||
"os"
|
||||
"os/signal"
|
||||
"path/filepath"
|
||||
"regexp"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"github.com/briandowns/spinner"
|
||||
"github.com/juls0730/flux/cmd/flux/models"
|
||||
"github.com/google/uuid"
|
||||
"github.com/joho/godotenv"
|
||||
util "github.com/juls0730/flux/internal/util/cli"
|
||||
"github.com/juls0730/flux/pkg"
|
||||
"github.com/juls0730/flux/pkg/API"
|
||||
)
|
||||
|
||||
func matchesIgnorePattern(path string, info os.FileInfo, patterns []string) bool {
|
||||
@@ -64,7 +69,7 @@ func convertGitignorePatternToRegex(pattern string) string {
|
||||
return pattern
|
||||
}
|
||||
|
||||
func compressDirectory(compression pkg.Compression) ([]byte, error) {
|
||||
func compressDirectory(compressionLevel int) ([]byte, error) {
|
||||
var buf bytes.Buffer
|
||||
var err error
|
||||
|
||||
@@ -86,8 +91,8 @@ func compressDirectory(compression pkg.Compression) ([]byte, error) {
|
||||
}
|
||||
|
||||
var gzWriter *gzip.Writer
|
||||
if compression.Enabled {
|
||||
gzWriter, err = gzip.NewWriterLevel(&buf, compression.Level)
|
||||
if compressionLevel > 0 {
|
||||
gzWriter, err = gzip.NewWriterLevel(&buf, compressionLevel)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
@@ -151,46 +156,147 @@ func compressDirectory(compression pkg.Compression) ([]byte, error) {
|
||||
return buf.Bytes(), nil
|
||||
}
|
||||
|
||||
func DeployCommand(seekingHelp bool, config models.Config, info pkg.Info, loadingSpinner *spinner.Spinner, spinnerWriter *models.CustomSpinnerWriter, args []string) error {
|
||||
if seekingHelp {
|
||||
fmt.Println(`Usage:
|
||||
flux deploy
|
||||
func preprocessEnvFile(envFile string, target *[]string) error {
|
||||
envBytes, err := os.Open(envFile)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to open env file: %v", err)
|
||||
}
|
||||
defer envBytes.Close()
|
||||
|
||||
envVars, err := godotenv.Parse(envBytes)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to parse env file: %v", err)
|
||||
}
|
||||
|
||||
for key, value := range envVars {
|
||||
*target = append(*target, fmt.Sprintf("%s=%s", key, value))
|
||||
}
|
||||
|
||||
Flux will deploy the app in the current directory, and start routing traffic to it.`)
|
||||
return nil
|
||||
}
|
||||
|
||||
func DeployCommand(ctx CommandCtx, args []string) error {
|
||||
if _, err := os.Stat("flux.json"); err != nil {
|
||||
return fmt.Errorf("no flux.json found, please run flux init first")
|
||||
}
|
||||
|
||||
spinnerWriter := util.NewCustomSpinnerWriter()
|
||||
|
||||
loadingSpinner := spinner.New(spinner.CharSets[14], 100*time.Millisecond, spinner.WithWriter(spinnerWriter))
|
||||
defer func() {
|
||||
if loadingSpinner.Active() {
|
||||
loadingSpinner.Stop()
|
||||
}
|
||||
}()
|
||||
|
||||
signalChannel := make(chan os.Signal, 1)
|
||||
signal.Notify(signalChannel, os.Interrupt)
|
||||
go func() {
|
||||
<-signalChannel
|
||||
if loadingSpinner.Active() {
|
||||
loadingSpinner.Stop()
|
||||
}
|
||||
|
||||
os.Exit(0)
|
||||
}()
|
||||
|
||||
loadingSpinner.Suffix = " Deploying"
|
||||
loadingSpinner.Start()
|
||||
|
||||
buf, err := compressDirectory(info.Compression)
|
||||
buf, err := compressDirectory(ctx.Info.CompressionLevel)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to compress directory: %v", err)
|
||||
}
|
||||
|
||||
body := &bytes.Buffer{}
|
||||
writer := multipart.NewWriter(body)
|
||||
configPart, err := writer.CreateFormFile("config", "flux.json")
|
||||
|
||||
if _, err := os.Stat(".fluxid"); err == nil {
|
||||
idPart, err := writer.CreateFormField("id")
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to create id part: %v", err)
|
||||
}
|
||||
|
||||
idFile, err := os.Open(".fluxid")
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to open .fluxid: %v", err)
|
||||
}
|
||||
defer idFile.Close()
|
||||
|
||||
var idBytes []byte
|
||||
if idBytes, err = io.ReadAll(idFile); err != nil {
|
||||
return fmt.Errorf("failed to read .fluxid: %v", err)
|
||||
}
|
||||
|
||||
if _, err := uuid.Parse(string(idBytes)); err != nil {
|
||||
return fmt.Errorf(".fluxid does not contain a valid uuid")
|
||||
}
|
||||
|
||||
idPart.Write(idBytes)
|
||||
}
|
||||
|
||||
configPart, err := writer.CreateFormField("config")
|
||||
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to create config part: %v", err)
|
||||
}
|
||||
|
||||
type FluxContainers struct {
|
||||
pkg.Container
|
||||
EnvFile string `json:"env_file,omitempty"`
|
||||
}
|
||||
|
||||
type FluxConfig struct {
|
||||
pkg.ProjectConfig
|
||||
EnvFile string `json:"env_file,omitempty"`
|
||||
Containers []FluxContainers `json:"containers,omitempty"`
|
||||
}
|
||||
|
||||
fluxConfigFile, err := os.Open("flux.json")
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to open flux.json: %v", err)
|
||||
}
|
||||
defer fluxConfigFile.Close()
|
||||
|
||||
if _, err := io.Copy(configPart, fluxConfigFile); err != nil {
|
||||
return fmt.Errorf("failed to write config part: %v", err)
|
||||
// Read the entire JSON file into a byte slice
|
||||
byteValue, err := io.ReadAll(fluxConfigFile)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to read flux.json: %v", err)
|
||||
}
|
||||
|
||||
codePart, err := writer.CreateFormFile("code", "code.tar.gz")
|
||||
var fluxConfig FluxConfig
|
||||
err = json.Unmarshal(byteValue, &fluxConfig)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to unmarshal flux.json: %v", err)
|
||||
}
|
||||
|
||||
if fluxConfig.EnvFile != "" {
|
||||
if err := preprocessEnvFile(fluxConfig.EnvFile, &fluxConfig.Environment); err != nil {
|
||||
return fmt.Errorf("failed to preprocess env file: %v", err)
|
||||
}
|
||||
}
|
||||
|
||||
for _, container := range fluxConfig.Containers {
|
||||
if container.EnvFile != "" {
|
||||
if err := preprocessEnvFile(container.EnvFile, &container.Environment); err != nil {
|
||||
return fmt.Errorf("failed to preprocess env file: %v", err)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// write the pre-processed flux.json to the config part
|
||||
if err := json.NewEncoder(configPart).Encode(fluxConfig); err != nil {
|
||||
return fmt.Errorf("failed to encode flux.json: %v", err)
|
||||
}
|
||||
|
||||
var codeFileName string
|
||||
if ctx.Info.CompressionLevel > 0 {
|
||||
codeFileName = "code.tar.gz"
|
||||
} else {
|
||||
codeFileName = "code.tar"
|
||||
}
|
||||
|
||||
codePart, err := writer.CreateFormFile("code", codeFileName)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to create code part: %v", err)
|
||||
}
|
||||
@@ -203,7 +309,7 @@ func DeployCommand(seekingHelp bool, config models.Config, info pkg.Info, loadin
|
||||
return fmt.Errorf("failed to close writer: %v", err)
|
||||
}
|
||||
|
||||
req, err := http.NewRequest("POST", config.DeamonURL+"/deploy", body)
|
||||
req, err := http.NewRequest("POST", ctx.Config.DaemonURL+"/deploy", body)
|
||||
req.Header.Set("Content-Type", writer.FormDataContentType())
|
||||
|
||||
if err != nil {
|
||||
@@ -216,11 +322,11 @@ func DeployCommand(seekingHelp bool, config models.Config, info pkg.Info, loadin
|
||||
}
|
||||
defer resp.Body.Close()
|
||||
|
||||
customWriter := models.NewCustomStdout(spinnerWriter)
|
||||
customWriter := util.NewCustomStdout(spinnerWriter)
|
||||
|
||||
scanner := bufio.NewScanner(resp.Body)
|
||||
var event string
|
||||
var data pkg.DeploymentEvent
|
||||
var data API.DeploymentEvent
|
||||
var line string
|
||||
for scanner.Scan() {
|
||||
line = scanner.Text()
|
||||
@@ -232,7 +338,19 @@ func DeployCommand(seekingHelp bool, config models.Config, info pkg.Info, loadin
|
||||
switch event {
|
||||
case "complete":
|
||||
loadingSpinner.Stop()
|
||||
fmt.Printf("App %s deployed successfully!\n", data.Message.(map[string]interface{})["name"])
|
||||
fmt.Printf("App %s deployed successfully!\n", data.Message.(map[string]any)["name"])
|
||||
if _, err := os.Stat(".fluxid"); os.IsNotExist(err) {
|
||||
idFile, err := os.Create(".fluxid")
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to create .fluxid: %v", err)
|
||||
}
|
||||
defer idFile.Close()
|
||||
|
||||
id := data.Message.(map[string]any)["id"].(string)
|
||||
if _, err := idFile.Write([]byte(id)); err != nil {
|
||||
return fmt.Errorf("failed to write .fluxid: %v", err)
|
||||
}
|
||||
}
|
||||
return nil
|
||||
case "cmd_output":
|
||||
customWriter.Printf("... %s\n", data.Message)
|
||||
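The deploy hunk above assembles a multipart request by hand: an optional `id` field read from `.fluxid`, a `config` field carrying the pre-processed flux.json, and a `code` part with the (optionally gzipped) tarball. Stripped of the surrounding spinner and SSE handling, the request construction reduces to roughly this sketch (the field names match the diff; everything else is simplified and not the actual function):

```go
package commands

import (
    "bytes"
    "encoding/json"
    "mime/multipart"
    "net/http"
    "os"
)

// buildDeployRequest is a simplified sketch of what DeployCommand does;
// it mirrors the multipart layout, not the full implementation.
func buildDeployRequest(daemonURL string, fluxConfig any, code []byte) (*http.Request, error) {
    body := &bytes.Buffer{}
    writer := multipart.NewWriter(body)

    // Optional "id" field, taken verbatim from .fluxid when it exists.
    if id, err := os.ReadFile(".fluxid"); err == nil {
        idPart, err := writer.CreateFormField("id")
        if err != nil {
            return nil, err
        }
        idPart.Write(id)
    }

    // "config" field: the pre-processed flux.json, re-encoded as JSON.
    configPart, err := writer.CreateFormField("config")
    if err != nil {
        return nil, err
    }
    if err := json.NewEncoder(configPart).Encode(fluxConfig); err != nil {
        return nil, err
    }

    // "code" part: the tarball produced by compressDirectory.
    codePart, err := writer.CreateFormFile("code", "code.tar.gz")
    if err != nil {
        return nil, err
    }
    codePart.Write(code)

    if err := writer.Close(); err != nil {
        return nil, err
    }

    req, err := http.NewRequest(http.MethodPost, daemonURL+"/deploy", body)
    if err != nil {
        return nil, err
    }
    req.Header.Set("Content-Type", writer.FormDataContentType())
    return req, nil
}
```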
@@ -1,29 +1,48 @@
|
||||
package handlers
|
||||
package commands
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"encoding/json"
|
||||
"flag"
|
||||
"fmt"
|
||||
"os"
|
||||
"strconv"
|
||||
"strings"
|
||||
|
||||
"github.com/briandowns/spinner"
|
||||
"github.com/juls0730/flux/cmd/flux/models"
|
||||
"github.com/juls0730/flux/pkg"
|
||||
)
|
||||
|
||||
func InitCommand(seekingHelp bool, config models.Config, info pkg.Info, loadingSpinner *spinner.Spinner, spinnerWriter *models.CustomSpinnerWriter, args []string) error {
|
||||
if seekingHelp {
|
||||
fmt.Println(`Usage:
|
||||
var initUsage = `Usage:
|
||||
flux init [project-name]
|
||||
|
||||
Options:
|
||||
project-name: The name of the project to initialize
|
||||
|
||||
Flux will initialize a new project in the current directory or the specified project.`)
|
||||
return nil
|
||||
Flux will initialize a new project in the current directory or the specified project.`
|
||||
|
||||
func InitCommand(ctx CommandCtx, args []string) error {
|
||||
if !ctx.Interactive {
|
||||
return fmt.Errorf("init command can only be run in interactive mode")
|
||||
}
|
||||
|
||||
fs := flag.NewFlagSet("init", flag.ExitOnError)
|
||||
fs.Usage = func() {
|
||||
var buf bytes.Buffer
|
||||
// Redirect flagset to print to buffer instead of stdout
|
||||
fs.SetOutput(&buf)
|
||||
fs.PrintDefaults()
|
||||
|
||||
fmt.Println(initUsage)
|
||||
}
|
||||
|
||||
err := fs.Parse(args)
|
||||
if err != nil {
|
||||
fmt.Println(err)
|
||||
os.Exit(1)
|
||||
}
|
||||
|
||||
args = fs.Args()
|
||||
|
||||
var projectConfig pkg.ProjectConfig
|
||||
|
||||
var response string
|
||||
cmd/cli/commands/list.go (new file, 26 lines)

@@ -0,0 +1,26 @@

```go
package commands

import (
    "fmt"

    util "github.com/juls0730/flux/internal/util/cli"
    "github.com/juls0730/flux/pkg/API"
)

func ListCommand(ctx CommandCtx, args []string) error {
    apps, err := util.GetRequest[[]API.App](ctx.Config.DaemonURL + "/apps")
    if err != nil {
        return fmt.Errorf("failed to get apps: %v", err)
    }

    if len(*apps) == 0 {
        fmt.Println("No apps found")
        return nil
    }

    for _, app := range *apps {
        fmt.Printf("%s (%s)\n", app.Name, app.DeploymentStatus)
    }

    return nil
}
```
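`util.GetRequest` is a generic helper from `internal/util/cli` that the new CLI leans on but that is not part of this diff; based on how it is called (returning a decoded `*T`), it presumably resembles the following sketch:

```go
package util

import (
    "encoding/json"
    "fmt"
    "net/http"
)

// GetRequest sketches the generic helper used by the CLI: GET a URL and
// JSON-decode the body into T. The real implementation in
// internal/util/cli may handle errors and status codes differently.
func GetRequest[T any](url string) (*T, error) {
    resp, err := http.Get(url)
    if err != nil {
        return nil, err
    }
    defer resp.Body.Close()

    if resp.StatusCode != http.StatusOK {
        return nil, fmt.Errorf("request failed with status %s", resp.Status)
    }

    var out T
    if err := json.NewDecoder(resp.Body).Decode(&out); err != nil {
        return nil, err
    }
    return &out, nil
}
```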
cmd/cli/commands/start.go (new file, 25 lines)

@@ -0,0 +1,25 @@

```go
package commands

import (
    "fmt"

    util "github.com/juls0730/flux/internal/util/cli"
)

func StartCommand(ctx CommandCtx, args []string) error {
    projectName, err := util.GetProject("start", args, ctx.Config)
    if err != nil {
        return err
    }

    // Put request to start the project, since the start endpoint is idempotent.
    // If the project is already running, this will return a 304 Not Modified
    err = util.PutRequest(ctx.Config.DaemonURL+"/app/"+projectName.Id+"/start", nil)
    if err != nil {
        return fmt.Errorf("failed to start %s: %v", projectName.Name, err)
    }

    fmt.Printf("Successfully started %s\n", projectName.Name)

    return nil
}
```
cmd/cli/commands/stop.go (new file, 22 lines)

@@ -0,0 +1,22 @@

```go
package commands

import (
    "fmt"

    util "github.com/juls0730/flux/internal/util/cli"
)

func StopCommand(ctx CommandCtx, args []string) error {
    projectName, err := util.GetProject("stop", args, ctx.Config)
    if err != nil {
        return err
    }

    err = util.PutRequest(ctx.Config.DaemonURL+"/app/"+projectName.Id+"/stop", nil)
    if err != nil {
        return fmt.Errorf("failed to stop %s: %v", projectName.Name, err)
    }

    fmt.Printf("Successfully stopped %s\n", projectName.Name)
    return nil
}
```
cmd/cli/config.json (new file, 3 lines)

@@ -0,0 +1,3 @@

```json
{
    "daemon_url": "http://127.0.0.1:5647"
}
```
cmd/cli/main.go (new file, 233 lines)

@@ -0,0 +1,233 @@
package main
|
||||
|
||||
import (
|
||||
_ "embed"
|
||||
"encoding/json"
|
||||
"flag"
|
||||
"fmt"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"strings"
|
||||
|
||||
"github.com/agnivade/levenshtein"
|
||||
"github.com/juls0730/flux/cmd/cli/commands"
|
||||
util "github.com/juls0730/flux/internal/util/cli"
|
||||
"github.com/juls0730/flux/pkg"
|
||||
"github.com/juls0730/flux/pkg/API"
|
||||
"github.com/mattn/go-isatty"
|
||||
)
|
||||
|
||||
func isInteractive() bool {
|
||||
return isatty.IsTerminal(os.Stdout.Fd()) || isatty.IsCygwinTerminal(os.Stdout.Fd())
|
||||
}
|
||||
|
||||
//go:embed config.json
|
||||
var config []byte
|
||||
|
||||
var configPath = filepath.Join(os.Getenv("HOME"), "/.config/flux")
|
||||
|
||||
var version = pkg.Version
|
||||
|
||||
var helpStr = `Usage:
|
||||
flux <command>
|
||||
|
||||
Available Commands:
|
||||
%s
|
||||
|
||||
Available Flags:
|
||||
--help, -h: Show this help message
|
||||
|
||||
Use "flux <command> --help" for more information about a command.
|
||||
`
|
||||
|
||||
var maxDistance = 3
|
||||
|
||||
type Command struct {
|
||||
Help string
|
||||
HandlerFunc commands.CommandFunc
|
||||
}
|
||||
|
||||
type CommandHandler struct {
|
||||
commands map[string]Command
|
||||
aliases map[string]string
|
||||
}
|
||||
|
||||
func NewCommandHandler() CommandHandler {
|
||||
return CommandHandler{
|
||||
commands: make(map[string]Command),
|
||||
aliases: make(map[string]string),
|
||||
}
|
||||
}
|
||||
|
||||
func (h *CommandHandler) RegisterCmd(name string, handler commands.CommandFunc, help string) {
|
||||
coomand := Command{
|
||||
Help: help,
|
||||
HandlerFunc: handler,
|
||||
}
|
||||
|
||||
h.commands[name] = coomand
|
||||
}
|
||||
|
||||
func (h *CommandHandler) RegisterAlias(alias string, command string) {
|
||||
h.aliases[alias] = command
|
||||
}
|
||||
|
||||
// returns the command and whether or not it exists
|
||||
func (h *CommandHandler) GetCommand(command string) (Command, bool) {
|
||||
if command, ok := h.aliases[command]; ok {
|
||||
return h.commands[command], true
|
||||
}
|
||||
|
||||
commandStruct, ok := h.commands[command]
|
||||
return commandStruct, ok
|
||||
}
|
||||
|
||||
var helpPadding = 13
|
||||
|
||||
func (h *CommandHandler) GetHelp() {
|
||||
commandsStr := ""
|
||||
for command := range h.commands {
|
||||
curLine := ""
|
||||
|
||||
curLine += command
|
||||
for alias, aliasCommand := range h.aliases {
|
||||
if aliasCommand == command {
|
||||
curLine += fmt.Sprintf(", %s", alias)
|
||||
}
|
||||
}
|
||||
|
||||
curLine += strings.Repeat(" ", helpPadding-(len(curLine)-2))
|
||||
commandsStr += fmt.Sprintf(" %s %s\n", curLine, h.commands[command].Help)
|
||||
}
|
||||
|
||||
fmt.Printf(helpStr, strings.TrimRight(commandsStr, "\n"))
|
||||
}
|
||||
|
||||
func (h *CommandHandler) GetHelpCmd(commands.CommandCtx, []string) error {
|
||||
h.GetHelp()
|
||||
return nil
|
||||
}
|
||||
|
||||
func runCommand(command string, args []string, config pkg.CLIConfig, info API.Info, cmdHandler CommandHandler) error {
|
||||
commandCtx := commands.CommandCtx{
|
||||
Config: config,
|
||||
Info: info,
|
||||
Interactive: isInteractive(),
|
||||
}
|
||||
|
||||
commandStruct, ok := cmdHandler.commands[command]
|
||||
if ok {
|
||||
return commandStruct.HandlerFunc(commandCtx, args)
|
||||
}
|
||||
|
||||
// diff the command against the list of commands and if we find a command that is more than 80% similar, ask if that's what the user meant
|
||||
var closestMatch struct {
|
||||
name string
|
||||
score int
|
||||
}
|
||||
for cmdName := range cmdHandler.commands {
|
||||
distance := levenshtein.ComputeDistance(cmdName, command)
|
||||
|
||||
if distance <= maxDistance {
|
||||
if closestMatch.name == "" || distance < closestMatch.score {
|
||||
closestMatch.name = cmdName
|
||||
closestMatch.score = distance
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if closestMatch.name == "" {
|
||||
return fmt.Errorf("unknown command: %s", command)
|
||||
}
|
||||
|
||||
var response string
|
||||
// new line ommitted because it will be produced when the user presses enter to submit their response
|
||||
fmt.Printf("No command found with the name '%s'. Did you mean '%s'? (y/N)", command, closestMatch.name)
|
||||
fmt.Scanln(&response)
|
||||
|
||||
if strings.ToLower(response) == "y" || strings.ToLower(response) == "yes" {
|
||||
command = closestMatch.name
|
||||
} else {
|
||||
return nil
|
||||
}
|
||||
|
||||
// re-run command after accepting the suggestion
|
||||
return runCommand(command, args, config, info, cmdHandler)
|
||||
}
|
||||
|
||||
func main() {
|
||||
if !isInteractive() {
|
||||
fmt.Printf("Flux is being run non-interactively\n")
|
||||
}
|
||||
|
||||
cmdHandler := NewCommandHandler()
|
||||
|
||||
cmdHandler.RegisterCmd("init", commands.InitCommand, "Initialize a new project")
|
||||
cmdHandler.RegisterCmd("deploy", commands.DeployCommand, "Deploy a new version of the app")
|
||||
cmdHandler.RegisterCmd("start", commands.StartCommand, "Start the app")
|
||||
cmdHandler.RegisterCmd("stop", commands.StopCommand, "Stop the app")
|
||||
cmdHandler.RegisterCmd("list", commands.ListCommand, "List all the apps")
|
||||
cmdHandler.RegisterCmd("delete", commands.DeleteCommand, "Delete the app")
|
||||
|
||||
fs := flag.NewFlagSet("flux", flag.ExitOnError)
|
||||
fs.Usage = func() {
|
||||
cmdHandler.GetHelp()
|
||||
}
|
||||
|
||||
err := fs.Parse(os.Args[1:])
|
||||
if err != nil {
|
||||
fmt.Println(err)
|
||||
os.Exit(1)
|
||||
}
|
||||
|
||||
if len(os.Args) < 2 {
|
||||
cmdHandler.GetHelp()
|
||||
os.Exit(1)
|
||||
}
|
||||
|
||||
if _, err := os.Stat(filepath.Join(configPath, "config.json")); err != nil {
|
||||
if err := os.MkdirAll(configPath, 0755); err != nil {
|
||||
fmt.Printf("Failed to create config directory: %v\n", err)
|
||||
os.Exit(1)
|
||||
}
|
||||
|
||||
if err = os.WriteFile(filepath.Join(configPath, "config.json"), config, 0644); err != nil {
|
||||
fmt.Printf("Failed to write config file: %v\n", err)
|
||||
os.Exit(1)
|
||||
}
|
||||
}
|
||||
|
||||
var config pkg.CLIConfig
|
||||
configBytes, err := os.ReadFile(filepath.Join(configPath, "config.json"))
|
||||
if err != nil {
|
||||
fmt.Printf("Failed to read config file: %v\n", err)
|
||||
os.Exit(1)
|
||||
}
|
||||
|
||||
if err := json.Unmarshal(configBytes, &config); err != nil {
|
||||
fmt.Printf("Failed to parse config file: %v\n", err)
|
||||
os.Exit(1)
|
||||
}
|
||||
|
||||
if config.DaemonURL == "" {
|
||||
fmt.Printf("Daemon URL is empty\n")
|
||||
os.Exit(1)
|
||||
}
|
||||
|
||||
info, err := util.GetRequest[API.Info](config.DaemonURL + "/heartbeat")
|
||||
if err != nil {
|
||||
fmt.Printf("Failed to connect to daemon\n")
|
||||
os.Exit(1)
|
||||
}
|
||||
|
||||
if info.Version != version {
|
||||
fmt.Printf("Version mismatch, daemon is running version %s, but you are running version %s\n", info.Version, version)
|
||||
os.Exit(1)
|
||||
}
|
||||
|
||||
err = runCommand(os.Args[1], fs.Args()[1:], config, *info, cmdHandler)
|
||||
if err != nil {
|
||||
fmt.Printf("Error: %v\n", err)
|
||||
os.Exit(1)
|
||||
}
|
||||
}
|
||||
cmd/daemon/main.go (new file, 35 lines)

@@ -0,0 +1,35 @@

```go
package main

import (
    "fmt"
    "net/http"
    _ "net/http/pprof"
    "os"

    "github.com/juls0730/flux/internal/handlers"
)

func main() {
    fluxServer := handlers.NewServer()
    defer fluxServer.Stop()

    http.HandleFunc("POST /deploy", fluxServer.DeployNewApp)

    http.HandleFunc("GET /apps", fluxServer.GetAllApps)
    http.HandleFunc("GET /app/by-name/{name}", fluxServer.GetAppByName)
    http.HandleFunc("GET /app/by-id/{id}", fluxServer.GetAppById)

    http.HandleFunc("PUT /app/{id}/start", fluxServer.StartApp)
    http.HandleFunc("PUT /app/{id}/stop", fluxServer.StopApp)

    http.HandleFunc("DELETE /apps", fluxServer.DeleteAllDeploymentsHandler)
    http.HandleFunc("DELETE /app/{id}", fluxServer.DeleteDeployHandler)

    http.HandleFunc("GET /heartbeat", fluxServer.DaemonInfoHandler)

    err := fluxServer.ListenAndServe()
    if err != nil {
        fmt.Printf("Failed to start server: %v\n", err)
        os.Exit(1)
    }
}
```
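These route patterns use the method-and-wildcard routing added to `net/http` in Go 1.22 (the module targets go 1.23.3), so handlers read path parameters with `r.PathValue`. A hedged sketch of how a handler such as `StartApp` might consume the `{id}` wildcard (the real handlers live in `internal/handlers` and are not part of this comparison):

```go
package handlers

import (
    "net/http"

    "github.com/google/uuid"
)

// startAppSketch illustrates reading the {id} wildcard registered in
// cmd/daemon/main.go; it is not the actual StartApp implementation.
func startAppSketch(w http.ResponseWriter, r *http.Request) {
    id, err := uuid.Parse(r.PathValue("id"))
    if err != nil {
        http.Error(w, "invalid app id", http.StatusBadRequest)
        return
    }

    // The real handler would look the app up and start its containers;
    // an already-running app presumably yields 304, per the CLI comment.
    _ = id
    w.WriteHeader(http.StatusOK)
}
```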
@@ -1,3 +0,0 @@

```json
{
    "deamon_url": "http://127.0.0.1:5647"
}
```
@@ -1,113 +0,0 @@
|
||||
package handlers
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"io"
|
||||
"net/http"
|
||||
"strings"
|
||||
|
||||
"github.com/briandowns/spinner"
|
||||
"github.com/juls0730/flux/cmd/flux/models"
|
||||
"github.com/juls0730/flux/pkg"
|
||||
)
|
||||
|
||||
func DeleteCommand(seekingHelp bool, config models.Config, info pkg.Info, loadingSpinner *spinner.Spinner, spinnerWriter *models.CustomSpinnerWriter, args []string) error {
|
||||
if seekingHelp {
|
||||
fmt.Println(`Usage:
|
||||
flux delete [project-name | all]
|
||||
|
||||
Options:
|
||||
project-name: The name of the project to delete
|
||||
all: Delete all projects
|
||||
|
||||
Flux will delete the deployment of the app in the current directory or the specified project.`)
|
||||
return nil
|
||||
}
|
||||
|
||||
if len(args) == 1 {
|
||||
if args[0] == "all" {
|
||||
var response string
|
||||
fmt.Print("Are you sure you want to delete all projects? this will delete all volumes and containers associated and cannot be undone. \n[y/N] ")
|
||||
fmt.Scanln(&response)
|
||||
|
||||
if strings.ToLower(response) != "y" {
|
||||
fmt.Println("Aborting...")
|
||||
return nil
|
||||
}
|
||||
|
||||
response = ""
|
||||
|
||||
fmt.Printf("Are you really sure you want to delete all projects? \n[y/N] ")
|
||||
fmt.Scanln(&response)
|
||||
|
||||
if strings.ToLower(response) != "y" {
|
||||
fmt.Println("Aborting...")
|
||||
return nil
|
||||
}
|
||||
|
||||
req, err := http.NewRequest("DELETE", config.DeamonURL+"/deployments", nil)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to delete deployments: %v", err)
|
||||
}
|
||||
resp, err := http.DefaultClient.Do(req)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to delete deployments: %v", err)
|
||||
}
|
||||
defer resp.Body.Close()
|
||||
|
||||
if resp.StatusCode != http.StatusOK {
|
||||
responseBody, err := io.ReadAll(resp.Body)
|
||||
if err != nil {
|
||||
return fmt.Errorf("error reading response body: %v", err)
|
||||
}
|
||||
|
||||
responseBody = []byte(strings.TrimSuffix(string(responseBody), "\n"))
|
||||
|
||||
return fmt.Errorf("delete failed: %s", responseBody)
|
||||
}
|
||||
|
||||
fmt.Printf("Successfully deleted all projects\n")
|
||||
return nil
|
||||
}
|
||||
}
|
||||
|
||||
projectName, err := GetProjectName("delete", args)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// ask for confirmation
|
||||
fmt.Printf("Are you sure you want to delete %s? this will delete all volumes and containers associated with the deployment, and cannot be undone. \n[y/N] ", projectName)
|
||||
var response string
|
||||
fmt.Scanln(&response)
|
||||
|
||||
if strings.ToLower(response) != "y" {
|
||||
fmt.Println("Aborting...")
|
||||
return nil
|
||||
}
|
||||
|
||||
req, err := http.NewRequest("DELETE", config.DeamonURL+"/deployments/"+projectName, nil)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to delete app: %v", err)
|
||||
}
|
||||
resp, err := http.DefaultClient.Do(req)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to delete app: %v", err)
|
||||
}
|
||||
defer resp.Body.Close()
|
||||
|
||||
if resp.StatusCode != http.StatusOK {
|
||||
responseBody, err := io.ReadAll(resp.Body)
|
||||
if err != nil {
|
||||
return fmt.Errorf("error reading response body: %v", err)
|
||||
}
|
||||
|
||||
responseBody = []byte(strings.TrimSuffix(string(responseBody), "\n"))
|
||||
|
||||
return fmt.Errorf("delete failed: %s", responseBody)
|
||||
}
|
||||
|
||||
fmt.Printf("Successfully deleted %s\n", projectName)
|
||||
|
||||
return nil
|
||||
}
|
||||
@@ -1,55 +0,0 @@
|
||||
package handlers
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"io"
|
||||
"net/http"
|
||||
"strings"
|
||||
|
||||
"github.com/briandowns/spinner"
|
||||
"github.com/juls0730/flux/cmd/flux/models"
|
||||
"github.com/juls0730/flux/pkg"
|
||||
)
|
||||
|
||||
func ListCommand(seekingHelp bool, config models.Config, info pkg.Info, loadingSpinner *spinner.Spinner, spinnerWriter *models.CustomSpinnerWriter, args []string) error {
|
||||
if seekingHelp {
|
||||
fmt.Println(`Usage:
|
||||
flux list
|
||||
|
||||
Flux will list all the apps in the daemon.`)
|
||||
return nil
|
||||
}
|
||||
|
||||
resp, err := http.Get(config.DeamonURL + "/apps")
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to get apps: %v", err)
|
||||
}
|
||||
|
||||
if resp.StatusCode != http.StatusOK {
|
||||
responseBody, err := io.ReadAll(resp.Body)
|
||||
if err != nil {
|
||||
return fmt.Errorf("error reading response body: %v", err)
|
||||
}
|
||||
|
||||
responseBody = []byte(strings.TrimSuffix(string(responseBody), "\n"))
|
||||
|
||||
return fmt.Errorf("list failed: %s", responseBody)
|
||||
}
|
||||
|
||||
var apps []pkg.App
|
||||
if err := json.NewDecoder(resp.Body).Decode(&apps); err != nil {
|
||||
return fmt.Errorf("failed to decode apps: %v", err)
|
||||
}
|
||||
|
||||
if len(apps) == 0 {
|
||||
fmt.Println("No apps found")
|
||||
return nil
|
||||
}
|
||||
|
||||
for _, app := range apps {
|
||||
fmt.Printf("%s (%s)\n", app.Name, app.DeploymentStatus)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
@@ -1,36 +0,0 @@
|
||||
package handlers
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"os"
|
||||
|
||||
"github.com/juls0730/flux/pkg"
|
||||
)
|
||||
|
||||
func GetProjectName(command string, args []string) (string, error) {
|
||||
var projectName string
|
||||
|
||||
if len(args) == 0 {
|
||||
if _, err := os.Stat("flux.json"); err != nil {
|
||||
return "", fmt.Errorf("usage: flux %[1]s <app name>, or run flux %[1]s in the project directory", command)
|
||||
}
|
||||
|
||||
fluxConfigFile, err := os.Open("flux.json")
|
||||
if err != nil {
|
||||
return "", fmt.Errorf("failed to open flux.json: %v", err)
|
||||
}
|
||||
defer fluxConfigFile.Close()
|
||||
|
||||
var config pkg.ProjectConfig
|
||||
if err := json.NewDecoder(fluxConfigFile).Decode(&config); err != nil {
|
||||
return "", fmt.Errorf("failed to decode flux.json: %v", err)
|
||||
}
|
||||
|
||||
projectName = config.Name
|
||||
} else {
|
||||
projectName = args[0]
|
||||
}
|
||||
|
||||
return projectName, nil
|
||||
}
|
||||
@@ -1,48 +0,0 @@
|
||||
package handlers
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"io"
|
||||
"net/http"
|
||||
"strings"
|
||||
|
||||
"github.com/briandowns/spinner"
|
||||
"github.com/juls0730/flux/cmd/flux/models"
|
||||
"github.com/juls0730/flux/pkg"
|
||||
)
|
||||
|
||||
func StartCommand(seekingHelp bool, config models.Config, info pkg.Info, loadingSpinner *spinner.Spinner, spinnerWriter *models.CustomSpinnerWriter, args []string) error {
|
||||
if seekingHelp {
|
||||
fmt.Println(`Usage:
|
||||
flux start
|
||||
|
||||
Flux will start the deployment of the app in the current directory.`)
|
||||
return nil
|
||||
}
|
||||
|
||||
projectName, err := GetProjectName("start", args)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
req, err := http.Post(config.DeamonURL+"/start/"+projectName, "application/json", nil)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to start app: %v", err)
|
||||
}
|
||||
defer req.Body.Close()
|
||||
|
||||
if req.StatusCode != http.StatusOK {
|
||||
responseBody, err := io.ReadAll(req.Body)
|
||||
if err != nil {
|
||||
return fmt.Errorf("error reading response body: %v", err)
|
||||
}
|
||||
|
||||
responseBody = []byte(strings.TrimSuffix(string(responseBody), "\n"))
|
||||
|
||||
return fmt.Errorf("start failed: %s", responseBody)
|
||||
}
|
||||
|
||||
fmt.Printf("Successfully started %s\n", projectName)
|
||||
|
||||
return nil
|
||||
}
|
||||
@@ -1,47 +0,0 @@
|
||||
package handlers
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"io"
|
||||
"net/http"
|
||||
"strings"
|
||||
|
||||
"github.com/briandowns/spinner"
|
||||
"github.com/juls0730/flux/cmd/flux/models"
|
||||
"github.com/juls0730/flux/pkg"
|
||||
)
|
||||
|
||||
func StopCommand(seekingHelp bool, config models.Config, info pkg.Info, loadingSpinner *spinner.Spinner, spinnerWriter *models.CustomSpinnerWriter, args []string) error {
|
||||
if seekingHelp {
|
||||
fmt.Println(`Usage:
|
||||
flux stop
|
||||
|
||||
Flux will stop the deployment of the app in the current directory.`)
|
||||
return nil
|
||||
}
|
||||
|
||||
projectName, err := GetProjectName("stop", args)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
req, err := http.Post(config.DeamonURL+"/stop/"+projectName, "application/json", nil)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to stop app: %v", err)
|
||||
}
|
||||
defer req.Body.Close()
|
||||
|
||||
if req.StatusCode != http.StatusOK {
|
||||
responseBody, err := io.ReadAll(req.Body)
|
||||
if err != nil {
|
||||
return fmt.Errorf("error reading response body: %v", err)
|
||||
}
|
||||
|
||||
responseBody = []byte(strings.TrimSuffix(string(responseBody), "\n"))
|
||||
|
||||
return fmt.Errorf("stop failed: %s", responseBody)
|
||||
}
|
||||
|
||||
fmt.Printf("Successfully stopped %s\n", projectName)
|
||||
return nil
|
||||
}
|
||||
cmd/flux/main.go (deleted, 197 lines)

@@ -1,197 +0,0 @@
|
||||
package main
|
||||
|
||||
import (
|
||||
_ "embed"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"net/http"
|
||||
"os"
|
||||
"os/signal"
|
||||
"path/filepath"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"github.com/agnivade/levenshtein"
|
||||
"github.com/briandowns/spinner"
|
||||
"github.com/juls0730/flux/cmd/flux/handlers"
|
||||
"github.com/juls0730/flux/cmd/flux/models"
|
||||
"github.com/juls0730/flux/pkg"
|
||||
)
|
||||
|
||||
//go:embed config.json
|
||||
var config []byte
|
||||
|
||||
var configPath = filepath.Join(os.Getenv("HOME"), "/.config/flux")
|
||||
|
||||
var helpStr = `Usage:
|
||||
flux <command>
|
||||
|
||||
Available Commands:
|
||||
init Initialize a new project
|
||||
deploy Deploy a new version of the app
|
||||
stop Stop a container
|
||||
start Start a container
|
||||
delete Delete a container
|
||||
list List all containers
|
||||
|
||||
Flags:
|
||||
-h, --help help for flux
|
||||
|
||||
Use "flux <command> --help" for more information about a command.`
|
||||
|
||||
var maxDistance = 3
|
||||
|
||||
type CommandHandler struct {
|
||||
commands map[string]func(bool, models.Config, pkg.Info, *spinner.Spinner, *models.CustomSpinnerWriter, []string) error
|
||||
}
|
||||
|
||||
func (h *CommandHandler) RegisterCmd(name string, handler func(bool, models.Config, pkg.Info, *spinner.Spinner, *models.CustomSpinnerWriter, []string) error) {
|
||||
h.commands[name] = handler
|
||||
}
|
||||
|
||||
func runCommand(command string, args []string, config models.Config, info pkg.Info, cmdHandler CommandHandler, try int) error {
|
||||
if try == 2 {
|
||||
return fmt.Errorf("unknown command: %s", command)
|
||||
}
|
||||
|
||||
seekingHelp := false
|
||||
if len(args) > 0 && (args[len(args)-1] == "--help" || args[len(args)-1] == "-h") {
|
||||
seekingHelp = true
|
||||
args = args[:len(args)-1]
|
||||
}
|
||||
|
||||
spinnerWriter := models.NewCustomSpinnerWriter()
|
||||
|
||||
loadingSpinner := spinner.New(spinner.CharSets[14], 100*time.Millisecond, spinner.WithWriter(spinnerWriter))
|
||||
defer func() {
|
||||
if loadingSpinner.Active() {
|
||||
loadingSpinner.Stop()
|
||||
}
|
||||
}()
|
||||
|
||||
signalChannel := make(chan os.Signal, 1)
|
||||
signal.Notify(signalChannel, os.Interrupt)
|
||||
go func() {
|
||||
<-signalChannel
|
||||
if loadingSpinner.Active() {
|
||||
loadingSpinner.Stop()
|
||||
}
|
||||
|
||||
os.Exit(0)
|
||||
}()
|
||||
|
||||
handler, ok := cmdHandler.commands[command]
|
||||
if ok {
|
||||
return handler(seekingHelp, config, info, loadingSpinner, spinnerWriter, args)
|
||||
}
|
||||
|
||||
// diff the command against the list of commands and if we find a command that is more than 80% similar, ask if that's what the user meant
|
||||
var closestMatch struct {
|
||||
name string
|
||||
score int
|
||||
}
|
||||
for cmdName := range cmdHandler.commands {
|
||||
distance := levenshtein.ComputeDistance(cmdName, command)
|
||||
|
||||
if distance <= maxDistance {
|
||||
if closestMatch.name == "" || distance < closestMatch.score {
|
||||
closestMatch.name = cmdName
|
||||
closestMatch.score = distance
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if closestMatch.name == "" {
|
||||
return fmt.Errorf("unknown command: %s", command)
|
||||
}
|
||||
|
||||
var response string
|
||||
fmt.Printf("No command found with the name '%s'. Did you mean '%s'?\n", command, closestMatch.name)
|
||||
fmt.Scanln(&response)
|
||||
|
||||
if strings.ToLower(response) == "y" || strings.ToLower(response) == "yes" {
|
||||
command = closestMatch.name
|
||||
} else {
|
||||
return nil
|
||||
}
|
||||
|
||||
return runCommand(command, args, config, info, cmdHandler, try+1)
|
||||
}
|
||||
|
||||
func main() {
|
||||
if len(os.Args) < 2 {
|
||||
fmt.Println(helpStr)
|
||||
os.Exit(1)
|
||||
}
|
||||
|
||||
if os.Args[1] == "--help" || os.Args[1] == "-h" {
|
||||
fmt.Println(helpStr)
|
||||
os.Exit(0)
|
||||
}
|
||||
|
||||
if _, err := os.Stat(filepath.Join(configPath, "config.json")); err != nil {
|
||||
if err := os.MkdirAll(configPath, 0755); err != nil {
|
||||
fmt.Printf("Failed to create config directory: %v\n", err)
|
||||
os.Exit(1)
|
||||
}
|
||||
|
||||
if err = os.WriteFile(filepath.Join(configPath, "config.json"), config, 0644); err != nil {
|
||||
fmt.Printf("Failed to write config file: %v\n", err)
|
||||
os.Exit(1)
|
||||
}
|
||||
}
|
||||
|
||||
var config models.Config
|
||||
configBytes, err := os.ReadFile(filepath.Join(configPath, "config.json"))
|
||||
if err != nil {
|
||||
fmt.Printf("Failed to read config file: %v\n", err)
|
||||
os.Exit(1)
|
||||
}
|
||||
|
||||
if err := json.Unmarshal(configBytes, &config); err != nil {
|
||||
fmt.Printf("Failed to parse config file: %v\n", err)
|
||||
os.Exit(1)
|
||||
}
|
||||
|
||||
command := os.Args[1]
|
||||
args := os.Args[2:]
|
||||
|
||||
resp, err := http.Get(config.DeamonURL + "/heartbeat")
|
||||
if err != nil {
|
||||
fmt.Println("Failed to connect to daemon")
|
||||
os.Exit(1)
|
||||
}
|
||||
defer resp.Body.Close()
|
||||
if resp.StatusCode != http.StatusOK {
|
||||
fmt.Println("Failed to connect to daemon")
|
||||
os.Exit(1)
|
||||
}
|
||||
|
||||
var info pkg.Info
|
||||
err = json.NewDecoder(resp.Body).Decode(&info)
|
||||
if err != nil {
|
||||
fmt.Printf("Failed to decode info: %v\n", err)
|
||||
os.Exit(1)
|
||||
}
|
||||
|
||||
if resp.StatusCode != http.StatusOK {
|
||||
fmt.Println("Failed to connect to daemon")
|
||||
os.Exit(1)
|
||||
}
|
||||
|
||||
cmdHandler := CommandHandler{
|
||||
commands: make(map[string]func(bool, models.Config, pkg.Info, *spinner.Spinner, *models.CustomSpinnerWriter, []string) error),
|
||||
}
|
||||
|
||||
cmdHandler.RegisterCmd("deploy", handlers.DeployCommand)
|
||||
cmdHandler.RegisterCmd("stop", handlers.StopCommand)
|
||||
cmdHandler.RegisterCmd("start", handlers.StartCommand)
|
||||
cmdHandler.RegisterCmd("delete", handlers.DeleteCommand)
|
||||
cmdHandler.RegisterCmd("init", handlers.InitCommand)
|
||||
|
||||
err = runCommand(command, args, config, info, cmdHandler, 0)
|
||||
if err != nil {
|
||||
fmt.Printf("%v\n", err)
|
||||
os.Exit(1)
|
||||
}
|
||||
}
|
||||
@@ -1,5 +0,0 @@

```go
package models

type Config struct {
    DeamonURL string `json:"deamon_url"`
}
```
@@ -1,28 +0,0 @@

```go
package main

import (
    "net/http"
    _ "net/http/pprof"

    "github.com/juls0730/flux/server"
    "go.uber.org/zap"
)

func main() {
    fluxServer := server.NewServer()
    defer fluxServer.Stop()

    http.HandleFunc("POST /deploy", fluxServer.DeployHandler)
    http.HandleFunc("DELETE /deployments", fluxServer.DeleteAllDeploymentsHandler)
    http.HandleFunc("DELETE /deployments/{name}", fluxServer.DeleteDeployHandler)
    http.HandleFunc("POST /start/{name}", fluxServer.StartDeployHandler)
    http.HandleFunc("POST /stop/{name}", fluxServer.StopDeployHandler)
    http.HandleFunc("GET /apps", fluxServer.ListAppsHandler)
    http.HandleFunc("GET /heartbeat", fluxServer.DaemonInfoHandler)

    fluxServer.Logger.Info("Fluxd started on http://127.0.0.1:5647")
    err := http.ListenAndServe(":5647", nil)
    if err != nil {
        fluxServer.Logger.Fatalf("Failed to start server: %v", zap.Error(err))
    }
}
```
go.mod (3 lines changed)

@@ -5,6 +5,7 @@ go 1.23.3

```
require (
    github.com/briandowns/spinner v1.23.1
    github.com/docker/docker v27.3.1+incompatible
    github.com/google/uuid v1.6.0
    github.com/joho/godotenv v1.5.1
    github.com/mattn/go-sqlite3 v1.14.24
)
```

@@ -24,7 +25,7 @@ require (

```diff
 	github.com/go-logr/stdr v1.2.2 // indirect
 	github.com/gogo/protobuf v1.3.2 // indirect
 	github.com/mattn/go-colorable v0.1.2 // indirect
-	github.com/mattn/go-isatty v0.0.8 // indirect
+	github.com/mattn/go-isatty v0.0.20
 	github.com/moby/docker-image-spec v1.3.1 // indirect
 	github.com/moby/term v0.5.0 // indirect
 	github.com/morikuni/aec v1.0.0 // indirect
```
go.sum (4 lines changed)

@@ -48,8 +48,9 @@ github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+o

```
github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
github.com/mattn/go-colorable v0.1.2 h1:/bC9yWikZXAL9uJdulbSfyVNIR3n3trXl+v8+1sx8mU=
github.com/mattn/go-colorable v0.1.2/go.mod h1:U0ppj6V5qS13XJ6of8GYAs25YV2eR4EVcfRqFIhoBtE=
github.com/mattn/go-isatty v0.0.8 h1:HLtExJ+uU2HOZ+wI0Tt5DtUDrx8yhUqDcp7fYERX4CE=
github.com/mattn/go-isatty v0.0.8/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hdxcsrc5s=
github.com/mattn/go-isatty v0.0.20 h1:xfD0iDuEKnDkl03q4limB+vH+GxLEtL/jb4xVJSWWEY=
github.com/mattn/go-isatty v0.0.20/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y=
github.com/mattn/go-sqlite3 v1.14.24 h1:tpSp2G2KyMnnQu99ngJ47EIkWVmliIizyZBfPrBWDRM=
github.com/mattn/go-sqlite3 v1.14.24/go.mod h1:Uh1q+B4BYcTPb+yiD3kU8Ct7aC0hY9fxUwlHK0RXw+Y=
github.com/moby/docker-image-spec v1.3.1 h1:jMKff3w6PgbfSa69GfNg+zN/XLhfXJGnEx3Nl2EsFP0=
```

@@ -118,6 +119,7 @@ golang.org/x/sys v0.0.0-20190222072716-a9d3bda3a223/go.mod h1:STP8DvDyc/dI5b8T5h

```
golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190507160741-ecd444e8653b/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.27.0 h1:wBqf8DvsY9Y/2P8gAfPDEYNuS30J4lPHJxXSb/nJZ+s=
golang.org/x/sys v0.27.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
golang.org/x/term v0.1.0 h1:g6Z6vPFA9dYBAF7DWcH6sCcOntplXsDKcliusYijMlw=
```
internal/docker/container.go (new file, 172 lines)

@@ -0,0 +1,172 @@
package docker
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"io"
|
||||
"net/http"
|
||||
"time"
|
||||
|
||||
dockerTypes "github.com/docker/docker/api/types"
|
||||
"github.com/docker/docker/api/types/container"
|
||||
"github.com/docker/docker/api/types/image"
|
||||
"github.com/docker/docker/api/types/mount"
|
||||
"github.com/docker/docker/pkg/namesgenerator"
|
||||
"go.uber.org/zap"
|
||||
)
|
||||
|
||||
type DockerContainer struct {
|
||||
ID DockerID
|
||||
Name string
|
||||
Volumes []*DockerVolume
|
||||
}
|
||||
|
||||
// Creates a container in the docker daemon and returns the descriptor for the container
|
||||
func (d *DockerClient) CreateDockerContainer(ctx context.Context, imageName string, vols []*DockerVolume, environment []string, hosts []string, name *string) (*DockerContainer, error) {
|
||||
for _, host := range hosts {
|
||||
if host == ":" {
|
||||
return nil, fmt.Errorf("invalid host %s", host)
|
||||
}
|
||||
}
|
||||
|
||||
if name == nil {
|
||||
containerName := fmt.Sprintf("flux-%s", namesgenerator.GetRandomName(0))
|
||||
name = &containerName
|
||||
}
|
||||
d.logger.Debugw("Creating container", zap.String("container_name", *name))
|
||||
mounts := make([]mount.Mount, len(vols))
|
||||
volumes := make(map[string]struct{}, len(vols))
|
||||
for i, volume := range vols {
|
||||
volumes[volume.VolumeID] = struct{}{}
|
||||
|
||||
mounts[i] = mount.Mount{
|
||||
Type: mount.TypeVolume,
|
||||
Source: volume.VolumeID,
|
||||
Target: volume.Mountpoint,
|
||||
ReadOnly: false,
|
||||
}
|
||||
}
|
||||
|
||||
resp, err := d.client.ContainerCreate(ctx, &container.Config{
|
||||
Image: imageName,
|
||||
Env: environment,
|
||||
Volumes: volumes,
|
||||
Labels: map[string]string{
|
||||
"managed-by": "flux",
|
||||
},
|
||||
},
|
||||
&container.HostConfig{
|
||||
RestartPolicy: container.RestartPolicy{Name: container.RestartPolicyUnlessStopped},
|
||||
NetworkMode: "bridge",
|
||||
Mounts: mounts,
|
||||
ExtraHosts: hosts,
|
||||
},
|
||||
nil,
|
||||
nil,
|
||||
*name,
|
||||
)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
c := &DockerContainer{
|
||||
ID: DockerID(resp.ID),
|
||||
Name: *name,
|
||||
Volumes: vols,
|
||||
}
|
||||
|
||||
return c, nil
|
||||
}
|
||||
|
||||
func (d *DockerClient) ContainerRemove(ctx context.Context, containerID DockerID, options container.RemoveOptions) error {
|
||||
d.logger.Debugw("Removing container", zap.String("container_id", string(containerID)))
|
||||
return d.client.ContainerRemove(ctx, string(containerID), options)
|
||||
}
|
||||
|
||||
func (d *DockerClient) StartContainer(ctx context.Context, containerID DockerID) error {
|
||||
return d.client.ContainerStart(ctx, string(containerID), container.StartOptions{})
|
||||
}
|
||||
|
||||
// blocks until the container returns a 200 status code
|
||||
func (d *DockerClient) ContainerWait(ctx context.Context, containerID DockerID, port uint16) error {
|
||||
ctx, cancel := context.WithTimeout(ctx, 30*time.Second)
|
||||
defer cancel()
|
||||
|
||||
for {
|
||||
select {
|
||||
case <-ctx.Done():
|
||||
return fmt.Errorf("container failed to become ready in time")
|
||||
|
||||
default:
|
||||
containerJSON, err := d.ContainerInspect(ctx, containerID)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if containerJSON.State.Running {
|
||||
resp, err := http.Get(fmt.Sprintf("http://%s:%d/", containerJSON.NetworkSettings.IPAddress, port))
|
||||
if err == nil && resp.StatusCode == http.StatusOK {
|
||||
return nil
|
||||
}
|
||||
}
|
||||
|
||||
time.Sleep(time.Second)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func (d *DockerClient) DeleteDockerContainer(ctx context.Context, containerID DockerID) error {
|
||||
d.logger.Debugw("Removing container", zap.String("container_id", string(containerID)))
|
||||
return d.client.ContainerRemove(ctx, string(containerID), container.RemoveOptions{})
|
||||
}
|
||||
|
||||
func (d *DockerClient) GetContainerIp(containerID DockerID) (string, error) {
|
||||
containerJSON, err := d.client.ContainerInspect(context.Background(), string(containerID))
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
|
||||
ip := containerJSON.NetworkSettings.IPAddress
|
||||
|
||||
return ip, nil
|
||||
}
|
||||
|
||||
type ContainerStatus struct {
|
||||
// Can be "created", "running", "paused", "restarting", "removing", "exited", or "dead"
|
||||
Status string
|
||||
ExitCode int
|
||||
}
|
||||
|
||||
func (d *DockerClient) GetContainerStatus(containerID DockerID) (*ContainerStatus, error) {
|
||||
containerJSON, err := d.client.ContainerInspect(context.Background(), string(containerID))
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
containerStatus := &ContainerStatus{
|
||||
Status: containerJSON.State.Status,
|
||||
ExitCode: containerJSON.State.ExitCode,
|
||||
}
|
||||
|
||||
return containerStatus, nil
|
||||
}
|
||||
|
||||
func (d *DockerClient) StopContainer(ctx context.Context, containerID DockerID) error {
|
||||
d.logger.Debugw("Stopping container", zap.String("container_id", string(containerID[:12])))
|
||||
return d.client.ContainerStop(ctx, string(containerID), container.StopOptions{})
|
||||
}
|
||||
|
||||
func (d *DockerClient) ImagePull(ctx context.Context, imageName string, options image.PullOptions) (io.ReadCloser, error) {
|
||||
d.logger.Debugw("Pulling image", zap.String("image", imageName))
|
||||
return d.client.ImagePull(ctx, imageName, options)
|
||||
}
|
||||
|
||||
func (d *DockerClient) ContainerInspect(ctx context.Context, containerID DockerID) (dockerTypes.ContainerJSON, error) {
|
||||
d.logger.Debugw("Inspecting container", zap.String("container_id", string(containerID)))
|
||||
return d.client.ContainerInspect(ctx, string(containerID))
|
||||
}
|
||||
|
||||
func (d *DockerClient) ContainerStart(ctx context.Context, containerID string, options container.StartOptions) error {
|
||||
d.logger.Debugw("Starting container", zap.String("container_id", containerID))
|
||||
return d.client.ContainerStart(ctx, containerID, options)
|
||||
}
|
||||
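Taken together, the helpers in container.go support a create-start-wait flow. The orchestration below is an illustrative sketch of how a caller (presumably the deployment code in `internal/handlers`) might compose them; it is not code from this diff:

```go
package docker

import "context"

// runSupplementalContainer sketches how the helpers in this file compose:
// create a volume, create a container that mounts it, start it, and wait
// until the app answers on its port. Error handling is abbreviated.
func (d *DockerClient) runSupplementalContainer(ctx context.Context, image string, env []string, port uint16) (*DockerContainer, error) {
    vol, err := d.CreateDockerVolume(ctx)
    if err != nil {
        return nil, err
    }
    vol.Mountpoint = "/data" // where the container expects its data

    c, err := d.CreateDockerContainer(ctx, image, []*DockerVolume{vol}, env, nil, nil)
    if err != nil {
        return nil, err
    }

    if err := d.StartContainer(ctx, c.ID); err != nil {
        return nil, err
    }

    // Blocks until the container responds with 200 on port, or times out.
    if err := d.ContainerWait(ctx, c.ID, port); err != nil {
        return nil, err
    }
    return c, nil
}
```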
internal/docker/docker.go (new file, 29 lines)

@@ -0,0 +1,29 @@

```go
package docker

import (
    "github.com/docker/docker/client"
    "go.uber.org/zap"
)

type DockerID string

// structure that holds the docker daemon information
type DockerClient struct {
    client *client.Client
    logger *zap.SugaredLogger
}

func NewDocker(rawDockerClient *client.Client, logger *zap.SugaredLogger) *DockerClient {
    if rawDockerClient == nil {
        var err error
        rawDockerClient, err = client.NewClientWithOpts(client.FromEnv)
        if err != nil {
            logger.Fatalw("Failed to create docker client", zap.Error(err))
        }
    }

    return &DockerClient{
        client: rawDockerClient,
        logger: logger,
    }
}
```
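`NewDocker` accepts either an injected `*client.Client` (useful for tests) or `nil`, in which case it builds one from the environment via `client.FromEnv`. A hedged usage example of the nil path (the logger setup here is an assumption, not taken from the diff):

```go
package main

import (
    "github.com/juls0730/flux/internal/docker"
    "go.uber.org/zap"
)

func newDockerFromEnv() *docker.DockerClient {
    logger, _ := zap.NewProduction()
    // nil tells NewDocker to build a client from the environment
    // (DOCKER_HOST, DOCKER_API_VERSION, ...).
    return docker.NewDocker(nil, logger.Sugar())
}
```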
37
internal/docker/volume.go
Normal file
37
internal/docker/volume.go
Normal file
@@ -0,0 +1,37 @@
|
||||
package docker
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
|
||||
"github.com/docker/docker/api/types/volume"
|
||||
"go.uber.org/zap"
|
||||
)
|
||||
|
||||
type DockerVolume struct {
|
||||
VolumeID string
|
||||
Mountpoint string
|
||||
}
|
||||
|
||||
func (d *DockerClient) CreateDockerVolume(ctx context.Context) (vol *DockerVolume, err error) {
|
||||
dockerVolume, err := d.client.VolumeCreate(ctx, volume.CreateOptions{
|
||||
Driver: "local",
|
||||
DriverOpts: map[string]string{},
|
||||
})
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to create volume: %v", err)
|
||||
}
|
||||
|
||||
d.logger.Debugw("Volume created", zap.String("volume_id", dockerVolume.Name), zap.String("mountpoint", dockerVolume.Mountpoint))
|
||||
|
||||
vol = &DockerVolume{
|
||||
VolumeID: dockerVolume.Name,
|
||||
}
|
||||
|
||||
return vol, nil
|
||||
}
|
||||
|
||||
func (d *DockerClient) DeleteDockerVolume(ctx context.Context, volumeID string) error {
|
||||
d.logger.Debugw("Removing volume", zap.String("volume_id", volumeID))
|
||||
return d.client.VolumeRemove(ctx, volumeID, true)
|
||||
}
|
||||
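A short, hedged usage sketch for the two volume helpers above; the DockerClient construction mirrors docker.go, and error handling is abbreviated:

```go
package main

import (
	"context"

	"go.uber.org/zap"

	"github.com/juls0730/flux/internal/docker"
)

func main() {
	logger, _ := zap.NewDevelopment()
	d := docker.NewDocker(nil, logger.Sugar())

	ctx := context.Background()
	vol, err := d.CreateDockerVolume(ctx)
	if err != nil {
		logger.Sugar().Fatalw("failed to create volume", zap.Error(err))
	}

	// VolumeID is the Docker-generated name; Mountpoint is left empty here
	// and is filled in by callers (see models.CreateVolume further down).
	defer d.DeleteDockerVolume(ctx, vol.VolumeID)
}
```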
594
internal/handlers/app.go
Normal file
@@ -0,0 +1,594 @@
|
||||
package handlers
|
||||
|
||||
import (
|
||||
"bufio"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"io"
|
||||
"net/http"
|
||||
"os"
|
||||
"os/exec"
|
||||
"path/filepath"
|
||||
"strings"
|
||||
"sync"
|
||||
|
||||
"github.com/docker/docker/pkg/namesgenerator"
|
||||
"github.com/google/uuid"
|
||||
"github.com/joho/godotenv"
|
||||
proxyManagerService "github.com/juls0730/flux/internal/services/proxy"
|
||||
"github.com/juls0730/flux/internal/util"
|
||||
"github.com/juls0730/flux/pkg"
|
||||
"github.com/juls0730/flux/pkg/API"
|
||||
"go.uber.org/zap"
|
||||
)
|
||||
|
||||
var deploymentLock *util.MutexLock[uuid.UUID] = util.NewMutexLock[uuid.UUID]()
|
||||
|
||||
func (flux *FluxServer) DeployNewApp(w http.ResponseWriter, r *http.Request) {
|
||||
w.Header().Set("Content-Type", "text/event-stream")
|
||||
w.Header().Set("Cache-Control", "no-cache")
|
||||
w.Header().Set("Connection", "keep-alive")
|
||||
|
||||
err := r.ParseMultipartForm(10 << 30) // 10 GiB
|
||||
if err != nil {
|
||||
flux.logger.Errorw("Failed to parse multipart form", zap.Error(err))
|
||||
http.Error(w, err.Error(), http.StatusBadRequest)
|
||||
return
|
||||
}
|
||||
|
||||
var deployRequest API.DeployRequest
|
||||
projectConfig := new(pkg.ProjectConfig)
|
||||
if err := json.Unmarshal([]byte(r.FormValue("config")), &projectConfig); err != nil {
|
||||
flux.logger.Errorw("Failed to decode config", zap.Error(err))
|
||||
|
||||
http.Error(w, "Invalid flux.json", http.StatusBadRequest)
|
||||
return
|
||||
}
|
||||
|
||||
deployRequest.Config = *projectConfig
|
||||
idStr := r.FormValue("id")
|
||||
|
||||
if idStr == "" {
|
||||
id, err := uuid.NewRandom()
|
||||
if err != nil {
|
||||
flux.logger.Errorw("Failed to generate uuid", zap.Error(err))
|
||||
http.Error(w, "Failed to generate uuid", http.StatusInternalServerError)
|
||||
return
|
||||
}
|
||||
|
||||
deployRequest.Id = id
|
||||
} else {
|
||||
deployRequest.Id, err = uuid.Parse(idStr)
|
||||
if err != nil {
|
||||
flux.logger.Errorw("Failed to parse uuid", zap.Error(err))
|
||||
http.Error(w, "Failed to parse uuid", http.StatusBadRequest)
|
||||
return
|
||||
}
|
||||
|
||||
// make sure the id exists in the database
|
||||
app := flux.appManager.GetApp(deployRequest.Id)
|
||||
if app == nil {
|
||||
http.Error(w, "App not found", http.StatusNotFound)
|
||||
return
|
||||
}
|
||||
}
|
||||
|
||||
ctx, err := deploymentLock.Lock(deployRequest.Id, r.Context())
|
||||
if err != nil && err == util.ErrLocked {
|
||||
// This will happen if the app is already being deployed
|
||||
http.Error(w, "Cannot deploy app, it's already being deployed", http.StatusConflict)
|
||||
return
|
||||
}
|
||||
|
||||
go func() {
|
||||
<-ctx.Done()
|
||||
deploymentLock.Unlock(deployRequest.Id)
|
||||
}()
|
||||
|
||||
flusher, ok := w.(http.Flusher)
|
||||
if !ok {
|
||||
http.Error(w, "Streaming unsupported!", http.StatusInternalServerError)
|
||||
return
|
||||
}
|
||||
|
||||
w.WriteHeader(http.StatusMultiStatus)
|
||||
|
||||
eventChannel := make(chan API.DeploymentEvent, 10)
|
||||
defer close(eventChannel)
|
||||
|
||||
var wg sync.WaitGroup
|
||||
// make sure the connection doesn't close while there are SSE events being sent
|
||||
defer wg.Wait()
|
||||
|
||||
wg.Add(1)
|
||||
go func(w http.ResponseWriter, flusher http.Flusher) {
|
||||
defer wg.Done()
|
||||
|
||||
for {
|
||||
select {
|
||||
case <-ctx.Done():
|
||||
return
|
||||
case event, ok := <-eventChannel:
|
||||
if !ok {
|
||||
return
|
||||
}
|
||||
|
||||
ev := API.DeploymentEvent{
|
||||
Message: event.Message,
|
||||
}
|
||||
|
||||
eventJSON, err := json.Marshal(ev)
|
||||
if err != nil {
|
||||
// Write error directly to ResponseWriter
|
||||
jsonErr := json.NewEncoder(w).Encode(err)
|
||||
if jsonErr != nil {
|
||||
fmt.Fprint(w, "data: {\"message\": \"Error encoding error\"}\n\n")
|
||||
return
|
||||
}
|
||||
|
||||
fmt.Fprintf(w, "data: %s\n\n", err.Error())
|
||||
if flusher != nil {
|
||||
flusher.Flush()
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
fmt.Fprintf(w, "event: %s\n", event.Stage)
|
||||
fmt.Fprintf(w, "data: %s\n\n", eventJSON)
|
||||
if flusher != nil {
|
||||
flusher.Flush()
|
||||
}
|
||||
|
||||
if event.Stage == "error" || event.Stage == "complete" {
|
||||
return
|
||||
}
|
||||
}
|
||||
}
|
||||
}(w, flusher)
|
||||
|
||||
eventChannel <- API.DeploymentEvent{
|
||||
Stage: "start",
|
||||
Message: "Uploading code",
|
||||
}
|
||||
|
||||
deployRequest.Code, _, err = r.FormFile("code")
|
||||
if err != nil {
|
||||
eventChannel <- API.DeploymentEvent{
|
||||
Stage: "error",
|
||||
Message: "No code archive found",
|
||||
}
|
||||
return
|
||||
}
|
||||
defer deployRequest.Code.Close()
|
||||
|
||||
if projectConfig.Name == "" || projectConfig.Url == "" || projectConfig.Port == 0 {
|
||||
eventChannel <- API.DeploymentEvent{
|
||||
Stage: "error",
|
||||
Message: "Invalid flux.json, a name, url, and port must be specified",
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
if projectConfig.Name == "all" {
|
||||
eventChannel <- API.DeploymentEvent{
|
||||
Stage: "error",
|
||||
Message: "Reserved name 'all' is not allowed",
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
flux.logger.Infow("Deploying project", zap.String("name", projectConfig.Name), zap.String("url", projectConfig.Url), zap.String("id", deployRequest.Id.String()))
|
||||
|
||||
projectPath, err := flux.UploadAppCode(deployRequest.Code, deployRequest.Id)
|
||||
if err != nil {
|
||||
flux.logger.Infow("Failed to upload code", zap.Error(err))
|
||||
eventChannel <- API.DeploymentEvent{
|
||||
Stage: "error",
|
||||
Message: fmt.Sprintf("Failed to upload code: %s", err),
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
if projectConfig.EnvFile != "" {
|
||||
envPath := filepath.Join(projectPath, projectConfig.EnvFile)
|
||||
// prevent path traversal
|
||||
realEnvPath, err := filepath.EvalSymlinks(envPath)
|
||||
if err != nil {
|
||||
flux.logger.Errorw("Failed to eval symlinks", zap.Error(err))
|
||||
eventChannel <- API.DeploymentEvent{
|
||||
Stage: "error",
|
||||
Message: fmt.Sprintf("Failed to eval symlinks: %s", err),
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
if !strings.HasPrefix(realEnvPath, projectPath) {
|
||||
flux.logger.Errorw("Env file is not in project directory", zap.String("env_file", projectConfig.EnvFile))
|
||||
eventChannel <- API.DeploymentEvent{
|
||||
Stage: "error",
|
||||
Message: fmt.Sprintf("Env file is not in project directory: %s", projectConfig.EnvFile),
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
envBytes, err := os.Open(realEnvPath)
|
||||
if err != nil {
|
||||
flux.logger.Errorw("Failed to open env file", zap.Error(err))
|
||||
eventChannel <- API.DeploymentEvent{
|
||||
Stage: "error",
|
||||
Message: fmt.Sprintf("Failed to open env file: %v", err),
|
||||
}
|
||||
return
|
||||
}
|
||||
defer envBytes.Close()
|
||||
|
||||
envVars, err := godotenv.Parse(envBytes)
|
||||
if err != nil {
|
||||
flux.logger.Errorw("Failed to parse env file", zap.Error(err))
|
||||
eventChannel <- API.DeploymentEvent{
|
||||
Stage: "error",
|
||||
Message: fmt.Sprintf("Failed to parse env file: %v", err),
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
for key, value := range envVars {
|
||||
projectConfig.Environment = append(projectConfig.Environment, fmt.Sprintf("%s=%s", key, value))
|
||||
}
|
||||
}
|
||||
|
||||
// pipe the output of the build process to the event channel
|
||||
pipeGroup := sync.WaitGroup{}
|
||||
streamPipe := func(pipe io.ReadCloser) {
|
||||
pipeGroup.Add(1)
|
||||
defer pipeGroup.Done()
|
||||
defer pipe.Close()
|
||||
|
||||
scanner := bufio.NewScanner(pipe)
|
||||
for scanner.Scan() {
|
||||
line := scanner.Text()
|
||||
eventChannel <- API.DeploymentEvent{
|
||||
Stage: "cmd_output",
|
||||
Message: line,
|
||||
}
|
||||
}
|
||||
|
||||
if err := scanner.Err(); err != nil {
|
||||
eventChannel <- API.DeploymentEvent{
|
||||
Stage: "error",
|
||||
Message: fmt.Sprintf("Failed to read pipe: %s", err),
|
||||
}
|
||||
flux.logger.Errorw("Error reading pipe", zap.Error(err))
|
||||
}
|
||||
}
|
||||
|
||||
flux.logger.Debugw("Preparing project", zap.String("name", projectConfig.Name), zap.String("id", deployRequest.Id.String()))
|
||||
eventChannel <- API.DeploymentEvent{
|
||||
Stage: "preparing",
|
||||
Message: "Preparing project",
|
||||
}
|
||||
|
||||
// redirect stdout and stderr to the event channel
|
||||
reader, writer := io.Pipe()
|
||||
prepareCmd := exec.Command("go", "generate")
|
||||
prepareCmd.Dir = projectPath
|
||||
prepareCmd.Stdout = writer
|
||||
prepareCmd.Stderr = writer
|
||||
|
||||
err = prepareCmd.Start()
|
||||
if err != nil {
|
||||
flux.logger.Errorw("Failed to prepare project", zap.Error(err))
|
||||
eventChannel <- API.DeploymentEvent{
|
||||
Stage: "error",
|
||||
Message: fmt.Sprintf("Failed to prepare project: %s", err),
|
||||
}
|
||||
|
||||
return
|
||||
}
|
||||
|
||||
go streamPipe(reader)
|
||||
|
||||
pipeGroup.Wait()
|
||||
|
||||
err = prepareCmd.Wait()
|
||||
if err != nil {
|
||||
flux.logger.Errorw("Failed to prepare project", zap.Error(err))
|
||||
eventChannel <- API.DeploymentEvent{
|
||||
Stage: "error",
|
||||
Message: fmt.Sprintf("Failed to prepare project: %s", err),
|
||||
}
|
||||
|
||||
return
|
||||
}
|
||||
|
||||
writer.Close()
|
||||
|
||||
eventChannel <- API.DeploymentEvent{
|
||||
Stage: "building",
|
||||
Message: "Building project image",
|
||||
}
|
||||
|
||||
reader, writer = io.Pipe()
|
||||
flux.logger.Debugw("Building image for project", zap.String("name", projectConfig.Name))
|
||||
imageName := fmt.Sprintf("fluxi-%s", namesgenerator.GetRandomName(0))
|
||||
buildCmd := exec.Command("pack", "build", imageName, "--builder", flux.config.Builder)
|
||||
buildCmd.Dir = projectPath
|
||||
buildCmd.Stdout = writer
|
||||
buildCmd.Stderr = writer
|
||||
|
||||
err = buildCmd.Start()
|
||||
if err != nil {
|
||||
flux.logger.Errorw("Failed to build image", zap.Error(err))
|
||||
eventChannel <- API.DeploymentEvent{
|
||||
Stage: "error",
|
||||
Message: fmt.Sprintf("Failed to build image: %s", err),
|
||||
}
|
||||
|
||||
return
|
||||
}
|
||||
|
||||
go streamPipe(reader)
|
||||
|
||||
pipeGroup.Wait()
|
||||
|
||||
err = buildCmd.Wait()
|
||||
if err != nil {
|
||||
flux.logger.Errorw("Failed to build image", zap.Error(err))
|
||||
eventChannel <- API.DeploymentEvent{
|
||||
Stage: "error",
|
||||
Message: fmt.Sprintf("Failed to build image: %s", err),
|
||||
}
|
||||
|
||||
return
|
||||
}
|
||||
|
||||
app := flux.appManager.GetApp(deployRequest.Id)
|
||||
|
||||
if app == nil {
|
||||
eventChannel <- API.DeploymentEvent{
|
||||
Stage: "creating",
|
||||
Message: "Creating app, this might take a while...",
|
||||
}
|
||||
|
||||
app, err = flux.appManager.CreateApp(r.Context(), imageName, projectConfig, deployRequest.Id)
|
||||
} else {
|
||||
eventChannel <- API.DeploymentEvent{
|
||||
Stage: "upgrading",
|
||||
Message: "Upgrading app, this might take a while...",
|
||||
}
|
||||
|
||||
// we don't need to change `app` since this upgrade will use the same app and update it in place
|
||||
err = flux.appManager.Upgrade(r.Context(), app.Id, imageName, projectConfig)
|
||||
}
|
||||
|
||||
if err != nil {
|
||||
flux.logger.Errorw("Failed to deploy app", zap.Error(err))
|
||||
eventChannel <- API.DeploymentEvent{
|
||||
Stage: "error",
|
||||
Message: fmt.Sprintf("Failed to upgrade app: %s", err),
|
||||
}
|
||||
|
||||
return
|
||||
}
|
||||
|
||||
var extApp API.App
|
||||
extApp.Id = app.Id
|
||||
extApp.Name = app.Name
|
||||
extApp.DeploymentID = app.DeploymentID
|
||||
|
||||
eventChannel <- API.DeploymentEvent{
|
||||
Stage: "complete",
|
||||
Message: extApp,
|
||||
}
|
||||
|
||||
flux.logger.Infow("App deployed successfully", zap.String("id", app.Id.String()))
|
||||
}
|
||||
|
||||
func (flux *FluxServer) GetAllApps(w http.ResponseWriter, r *http.Request) {
|
||||
var apps []API.App
|
||||
for _, app := range flux.appManager.GetAllApps() {
|
||||
var extApp API.App
|
||||
deploymentStatus, err := app.Deployment.Status(r.Context(), flux.docker, flux.logger)
|
||||
if err != nil {
|
||||
flux.logger.Errorw("Failed to get deployment status", zap.Error(err))
|
||||
http.Error(w, err.Error(), http.StatusInternalServerError)
|
||||
return
|
||||
}
|
||||
extApp.Id = app.Id
|
||||
extApp.Name = app.Name
|
||||
extApp.DeploymentID = app.DeploymentID
|
||||
extApp.DeploymentStatus = deploymentStatus
|
||||
apps = append(apps, extApp)
|
||||
}
|
||||
|
||||
w.Header().Set("Content-Type", "application/json")
|
||||
json.NewEncoder(w).Encode(apps)
|
||||
}
|
||||
|
||||
func (flux *FluxServer) GetAppById(w http.ResponseWriter, r *http.Request) {
|
||||
id, err := uuid.Parse(r.PathValue("id"))
|
||||
if err != nil {
|
||||
http.Error(w, "Invalid app id", http.StatusBadRequest)
|
||||
return
|
||||
}
|
||||
|
||||
app := flux.appManager.GetApp(id)
|
||||
if app == nil {
|
||||
http.Error(w, "App not found", http.StatusNotFound)
|
||||
return
|
||||
}
|
||||
|
||||
var extApp API.App
|
||||
deploymentStatus, err := app.Deployment.Status(r.Context(), flux.docker, flux.logger)
|
||||
if err != nil {
|
||||
flux.logger.Errorw("Failed to get deployment status", zap.Error(err))
|
||||
http.Error(w, err.Error(), http.StatusInternalServerError)
|
||||
return
|
||||
}
|
||||
extApp.Id = app.Id
|
||||
extApp.Name = app.Name
|
||||
extApp.DeploymentID = app.DeploymentID
|
||||
extApp.DeploymentStatus = deploymentStatus
|
||||
|
||||
w.Header().Set("Content-Type", "application/json")
|
||||
json.NewEncoder(w).Encode(extApp)
|
||||
}
|
||||
|
||||
func (flux *FluxServer) GetAppByName(w http.ResponseWriter, r *http.Request) {
|
||||
name := r.PathValue("name")
|
||||
app := flux.appManager.GetAppByName(name)
|
||||
if app == nil {
|
||||
http.Error(w, "App not found", http.StatusNotFound)
|
||||
return
|
||||
}
|
||||
|
||||
var extApp API.App
|
||||
deploymentStatus, err := app.Deployment.Status(r.Context(), flux.docker, flux.logger)
|
||||
if err != nil {
|
||||
flux.logger.Errorw("Failed to get deployment status", zap.Error(err))
|
||||
http.Error(w, err.Error(), http.StatusInternalServerError)
|
||||
return
|
||||
}
|
||||
extApp.Id = app.Id
|
||||
extApp.Name = app.Name
|
||||
extApp.DeploymentID = app.DeploymentID
|
||||
extApp.DeploymentStatus = deploymentStatus
|
||||
|
||||
w.Header().Set("Content-Type", "application/json")
|
||||
json.NewEncoder(w).Encode(extApp)
|
||||
}
|
||||
|
||||
func (flux *FluxServer) StartApp(w http.ResponseWriter, r *http.Request) {
|
||||
id, err := uuid.Parse(r.PathValue("id"))
|
||||
if err != nil {
|
||||
http.Error(w, "Invalid app id", http.StatusBadRequest)
|
||||
return
|
||||
}
|
||||
|
||||
app := flux.appManager.GetApp(id)
|
||||
if app == nil {
|
||||
http.Error(w, "App not found", http.StatusNotFound)
|
||||
return
|
||||
}
|
||||
|
||||
status, err := app.Deployment.Status(r.Context(), flux.docker, flux.logger)
|
||||
if err != nil {
|
||||
http.Error(w, err.Error(), http.StatusInternalServerError)
|
||||
return
|
||||
}
|
||||
|
||||
if status == "running" {
|
||||
http.Error(w, "App is already running", http.StatusNotModified)
|
||||
return
|
||||
}
|
||||
|
||||
app.State = "running"
|
||||
_, err = flux.db.ExecContext(r.Context(), "UPDATE apps SET state = ? WHERE id = ?", app.State, app.Id[:])
|
||||
if err != nil {
|
||||
flux.logger.Errorw("Failed to update app state", zap.Error(err))
|
||||
http.Error(w, err.Error(), http.StatusInternalServerError)
|
||||
return
|
||||
}
|
||||
|
||||
err = app.Deployment.Start(r.Context(), flux.docker)
|
||||
if err != nil {
|
||||
http.Error(w, err.Error(), http.StatusInternalServerError)
|
||||
return
|
||||
}
|
||||
|
||||
deploymentInternalUrl, err := app.Deployment.GetInternalUrl(flux.docker)
|
||||
if err != nil {
|
||||
http.Error(w, err.Error(), http.StatusInternalServerError)
|
||||
return
|
||||
}
|
||||
|
||||
newProxy, err := proxyManagerService.NewDeploymentProxy(*deploymentInternalUrl)
|
||||
if err != nil {
|
||||
http.Error(w, err.Error(), http.StatusInternalServerError)
|
||||
return
|
||||
}
|
||||
|
||||
flux.proxy.AddProxy(app.Deployment.URL, newProxy)
|
||||
|
||||
w.WriteHeader(http.StatusOK)
|
||||
}
|
||||
|
||||
func (flux *FluxServer) StopApp(w http.ResponseWriter, r *http.Request) {
|
||||
id, err := uuid.Parse(r.PathValue("id"))
|
||||
if err != nil {
|
||||
http.Error(w, "Invalid app id", http.StatusBadRequest)
|
||||
return
|
||||
}
|
||||
|
||||
app := flux.appManager.GetApp(id)
|
||||
if app == nil {
|
||||
http.Error(w, "App not found", http.StatusNotFound)
|
||||
return
|
||||
}
|
||||
|
||||
status, err := app.Deployment.Status(r.Context(), flux.docker, flux.logger)
|
||||
if err != nil {
|
||||
http.Error(w, err.Error(), http.StatusInternalServerError)
|
||||
return
|
||||
}
|
||||
|
||||
if status == "stopped" || status == "failed" {
|
||||
http.Error(w, "App is already stopped", http.StatusNotModified)
|
||||
return
|
||||
}
|
||||
|
||||
app.State = "stopped"
|
||||
_, err = flux.db.ExecContext(r.Context(), "UPDATE apps SET state = ? WHERE id = ?", app.State, app.Id[:])
|
||||
if err != nil {
|
||||
flux.logger.Errorw("Failed to update app state", zap.Error(err))
|
||||
http.Error(w, err.Error(), http.StatusInternalServerError)
|
||||
return
|
||||
}
|
||||
|
||||
err = app.Deployment.Stop(r.Context(), flux.docker)
|
||||
if err != nil {
|
||||
http.Error(w, err.Error(), http.StatusInternalServerError)
|
||||
return
|
||||
}
|
||||
|
||||
flux.proxy.RemoveDeployment(app.Deployment.URL)
|
||||
|
||||
w.WriteHeader(http.StatusOK)
|
||||
}
|
||||
|
||||
func (flux *FluxServer) DeleteAllDeploymentsHandler(w http.ResponseWriter, r *http.Request) {
|
||||
apps := flux.appManager.GetAllApps()
|
||||
for _, app := range apps {
|
||||
err := flux.appManager.DeleteApp(app.Id)
|
||||
if err != nil {
|
||||
http.Error(w, err.Error(), http.StatusInternalServerError)
|
||||
return
|
||||
}
|
||||
}
|
||||
|
||||
w.WriteHeader(http.StatusOK)
|
||||
}
|
||||
|
||||
func (flux *FluxServer) DeleteDeployHandler(w http.ResponseWriter, r *http.Request) {
|
||||
id, err := uuid.Parse(r.PathValue("id"))
|
||||
if err != nil {
|
||||
http.Error(w, "Invalid app id", http.StatusBadRequest)
|
||||
return
|
||||
}
|
||||
|
||||
app := flux.appManager.GetApp(id)
|
||||
if app == nil {
|
||||
http.Error(w, "App not found", http.StatusNotFound)
|
||||
return
|
||||
}
|
||||
|
||||
err = flux.appManager.DeleteApp(id)
|
||||
if err != nil {
|
||||
http.Error(w, err.Error(), http.StatusInternalServerError)
|
||||
return
|
||||
}
|
||||
|
||||
flux.proxy.RemoveDeployment(app.Deployment.URL)
|
||||
|
||||
w.WriteHeader(http.StatusOK)
|
||||
}
|
||||
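To make the handler's contract above concrete, here is a hedged client-side sketch. The multipart field names (`config`, `id`, `code`) and the SSE response shape come from DeployNewApp itself; the endpoint path and host are assumptions and should be checked against the daemon's route table.

```go
package main

import (
	"bufio"
	"bytes"
	"fmt"
	"io"
	"mime/multipart"
	"net/http"
	"os"
)

func main() {
	var body bytes.Buffer
	mw := multipart.NewWriter(&body)

	// "config" carries the flux.json contents; name, url and port are required.
	mw.WriteField("config", `{"name":"demo","url":"demo.local","port":8080}`)

	// "code" is the (optionally gzipped) tar archive of the project.
	fw, _ := mw.CreateFormFile("code", "code.tar")
	archive, _ := os.Open("code.tar")
	defer archive.Close()
	io.Copy(fw, archive)
	mw.Close()

	// The path "/api/deploy" is an assumption; the port matches DefaultConfig's DaemonHost.
	req, _ := http.NewRequest(http.MethodPost, "http://127.0.0.1:5647/api/deploy", &body)
	req.Header.Set("Content-Type", mw.FormDataContentType())

	resp, err := http.DefaultClient.Do(req)
	if err != nil {
		fmt.Fprintln(os.Stderr, err)
		return
	}
	defer resp.Body.Close()

	// The handler streams SSE frames; print them until the stream ends
	// (the server stops after an "error" or "complete" event).
	scanner := bufio.NewScanner(resp.Body)
	for scanner.Scan() {
		fmt.Println(scanner.Text())
	}
}
```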
@@ -5,14 +5,16 @@ CREATE TABLE IF NOT EXISTS deployments (
|
||||
);
|
||||
|
||||
CREATE TABLE IF NOT EXISTS apps (
|
||||
id INTEGER PRIMARY KEY AUTOINCREMENT UNIQUE,
|
||||
id BLOB PRIMARY KEY,
|
||||
name TEXT NOT NULL UNIQUE,
|
||||
state TEXT NOT NULL,
|
||||
deployment_id INTEGER,
|
||||
FOREIGN KEY(deployment_id) REFERENCES deployments(id)
|
||||
);
|
||||
|
||||
CREATE TABLE IF NOT EXISTS containers (
|
||||
id INTEGER PRIMARY KEY AUTOINCREMENT UNIQUE,
|
||||
friendly_name TEXT NOT NULL,
|
||||
container_id TEXT NOT NULL,
|
||||
head BOOLEAN NOT NULL,
|
||||
deployment_id INTEGER NOT NULL,
|
||||
250
internal/handlers/server.go
Normal file
@@ -0,0 +1,250 @@
|
||||
package handlers
|
||||
|
||||
import (
|
||||
"archive/tar"
|
||||
"compress/gzip"
|
||||
"context"
|
||||
"database/sql"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"io"
|
||||
"net/http"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"strconv"
|
||||
|
||||
_ "embed"
|
||||
|
||||
"github.com/docker/docker/api/types/image"
|
||||
"github.com/google/uuid"
|
||||
"github.com/juls0730/flux/internal/docker"
|
||||
"github.com/juls0730/flux/internal/services/appManagerService"
|
||||
proxyManagerService "github.com/juls0730/flux/internal/services/proxy"
|
||||
|
||||
"github.com/juls0730/flux/pkg"
|
||||
"github.com/juls0730/flux/pkg/API"
|
||||
_ "github.com/mattn/go-sqlite3"
|
||||
"go.uber.org/zap"
|
||||
"go.uber.org/zap/zapcore"
|
||||
)
|
||||
|
||||
var (
|
||||
//go:embed schema.sql
|
||||
schema string
|
||||
DefaultConfig = pkg.DaemonConfig{
|
||||
Builder: "paketobuildpacks/builder-jammy-tiny",
|
||||
CompressionLevel: 0,
|
||||
DaemonHost: "0.0.0.0:5647",
|
||||
ProxyHost: "0.0.0.0:7465",
|
||||
}
|
||||
)
|
||||
|
||||
type FluxServer struct {
|
||||
db *sql.DB
|
||||
|
||||
docker *docker.DockerClient
|
||||
|
||||
proxy *proxyManagerService.ProxyManager
|
||||
appManager *appManagerService.AppManager
|
||||
|
||||
rootDir string
|
||||
config pkg.DaemonConfig
|
||||
logger *zap.SugaredLogger
|
||||
}
|
||||
|
||||
func NewServer() *FluxServer {
|
||||
flux := &FluxServer{}
|
||||
|
||||
verbosity, err := strconv.Atoi(os.Getenv("FLUXD_VERBOSITY"))
|
||||
if err != nil {
|
||||
verbosity = 0
|
||||
}
|
||||
|
||||
config := zap.NewProductionConfig()
|
||||
|
||||
if os.Getenv("DEBUG") == "true" {
|
||||
config = zap.NewDevelopmentConfig()
|
||||
verbosity = -1
|
||||
}
|
||||
|
||||
config.Level = zap.NewAtomicLevelAt(zapcore.Level(verbosity))
|
||||
|
||||
lameLogger, err := config.Build()
|
||||
if err != nil {
|
||||
fmt.Printf("Failed to create logger: %v\n", err)
|
||||
os.Exit(1)
|
||||
}
|
||||
|
||||
flux.logger = lameLogger.Sugar()
|
||||
|
||||
rootDir := os.Getenv("FLUXD_ROOT_DIR")
|
||||
if rootDir == "" {
|
||||
rootDir = "/var/fluxd"
|
||||
}
|
||||
|
||||
flux.rootDir = rootDir
|
||||
|
||||
configPath := filepath.Join(flux.rootDir, "config.json")
|
||||
if _, err := os.Stat(configPath); err != nil {
|
||||
if err := os.MkdirAll(rootDir, 0755); err != nil {
|
||||
flux.logger.Fatalw("Failed to create fluxd directory", zap.Error(err))
|
||||
}
|
||||
|
||||
configBytes, err := json.Marshal(DefaultConfig)
|
||||
if err != nil {
|
||||
flux.logger.Fatalw("Failed to marshal default config", zap.Error(err))
|
||||
}
|
||||
|
||||
fmt.Printf("Config file not found, creating default config file at %s\n", configPath)
|
||||
if err := os.WriteFile(configPath, configBytes, 0644); err != nil {
|
||||
flux.logger.Fatalw("Failed to write config file", zap.Error(err))
|
||||
}
|
||||
}
|
||||
|
||||
configFile, err := os.ReadFile(configPath)
|
||||
if err != nil {
|
||||
flux.logger.Fatalw("Failed to read config file", zap.Error(err))
|
||||
}
|
||||
|
||||
// apply the config file over the default config, this way if we have missing fields, they will be filled in with
|
||||
// the default values
|
||||
flux.config = DefaultConfig
|
||||
if err := json.Unmarshal(configFile, &flux.config); err != nil {
|
||||
flux.logger.Fatalw("Failed to parse config file", zap.Error(err))
|
||||
}
|
||||
|
||||
dbPath := filepath.Join(flux.rootDir, "fluxd.db")
|
||||
|
||||
flux.db, err = sql.Open("sqlite3", dbPath)
|
||||
if err != nil {
|
||||
flux.logger.Fatalw("Failed to open database", zap.Error(err))
|
||||
}
|
||||
|
||||
err = flux.db.Ping()
|
||||
if err != nil {
|
||||
flux.logger.Fatalw("Failed to ping database", zap.Error(err))
|
||||
}
|
||||
|
||||
_, err = flux.db.Exec(schema)
|
||||
if err != nil {
|
||||
flux.logger.Fatalw("Failed to create database schema", zap.Error(err))
|
||||
}
|
||||
|
||||
flux.docker = docker.NewDocker(nil, flux.logger)
|
||||
|
||||
flux.logger.Infof("Pulling builder image %s, this may take a while...", flux.config.Builder)
|
||||
events, err := flux.docker.ImagePull(context.Background(), fmt.Sprintf("%s:latest", flux.config.Builder), image.PullOptions{})
|
||||
if err != nil {
|
||||
flux.logger.Fatalw("Failed to pull builder image", zap.Error(err))
|
||||
}
|
||||
|
||||
// block until the image is pulled
|
||||
io.Copy(io.Discard, events)
|
||||
|
||||
flux.proxy = proxyManagerService.NewProxyManager(flux.logger)
|
||||
|
||||
flux.appManager = appManagerService.NewAppManager(flux.db, flux.docker, flux.proxy, flux.logger)
|
||||
err = flux.appManager.Init()
|
||||
if err != nil {
|
||||
flux.logger.Fatalw("Failed to initialize apps", zap.Error(err))
|
||||
}
|
||||
|
||||
return flux
|
||||
}
|
||||
|
||||
func (s *FluxServer) Stop() {
|
||||
s.logger.Sync()
|
||||
}
|
||||
|
||||
func (s *FluxServer) ListenAndServe() error {
|
||||
s.logger.Infow("Starting server", zap.String("daemon_host", s.config.DaemonHost), zap.String("proxy_host", s.config.ProxyHost))
|
||||
|
||||
go s.proxy.ListenAndServe(s.config.ProxyHost)
|
||||
return http.ListenAndServe(s.config.DaemonHost, nil)
|
||||
}
|
||||
|
||||
func (s *FluxServer) DaemonInfoHandler(w http.ResponseWriter, r *http.Request) {
|
||||
w.Header().Set("Content-Type", "application/json")
|
||||
json.NewEncoder(w).Encode(API.Info{
|
||||
CompressionLevel: s.config.CompressionLevel,
|
||||
Version: pkg.Version,
|
||||
})
|
||||
}
|
||||
|
||||
// Extracts an uploaded tar archive (optionally gzip-compressed) into a temporary directory and returns the path to that directory
|
||||
func (s *FluxServer) UploadAppCode(code io.Reader, appId uuid.UUID) (string, error) {
|
||||
var err error
|
||||
outputPath, err := os.MkdirTemp(os.TempDir(), appId.String())
|
||||
if err != nil {
|
||||
s.logger.Errorw("Failed to create project directory", zap.Error(err))
|
||||
return "", err
|
||||
}
|
||||
|
||||
var gzReader *gzip.Reader
|
||||
defer func() {
|
||||
if gzReader != nil {
|
||||
gzReader.Close()
|
||||
}
|
||||
}()
|
||||
|
||||
if s.config.CompressionLevel > 0 {
|
||||
gzReader, err = gzip.NewReader(code)
|
||||
if err != nil {
|
||||
s.logger.Infow("Failed to create gzip reader", zap.Error(err))
|
||||
return "", err
|
||||
}
|
||||
}
|
||||
var tarReader *tar.Reader
|
||||
|
||||
if gzReader != nil {
|
||||
tarReader = tar.NewReader(gzReader)
|
||||
} else {
|
||||
tarReader = tar.NewReader(code)
|
||||
}
|
||||
|
||||
s.logger.Infow("Extracting files for project", zap.String("project", outputPath))
|
||||
for {
|
||||
header, err := tarReader.Next()
|
||||
if err == io.EOF {
|
||||
break
|
||||
}
|
||||
if err != nil {
|
||||
s.logger.Debugw("Failed to read tar header", zap.Error(err))
|
||||
return "", err
|
||||
}
|
||||
|
||||
// Construct full path
|
||||
path := filepath.Join(outputPath, header.Name)
|
||||
|
||||
// Handle different file types
|
||||
switch header.Typeflag {
|
||||
case tar.TypeDir:
|
||||
if err = os.MkdirAll(path, 0755); err != nil {
|
||||
s.logger.Debugw("Failed to extract directory", zap.Error(err))
|
||||
return "", err
|
||||
}
|
||||
case tar.TypeReg:
|
||||
if err = os.MkdirAll(filepath.Dir(path), 0755); err != nil {
|
||||
s.logger.Debugw("Failed to extract directory", zap.Error(err))
|
||||
return "", err
|
||||
}
|
||||
|
||||
outFile, err := os.Create(path)
|
||||
if err != nil {
|
||||
s.logger.Debugw("Failed to extract file", zap.Error(err))
|
||||
return "", err
|
||||
}
|
||||
|
||||
if _, err = io.Copy(outFile, tarReader); err != nil {
|
||||
s.logger.Debugw("Failed to copy file during extraction", zap.Error(err))
|
||||
return "", err
|
||||
}
outFile.Close()
|
||||
}
|
||||
}
|
||||
|
||||
return outputPath, nil
|
||||
}
|
||||
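One caveat worth flagging for UploadAppCode: header.Name is joined into outputPath without validation, so a crafted archive containing `../` entries could write outside the temporary directory. A minimal guard is sketched below as a hypothetical helper, not a change to the function above:

```go
package handlers

import (
	"fmt"
	"os"
	"path/filepath"
	"strings"
)

// safeJoin is a hypothetical helper: it joins name under root and rejects
// entries that would resolve outside root (e.g. "../../etc/passwd").
func safeJoin(root, name string) (string, error) {
	path := filepath.Join(root, name)
	if path != root && !strings.HasPrefix(path, root+string(os.PathSeparator)) {
		return "", fmt.Errorf("illegal archive path: %q", name)
	}
	return path, nil
}
```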
29
internal/models/app.go
Normal file
@@ -0,0 +1,29 @@
|
||||
package models
|
||||
|
||||
import (
|
||||
"context"
|
||||
"database/sql"
|
||||
|
||||
"github.com/google/uuid"
|
||||
docker "github.com/juls0730/flux/internal/docker"
|
||||
"go.uber.org/zap"
|
||||
)
|
||||
|
||||
type App struct {
|
||||
Id uuid.UUID `json:"id,omitempty"`
|
||||
Name string `json:"name,omitempty"`
|
||||
State string `json:"state,omitempty"`
|
||||
Deployment *Deployment `json:"-"`
|
||||
DeploymentID int64 `json:"deployment_id,omitempty"`
|
||||
}
|
||||
|
||||
func (app *App) Remove(ctx context.Context, dockerClient *docker.DockerClient, db *sql.DB, logger *zap.SugaredLogger) error {
|
||||
app.Deployment.Remove(ctx, dockerClient, db, logger)
|
||||
_, err := db.ExecContext(ctx, "DELETE FROM apps WHERE id = ?", app.Id[:])
|
||||
if err != nil {
|
||||
logger.Errorw("Failed to delete app", zap.Error(err))
|
||||
return err
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
340
internal/models/container.go
Normal file
@@ -0,0 +1,340 @@
|
||||
package models
|
||||
|
||||
import (
|
||||
"context"
|
||||
"database/sql"
|
||||
"fmt"
|
||||
"io"
|
||||
|
||||
"github.com/docker/docker/api/types/container"
|
||||
"github.com/docker/docker/api/types/image"
|
||||
docker "github.com/juls0730/flux/internal/docker"
|
||||
"github.com/juls0730/flux/pkg"
|
||||
"go.uber.org/zap"
|
||||
)
|
||||
|
||||
type Container struct {
|
||||
ID int64 `json:"id"`
|
||||
|
||||
Name string `json:"name"` // name of the container in the docker daemon
|
||||
ContainerID docker.DockerID `json:"container_id"`
|
||||
|
||||
Head bool `json:"head"` // if the container is the head of the deployment
|
||||
FriendlyName string `json:"friendly_name"` // name used by other containers to reach this container
|
||||
Volumes []*Volume `json:"volumes"`
|
||||
Deployment *Deployment `json:"-"`
|
||||
|
||||
DeploymentID int64 `json:"deployment_id"`
|
||||
}
|
||||
|
||||
// Create a container given a container configuration and a deployment. This will do a few things:
|
||||
//
|
||||
// 1. Create the container in the docker daemon
|
||||
//
|
||||
// 2. Create the volumes for the container
|
||||
//
|
||||
// 3. Insert the container and volumes into the database
|
||||
//
|
||||
// This will not mess with containers already in the Deployment object, it is expected that this function will only be
|
||||
// called when the app is initially created
|
||||
func CreateContainer(ctx context.Context, imageName string, friendlyName string, head bool, environment []string, containerVols []pkg.Volume, deployment *Deployment, logger *zap.SugaredLogger, dockerClient *docker.DockerClient, db *sql.DB) (c *Container, err error) {
|
||||
if friendlyName == "" {
|
||||
return nil, fmt.Errorf("container friendly name is empty")
|
||||
}
|
||||
|
||||
if imageName == "" {
|
||||
return nil, fmt.Errorf("container image name is empty")
|
||||
}
|
||||
|
||||
logger.Debugw("Creating container with image", zap.String("image", imageName))
|
||||
|
||||
var volumes []*Volume
|
||||
// in the head container, we have a default volume where the project is mounted, this is important so that if the project uses sqlite for example,
|
||||
// all the data will not be lost the second the container turns off.
|
||||
if head {
|
||||
vol, err := CreateVolume(ctx, "/workspace", dockerClient, logger)
|
||||
if err != nil {
|
||||
logger.Errorw("Failed to create head's workspace volume", zap.Error(err))
|
||||
return nil, err
|
||||
}
|
||||
|
||||
vol.Mountpoint = "/workspace"
|
||||
|
||||
volumes = append(volumes, vol)
|
||||
}
|
||||
|
||||
for _, containerVolume := range containerVols {
|
||||
if containerVolume.Mountpoint == "" {
|
||||
return nil, fmt.Errorf("mountpoint is empty")
|
||||
}
|
||||
|
||||
if containerVolume.Mountpoint == "/workspace" || containerVolume.Mountpoint == "/" {
|
||||
return nil, fmt.Errorf("invalid mountpoint")
|
||||
}
|
||||
|
||||
vol, err := CreateVolume(ctx, containerVolume.Mountpoint, dockerClient, logger)
|
||||
if err != nil {
|
||||
logger.Errorw("Failed to create volume", zap.Error(err))
|
||||
return nil, err
|
||||
}
|
||||
|
||||
volumes = append(volumes, vol)
|
||||
}
|
||||
|
||||
// if the container is the head, build a list of hostnames that the container can reach by name for this deployment
|
||||
// TODO: this host list should be consistent across all containers in the deployment, not just the head
|
||||
var hosts []string
|
||||
if head {
|
||||
logger.Debug("Building host list")
|
||||
|
||||
for _, container := range deployment.Containers() {
|
||||
containerIP, err := container.GetIp(dockerClient, logger)
|
||||
if err != nil {
|
||||
logger.Errorw("Failed to get container ip", zap.Error(err))
|
||||
return nil, err
|
||||
}
|
||||
|
||||
hosts = append(hosts, fmt.Sprintf("%s:%s", container.FriendlyName, containerIP))
|
||||
}
|
||||
}
|
||||
|
||||
// if the container is not the head, pull the image from docker hub
|
||||
if !head {
|
||||
logger.Debug("Pulling image", zap.String("image", imageName))
|
||||
|
||||
image, err := dockerClient.ImagePull(ctx, imageName, image.PullOptions{})
|
||||
if err != nil {
|
||||
logger.Errorw("Failed to pull image", zap.Error(err))
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// block until the image is pulled
|
||||
io.Copy(io.Discard, image)
|
||||
}
|
||||
|
||||
dockerVols := make([]*docker.DockerVolume, 0)
|
||||
for _, volume := range volumes {
|
||||
dockerVols = append(dockerVols, &volume.DockerVolume)
|
||||
}
|
||||
|
||||
logger.Debugw("Creating container", zap.String("image", imageName))
|
||||
dockerContainer, err := dockerClient.CreateDockerContainer(ctx, imageName, dockerVols, environment, hosts, nil)
|
||||
if err != nil {
|
||||
logger.Errorw("Failed to create container", zap.Error(err))
|
||||
return nil, err
|
||||
}
|
||||
|
||||
c = &Container{
|
||||
ContainerID: dockerContainer.ID,
|
||||
Name: dockerContainer.Name,
|
||||
FriendlyName: friendlyName,
|
||||
Volumes: volumes,
|
||||
}
|
||||
|
||||
err = db.QueryRow("INSERT INTO containers (container_id, head, friendly_name, deployment_id) VALUES (?, ?, ?, ?) RETURNING id, container_id, head, deployment_id", string(c.ContainerID), head, friendlyName, deployment.ID).Scan(&c.ID, &c.ContainerID, &c.Head, &c.DeploymentID)
|
||||
if err != nil {
|
||||
logger.Errorw("Failed to insert container", zap.Error(err))
|
||||
return nil, err
|
||||
}
|
||||
|
||||
tx, err := db.Begin()
|
||||
if err != nil {
|
||||
logger.Errorw("Failed to begin transaction", zap.Error(err))
|
||||
return nil, err
|
||||
}
|
||||
|
||||
volumeInsertStmt, err := tx.Prepare("INSERT INTO volumes (volume_id, mountpoint, container_id) VALUES (?, ?, ?) RETURNING id, volume_id, mountpoint, container_id")
|
||||
if err != nil {
|
||||
logger.Errorw("Failed to prepare statement", zap.Error(err))
|
||||
tx.Rollback()
|
||||
return nil, err
|
||||
}
|
||||
|
||||
for _, vol := range c.Volumes {
|
||||
logger.Debug("Inserting volume", zap.String("volume_id", vol.VolumeID), zap.String("mountpoint", vol.Mountpoint), zap.String("container_id", string(c.ContainerID)))
|
||||
err = volumeInsertStmt.QueryRow(vol.VolumeID, vol.Mountpoint, c.ID).Scan(&vol.ID, &vol.VolumeID, &vol.Mountpoint, &vol.ContainerID)
|
||||
if err != nil {
|
||||
logger.Errorw("Failed to insert volume", zap.Error(err))
|
||||
tx.Rollback()
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
|
||||
err = tx.Commit()
|
||||
if err != nil {
|
||||
logger.Errorw("Failed to commit transaction", zap.Error(err))
|
||||
tx.Rollback()
|
||||
return nil, err
|
||||
}
|
||||
|
||||
c.Deployment = deployment
|
||||
deployment.AppendContainer(c)
|
||||
|
||||
return c, nil
|
||||
}
|
||||
|
||||
// Updates Container in place
|
||||
func (c *Container) Upgrade(ctx context.Context, imageName string, environment []string, dockerClient *docker.DockerClient, db *sql.DB, logger *zap.SugaredLogger) error {
|
||||
// Create new container with new image
|
||||
logger.Debugw("Upgrading container", zap.String("container_id", string(c.ContainerID[:12])))
|
||||
if c.Volumes == nil {
|
||||
return fmt.Errorf("no volumes found for container %s", c.ContainerID[:12])
|
||||
}
|
||||
|
||||
containerJSON, err := dockerClient.ContainerInspect(context.Background(), c.ContainerID)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
hosts := containerJSON.HostConfig.ExtraHosts
|
||||
|
||||
var dockerVolumes []*docker.DockerVolume
|
||||
for _, volume := range c.Volumes {
|
||||
dockerVolumes = append(dockerVolumes, &docker.DockerVolume{
|
||||
VolumeID: volume.VolumeID,
|
||||
Mountpoint: volume.Mountpoint,
|
||||
})
|
||||
}
|
||||
|
||||
newDockerContainer, err := dockerClient.CreateDockerContainer(ctx, imageName, dockerVolumes, environment, hosts, nil)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
err = db.QueryRow("INSERT INTO containers (container_id, head, friendly_name, deployment_id) VALUES (?, ?, ?, ?) RETURNING id, container_id, head, deployment_id", newDockerContainer.ID, c.Head, c.FriendlyName, c.Deployment.ID).Scan(&c.ID, &c.ContainerID, &c.Head, &c.DeploymentID)
|
||||
if err != nil {
|
||||
logger.Errorw("Failed to insert container", zap.Error(err))
|
||||
return err
|
||||
}
|
||||
|
||||
tx, err := db.Begin()
|
||||
if err != nil {
|
||||
logger.Errorw("Failed to begin transaction", zap.Error(err))
|
||||
return err
|
||||
}
|
||||
|
||||
volumeUpdateStmt, err := tx.Prepare("UPDATE volumes SET container_id = ? WHERE id = ? RETURNING id, volume_id, mountpoint, container_id")
|
||||
if err != nil {
|
||||
tx.Rollback()
|
||||
logger.Errorw("Failed to prepare statement", zap.Error(err))
|
||||
return err
|
||||
}
|
||||
|
||||
for _, vol := range c.Volumes {
|
||||
err = volumeUpdateStmt.QueryRow(c.ID, vol.ID).Scan(&vol.ID, &vol.VolumeID, &vol.Mountpoint, &vol.ContainerID)
|
||||
if err != nil {
|
||||
tx.Rollback()
|
||||
logger.Errorw("Failed to update volume", zap.Error(err))
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
err = tx.Commit()
|
||||
if err != nil {
|
||||
tx.Rollback()
|
||||
logger.Errorw("Failed to commit transaction", zap.Error(err))
|
||||
return err
|
||||
}
|
||||
|
||||
logger.Debug("Upgraded container")
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (c *Container) Remove(ctx context.Context, dockerClient *docker.DockerClient, db *sql.DB, logger *zap.SugaredLogger) error {
|
||||
logger.Debugw("Removing container", zap.String("container_id", string(c.ContainerID)))
|
||||
|
||||
err := dockerClient.StopContainer(ctx, c.ContainerID)
|
||||
if err != nil {
|
||||
logger.Errorw("Failed to stop container", zap.Error(err))
|
||||
return err
|
||||
}
|
||||
|
||||
for _, volume := range c.Volumes {
|
||||
logger.Debugw("Removing volume", zap.String("volume_id", volume.VolumeID))
|
||||
err := volume.Remove(ctx, dockerClient, db, logger)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
_, err = db.ExecContext(ctx, "DELETE FROM containers WHERE container_id = ?", c.ContainerID)
|
||||
if err != nil {
|
||||
logger.Errorw("Failed to delete container", zap.Error(err))
|
||||
return err
|
||||
}
|
||||
|
||||
return dockerClient.ContainerRemove(ctx, c.ContainerID, container.RemoveOptions{})
|
||||
}
|
||||
|
||||
func (c *Container) Start(ctx context.Context, initial bool, db *sql.DB, dockerClient *docker.DockerClient, logger *zap.SugaredLogger) error {
|
||||
logger.Debugf("Starting container %+v", c)
|
||||
logger.Infow("Starting container", zap.String("container_id", string(c.ContainerID)[:12]))
|
||||
|
||||
if !initial && c.Head {
|
||||
logger.Debug("Starting and repairing head container")
|
||||
containerJSON, err := dockerClient.ContainerInspect(ctx, c.ContainerID)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// remove yourself
|
||||
dockerClient.ContainerRemove(ctx, c.ContainerID, container.RemoveOptions{})
|
||||
|
||||
var volumes []*docker.DockerVolume
|
||||
var hosts []string
|
||||
|
||||
for _, volume := range c.Volumes {
|
||||
volumes = append(volumes, &docker.DockerVolume{
|
||||
VolumeID: volume.VolumeID,
|
||||
Mountpoint: volume.Mountpoint,
|
||||
})
|
||||
}
|
||||
|
||||
for _, supplementalContainer := range c.Deployment.Containers() {
|
||||
if supplementalContainer.Head {
|
||||
continue
|
||||
}
|
||||
|
||||
ip, err := supplementalContainer.GetIp(dockerClient, logger)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
hosts = append(hosts, fmt.Sprintf("%s:%s", supplementalContainer.FriendlyName, ip))
|
||||
}
|
||||
|
||||
// recreate yourself
|
||||
resp, err := dockerClient.CreateDockerContainer(ctx,
|
||||
containerJSON.Image,
|
||||
volumes,
|
||||
containerJSON.Config.Env,
|
||||
hosts,
|
||||
&c.Name,
|
||||
)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
c.ContainerID = resp.ID
|
||||
db.Exec("UPDATE containers SET container_id = ? WHERE id = ?", string(c.ContainerID), c.ID)
|
||||
}
|
||||
|
||||
return dockerClient.ContainerStart(ctx, string(c.ContainerID), container.StartOptions{})
|
||||
}
|
||||
|
||||
func (c *Container) Wait(ctx context.Context, port uint16, dockerClient *docker.DockerClient) error {
|
||||
return dockerClient.ContainerWait(ctx, c.ContainerID, port)
|
||||
}
|
||||
|
||||
func (c *Container) GetIp(dockerClient *docker.DockerClient, logger *zap.SugaredLogger) (string, error) {
|
||||
containerJSON, err := dockerClient.ContainerInspect(context.Background(), c.ContainerID)
|
||||
if err != nil {
|
||||
logger.Errorw("Failed to inspect container", zap.Error(err), zap.String("container_id", string(c.ContainerID[:12])))
|
||||
return "", err
|
||||
}
|
||||
|
||||
ip := containerJSON.NetworkSettings.IPAddress
|
||||
|
||||
return ip, nil
|
||||
}
|
||||
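As a side note on the host list built in CreateContainer and Start: the `friendlyName:ip` strings follow Docker's ExtraHosts convention of `hostname:IP`, which is what lets the head container reach supplemental containers by their friendly names. A tiny illustrative sketch, with made-up addresses:

```go
package main

import (
	"fmt"

	"github.com/docker/docker/api/types/container"
)

func main() {
	// Sketch only: the "friendlyName:ip" strings assembled above map onto
	// Docker's HostConfig.ExtraHosts, which expects "hostname:IP" entries.
	hostConfig := container.HostConfig{
		ExtraHosts: []string{
			"postgres:172.17.0.3", // hypothetical supplemental container
			"redis:172.17.0.4",    // hypothetical supplemental container
		},
	}
	fmt.Println(hostConfig.ExtraHosts)
}
```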
237
internal/models/deployment.go
Normal file
@@ -0,0 +1,237 @@
|
||||
package models
|
||||
|
||||
import (
|
||||
"context"
|
||||
"database/sql"
|
||||
"fmt"
|
||||
"net/url"
|
||||
|
||||
"github.com/juls0730/flux/internal/docker"
|
||||
proxyManagerService "github.com/juls0730/flux/internal/services/proxy"
|
||||
"github.com/juls0730/flux/pkg"
|
||||
"go.uber.org/zap"
|
||||
)
|
||||
|
||||
type Deployment struct {
|
||||
ID int64 `json:"id"`
|
||||
containers []*Container `json:"-"`
|
||||
URL string `json:"url"`
|
||||
Port uint16 `json:"port"`
|
||||
|
||||
headCache *Container
|
||||
}
|
||||
|
||||
func NewDeployment() *Deployment {
|
||||
return &Deployment{
|
||||
containers: make([]*Container, 0),
|
||||
}
|
||||
}
|
||||
|
||||
func (d *Deployment) Remove(ctx context.Context, dockerClient *docker.DockerClient, db *sql.DB, logger *zap.SugaredLogger) error {
|
||||
logger.Debugw("Removing deployment", zap.Int64("id", d.ID))
|
||||
for _, container := range d.containers {
|
||||
err := container.Remove(ctx, dockerClient, db, logger)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
db.ExecContext(ctx, "DELETE FROM deployments WHERE id = ?", d.ID)
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (d *Deployment) Head() *Container {
|
||||
if d.headCache != nil {
|
||||
return d.headCache
|
||||
}
|
||||
|
||||
for _, container := range d.containers {
|
||||
if container.Head {
|
||||
d.headCache = container
|
||||
return container
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (d *Deployment) Containers() []*Container {
|
||||
if d.containers == nil {
|
||||
return nil
|
||||
}
|
||||
|
||||
// copy the slice so that we don't modify the original
|
||||
containers := make([]*Container, len(d.containers))
|
||||
copy(containers, d.containers)
|
||||
|
||||
return containers
|
||||
}
|
||||
|
||||
func (d *Deployment) AppendContainer(container *Container) {
|
||||
d.headCache = nil
|
||||
d.containers = append(d.containers, container)
|
||||
}
|
||||
|
||||
func (d *Deployment) Start(ctx context.Context, dockerClient *docker.DockerClient) error {
|
||||
for _, container := range d.containers {
|
||||
err := dockerClient.StartContainer(ctx, container.ContainerID)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to start container (%s): %v", container.ContainerID[:12], err)
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (d *Deployment) GetInternalUrl(dockerClient *docker.DockerClient) (*url.URL, error) {
|
||||
containerJSON, err := dockerClient.ContainerInspect(context.Background(), d.Head().ContainerID)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if containerJSON.NetworkSettings.IPAddress == "" {
|
||||
return nil, fmt.Errorf("no IP address found for container %s", d.Head().ContainerID[:12])
|
||||
}
|
||||
|
||||
containerUrl, err := url.Parse(fmt.Sprintf("http://%s:%d", containerJSON.NetworkSettings.IPAddress, d.Port))
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return containerUrl, nil
|
||||
}
|
||||
|
||||
func (d *Deployment) Stop(ctx context.Context, dockerClient *docker.DockerClient) error {
|
||||
for _, container := range d.containers {
|
||||
err := dockerClient.StopContainer(ctx, container.ContainerID)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to stop container (%s): %v", container.ContainerID[:12], err)
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// gets the status of the head container, and attempts to bring the supplemental containers into an aligned state
|
||||
func (deployment *Deployment) Status(ctx context.Context, dockerClient *docker.DockerClient, logger *zap.SugaredLogger) (string, error) {
|
||||
// first, get the status of the head container
|
||||
headStatus, err := dockerClient.GetContainerStatus(deployment.Head().ContainerID)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
|
||||
// then, check the status of all supplemental containers
|
||||
for _, container := range deployment.containers {
|
||||
if container.Head {
|
||||
continue
|
||||
}
|
||||
|
||||
containerStatus, err := dockerClient.GetContainerStatus(container.ContainerID)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
|
||||
// if the head is stopped, but the supplemental container is running, stop the supplemental container
|
||||
if headStatus.Status != "running" && containerStatus.Status == "running" {
|
||||
err := dockerClient.StopContainer(ctx, container.ContainerID)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
}
|
||||
|
||||
// if the head is running, but the supplemental container is stopped, return "failed"
|
||||
if headStatus.Status == "running" && containerStatus.Status != "running" {
|
||||
logger.Debugw("Supplemental container is not running but head is, returning to failed state", zap.String("container_id", string(container.ContainerID[:12])))
|
||||
for _, supplementalContainer := range deployment.containers {
|
||||
err := dockerClient.StopContainer(ctx, supplementalContainer.ContainerID)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
}
|
||||
|
||||
return "failed", nil
|
||||
}
|
||||
}
|
||||
|
||||
switch headStatus.Status {
|
||||
case "running":
|
||||
return "running", nil
|
||||
case "exited", "dead":
|
||||
if headStatus.ExitCode != 0 {
|
||||
// non-zero exit code in unix terminology means the program did not complete successfully
|
||||
return "failed", nil
|
||||
}
|
||||
|
||||
return "stopped", nil
|
||||
default:
|
||||
return "stopped", nil
|
||||
}
|
||||
}
|
||||
|
||||
// Takes an existing deployment, and gracefully upgrades the app to a new image
|
||||
func (deployment *Deployment) Upgrade(ctx context.Context, projectConfig *pkg.ProjectConfig, imageName string, dockerClient *docker.DockerClient, proxyManager *proxyManagerService.ProxyManager, db *sql.DB, logger *zap.SugaredLogger) error {
|
||||
// copy the old head container since Upgrade updates the container in place
|
||||
oldHeadContainer := *deployment.Head()
|
||||
|
||||
// we only upgrade the head container, in the future we might want to allow upgrading supplemental containers, but this should work just fine for now.
|
||||
err := deployment.Head().Upgrade(ctx, imageName, projectConfig.Environment, dockerClient, db, logger)
|
||||
if err != nil {
|
||||
logger.Errorw("Failed to upgrade container", zap.Error(err))
|
||||
return err
|
||||
}
|
||||
|
||||
db.Exec("DELETE FROM containers WHERE id = ?", oldHeadContainer.ID)
|
||||
|
||||
newHeadContainer := deployment.Head()
|
||||
logger.Debugw("Starting container", zap.String("container_id", string(newHeadContainer.ContainerID[:12])))
|
||||
err = newHeadContainer.Start(ctx, true, db, dockerClient, logger)
|
||||
if err != nil {
|
||||
logger.Errorw("Failed to start container", zap.Error(err))
|
||||
return err
|
||||
}
|
||||
|
||||
if err := newHeadContainer.Wait(ctx, projectConfig.Port, dockerClient); err != nil {
|
||||
logger.Errorw("Failed to wait for container", zap.Error(err))
|
||||
return err
|
||||
}
|
||||
|
||||
if _, err := db.Exec("UPDATE deployments SET url = ?, port = ? WHERE id = ?", projectConfig.Url, projectConfig.Port, deployment.ID); err != nil {
|
||||
logger.Errorw("Failed to update deployment", zap.Error(err))
|
||||
return err
|
||||
}
|
||||
|
||||
// Create a new proxy that points to the new head, and replace the old one, but ensure that the old one is gracefully drained of connections
|
||||
oldProxy, ok := proxyManager.Load(deployment.URL)
|
||||
|
||||
newDeploymentInternalUrl, err := deployment.GetInternalUrl(dockerClient)
|
||||
if err != nil {
|
||||
logger.Errorw("Failed to get internal url", zap.Error(err))
|
||||
return err
|
||||
}
|
||||
newProxy, err := proxyManagerService.NewDeploymentProxy(*newDeploymentInternalUrl)
|
||||
if err != nil {
|
||||
logger.Errorw("Failed to create deployment proxy", zap.Error(err))
|
||||
return err
|
||||
}
|
||||
|
||||
proxyManager.RemoveDeployment(deployment.URL)
|
||||
proxyManager.AddProxy(projectConfig.Url, newProxy)
|
||||
deployment.URL = projectConfig.Url
|
||||
|
||||
// gracefully shut down the old proxy, or if it doesn't exist, just remove the containers
|
||||
if ok {
|
||||
go oldProxy.GracefulShutdown(func() {
|
||||
err := dockerClient.DeleteDockerContainer(context.Background(), oldHeadContainer.ContainerID)
|
||||
if err != nil {
|
||||
logger.Errorw("Failed to remove container", zap.Error(err))
|
||||
}
|
||||
})
|
||||
} else {
|
||||
err := dockerClient.DeleteDockerContainer(context.Background(), oldHeadContainer.ContainerID)
|
||||
if err != nil {
|
||||
logger.Errorw("Failed to remove container", zap.Error(err))
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
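Stepping back, Deployment.Upgrade implements the zero-downtime swap: build and start the new head, wait for its port, repoint the proxy, then drain and delete the old container. The snippet below is an illustrative sketch of that handoff using only the standard library; it is not the actual ProxyManager implementation.

```go
package main

import (
	"net/http"
	"net/http/httputil"
	"net/url"
	"sync/atomic"
)

// swappableProxy routes every request through an atomically swappable
// pointer, so repointing it at the new head container is instantaneous and
// the old backend can be drained afterwards.
type swappableProxy struct {
	backend atomic.Pointer[httputil.ReverseProxy]
}

func (s *swappableProxy) ServeHTTP(w http.ResponseWriter, r *http.Request) {
	s.backend.Load().ServeHTTP(w, r)
}

func (s *swappableProxy) point(target *url.URL) {
	s.backend.Store(httputil.NewSingleHostReverseProxy(target))
}

func main() {
	old, _ := url.Parse("http://172.17.0.2:8080")  // hypothetical old head
	next, _ := url.Parse("http://172.17.0.5:8080") // hypothetical new head

	p := &swappableProxy{}
	p.point(old)
	p.point(next) // swap; the previous backend can now be shut down gracefully

	http.ListenAndServe("127.0.0.1:7465", p)
}
```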
46
internal/models/volume.go
Normal file
@@ -0,0 +1,46 @@
|
||||
package models
|
||||
|
||||
import (
|
||||
"context"
|
||||
"database/sql"
|
||||
|
||||
docker "github.com/juls0730/flux/internal/docker"
|
||||
"go.uber.org/zap"
|
||||
)
|
||||
|
||||
type Volume struct {
|
||||
ID int64 `json:"id"`
|
||||
docker.DockerVolume
|
||||
ContainerID int64 `json:"container_id"`
|
||||
}
|
||||
|
||||
// Creates a volume for a container, does not insert it into the database
|
||||
func CreateVolume(ctx context.Context, mountpoint string, dockerClient *docker.DockerClient, logger *zap.SugaredLogger) (*Volume, error) {
|
||||
logger.Debugw("Creating volume", zap.String("mountpoint", mountpoint))
|
||||
|
||||
dockerVol, err := dockerClient.CreateDockerVolume(ctx)
|
||||
if err != nil {
|
||||
logger.Errorw("Failed to create volume", zap.Error(err))
|
||||
return nil, err
|
||||
}
|
||||
|
||||
dockerVol.Mountpoint = mountpoint
|
||||
|
||||
vol := &Volume{
|
||||
DockerVolume: *dockerVol,
|
||||
}
|
||||
|
||||
return vol, nil
|
||||
}
|
||||
|
||||
func (v *Volume) Remove(ctx context.Context, dockerClient *docker.DockerClient, db *sql.DB, logger *zap.SugaredLogger) error {
|
||||
logger.Debugw("Removing volume", zap.String("volume_id", v.VolumeID))
|
||||
|
||||
_, err := db.ExecContext(ctx, "DELETE FROM volumes WHERE volume_id = ?", v.VolumeID)
|
||||
if err != nil {
|
||||
logger.Errorw("Failed to delete volume", zap.Error(err))
|
||||
return err
|
||||
}
|
||||
|
||||
return dockerClient.DeleteDockerVolume(ctx, v.VolumeID)
|
||||
}
|
||||
307
internal/services/appManagerService/appmanager.go
Normal file
@@ -0,0 +1,307 @@
|
||||
package appManagerService
|
||||
|
||||
import (
|
||||
"context"
|
||||
"database/sql"
|
||||
"fmt"
|
||||
|
||||
"github.com/google/uuid"
|
||||
"github.com/juls0730/flux/internal/docker"
|
||||
models "github.com/juls0730/flux/internal/models"
|
||||
proxyManagerService "github.com/juls0730/flux/internal/services/proxy"
|
||||
"github.com/juls0730/flux/internal/util"
|
||||
"github.com/juls0730/flux/pkg"
|
||||
"go.uber.org/zap"
|
||||
)
|
||||
|
||||
type AppManager struct {
|
||||
util.TypedMap[uuid.UUID, *models.App]
|
||||
nameIndex util.TypedMap[string, uuid.UUID]
|
||||
logger *zap.SugaredLogger
|
||||
proxyManager *proxyManagerService.ProxyManager
|
||||
dockerClient *docker.DockerClient
|
||||
db *sql.DB
|
||||
}
|
||||
|
||||
func NewAppManager(db *sql.DB, dockerClient *docker.DockerClient, proxyManager *proxyManagerService.ProxyManager, logger *zap.SugaredLogger) *AppManager {
|
||||
return &AppManager{
|
||||
db: db,
|
||||
dockerClient: dockerClient,
|
||||
proxyManager: proxyManager,
|
||||
logger: logger,
|
||||
}
|
||||
}
|
||||
|
||||
func (appManager *AppManager) CreateApp(ctx context.Context, imageName string, projectConfig *pkg.ProjectConfig, id uuid.UUID) (*models.App, error) {
|
||||
app := &models.App{
|
||||
Id: id,
|
||||
}
|
||||
appManager.logger.Debugw("Creating deployment", zap.String("id", app.Id.String()))
|
||||
|
||||
app.Deployment = models.NewDeployment()
|
||||
if app.Deployment == nil {
|
||||
appManager.logger.Errorw("Failed to create deployment")
|
||||
return nil, fmt.Errorf("failed to create deployment")
|
||||
}
|
||||
|
||||
if err := appManager.db.QueryRowContext(ctx, "INSERT INTO deployments (url, port) VALUES ($1, $2) RETURNING id, url, port", projectConfig.Url, projectConfig.Port).Scan(&app.Deployment.ID, &app.Deployment.URL, &app.Deployment.Port); err != nil {
|
||||
appManager.logger.Errorw("Failed to create deployment", zap.Error(err))
|
||||
return nil, err
|
||||
}
|
||||
|
||||
for _, container := range projectConfig.Containers {
|
||||
// Create a container given a container configuration and a deployment. This will do a few things:
|
||||
// 1. Create the container in the docker daemon
|
||||
// 2. Create the volumes for the container
|
||||
// 3. Insert the container and volumes into the database
|
||||
c, err := models.CreateContainer(ctx, container.ImageName, container.Name, false, container.Environment, container.Volumes, app.Deployment, appManager.logger, appManager.dockerClient, appManager.db)
|
||||
if err != nil {
|
||||
appManager.logger.Errorw("Failed to create container", zap.Error(err))
|
||||
return nil, fmt.Errorf("failed to create container: %v", err)
|
||||
}
|
||||
|
||||
c.Start(ctx, true, appManager.db, appManager.dockerClient, appManager.logger)
|
||||
}
|
||||
|
||||
_, err := models.CreateContainer(ctx, imageName, projectConfig.Name, true, projectConfig.Environment, projectConfig.Volumes, app.Deployment, appManager.logger, appManager.dockerClient, appManager.db)
|
||||
if err != nil {
|
||||
appManager.logger.Errorw("Failed to create container", zap.Error(err))
|
||||
return nil, fmt.Errorf("failed to create container: %v", err)
|
||||
}
|
||||
|
||||
err = appManager.db.QueryRowContext(ctx, "INSERT INTO apps (id, name, state, deployment_id) VALUES ($1, $2, $3, $4) RETURNING name, state, deployment_id", app.Id[:], projectConfig.Name, "running", app.Deployment.ID).Scan(&app.Name, &app.State, &app.DeploymentID)
|
||||
if err != nil {
|
||||
appManager.logger.Errorw("Failed to insert app", zap.Error(err))
|
||||
return nil, fmt.Errorf("failed to insert app: %v", err)
|
||||
}
|
||||
|
||||
err = app.Deployment.Start(ctx, appManager.dockerClient)
|
||||
if err != nil {
|
||||
appManager.logger.Errorw("Failed to start deployment", zap.Error(err))
|
||||
return nil, fmt.Errorf("failed to start deployment: %v", err)
|
||||
}
|
||||
|
||||
deploymentInternalUrl, err := app.Deployment.GetInternalUrl(appManager.dockerClient)
|
||||
if err != nil {
|
||||
appManager.logger.Errorw("Failed to get internal url", zap.Error(err))
|
||||
return nil, fmt.Errorf("failed to get internal url: %v", err)
|
||||
}
|
||||
|
||||
newProxy, err := proxyManagerService.NewDeploymentProxy(*deploymentInternalUrl)
|
||||
if err != nil {
|
||||
appManager.logger.Errorw("Failed to create deployment proxy", zap.Error(err))
|
||||
return nil, fmt.Errorf("failed to create deployment proxy: %v", err)
|
||||
}
|
||||
|
||||
appManager.AddApp(app.Id, app)
|
||||
appManager.proxyManager.AddProxy(app.Deployment.URL, newProxy)
|
||||
|
||||
return app, nil
|
||||
}
|
||||
|
||||
func (appManager *AppManager) Upgrade(ctx context.Context, appId uuid.UUID, imageName string, projectConfig *pkg.ProjectConfig) error {
|
||||
appManager.logger.Debugw("Upgrading app", zap.String("app_id", appId.String()), zap.String("image_name", imageName))
|
||||
|
||||
app := appManager.GetApp(appId)
|
||||
if app == nil {
|
||||
appManager.logger.Errorw("App not found, but upgrade called", zap.String("app_id", appId.String()))
|
||||
return fmt.Errorf("failed to get app")
|
||||
}
|
||||
|
||||
deploymentStatus, err := app.Deployment.Status(ctx, appManager.dockerClient, appManager.logger)
|
||||
if err != nil {
|
||||
appManager.logger.Errorw("Failed to get deployment status", zap.Error(err))
|
||||
return fmt.Errorf("failed to get deployment status: %v", err)
|
||||
}
|
||||
|
||||
if deploymentStatus != "running" {
|
||||
err = app.Deployment.Start(ctx, appManager.dockerClient)
|
||||
if err != nil {
|
||||
appManager.logger.Errorw("Failed to start deployment", zap.Error(err))
|
||||
return fmt.Errorf("failed to start deployment: %v", err)
|
||||
}
|
||||
}
|
||||
|
||||
err = app.Deployment.Upgrade(ctx, projectConfig, imageName, appManager.dockerClient, appManager.proxyManager, appManager.db, appManager.logger)
|
||||
if err != nil {
|
||||
appManager.logger.Errorw("Failed to upgrade deployment", zap.Error(err))
|
||||
return fmt.Errorf("failed to upgrade deployment: %v", err)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (am *AppManager) GetAppByName(name string) *models.App {
|
||||
id, ok := am.nameIndex.Load(name)
|
||||
if !ok {
|
||||
return nil
|
||||
}
|
||||
|
||||
return am.GetApp(id)
|
||||
}
|
||||
|
||||
func (am *AppManager) GetApp(id uuid.UUID) *models.App {
|
||||
app, exists := am.Load(id)
|
||||
if !exists {
|
||||
return nil
|
||||
}
|
||||
|
||||
return app
|
||||
}
|
||||
|
||||
func (am *AppManager) GetAllApps() []*models.App {
|
||||
var apps []*models.App
|
||||
am.Range(func(key uuid.UUID, app *models.App) bool {
|
||||
apps = append(apps, app)
|
||||
return true
|
||||
})
|
||||
return apps
|
||||
}
|
||||
|
||||
// removes an app from the app manager
|
||||
func (am *AppManager) RemoveApp(id uuid.UUID) {
|
||||
app, ok := am.Load(id)
|
||||
if !ok {
|
||||
return
|
||||
}
|
||||
|
||||
am.nameIndex.Delete(app.Name)
|
||||
am.Delete(id)
|
||||
}
|
||||
|
||||
// add a given app to the app manager
|
||||
func (am *AppManager) AddApp(id uuid.UUID, app *models.App) {
|
||||
if app.Deployment == nil || app.Deployment.Containers() == nil || app.Deployment.Head() == nil || len(app.Deployment.Containers()) == 0 || app.Name == "" {
|
||||
panic("invalid app")
|
||||
}
|
||||
|
||||
am.nameIndex.Store(app.Name, id)
|
||||
am.Store(id, app)
|
||||
}
|
||||
|
||||
// nukes an app completely
|
||||
func (am *AppManager) DeleteApp(id uuid.UUID) error {
|
||||
app := am.GetApp(id)
|
||||
if app == nil {
|
||||
return fmt.Errorf("app not found")
|
||||
}
|
||||
|
||||
am.logger.Debugw("Deleting app", zap.String("id", id.String()))
|
||||
|
||||
// calls RemoveApp
|
||||
err := app.Remove(context.Background(), am.dockerClient, am.db, am.logger)
|
||||
if err != nil {
|
||||
am.logger.Errorw("Failed to remove app", zap.Error(err))
|
||||
return err
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// Scan every app in the database, rebuild its in-memory structures, align the deployment with the stored state, and register a proxy for each deployment that is running
|
||||
func (am *AppManager) Init() error {
|
||||
am.logger.Info("Initializing deployments")
|
||||
|
||||
if am.db == nil {
|
||||
am.logger.Panic("DB is nil")
|
||||
}
|
||||
|
||||
appRows, err := am.db.Query("SELECT id, name, state, deployment_id FROM apps")
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to get apps: %v", err)
|
||||
}
|
||||
defer appRows.Close()
|
||||
|
||||
var apps []*models.App
|
||||
for appRows.Next() {
|
||||
var app *models.App = new(models.App)
|
||||
var appIdBlob []byte
|
||||
if err := appRows.Scan(&appIdBlob, &app.Name, &app.State, &app.DeploymentID); err != nil {
|
||||
return fmt.Errorf("failed to scan app: %v", err)
|
||||
}
|
||||
app.Id = uuid.Must(uuid.FromBytes(appIdBlob))
|
||||
app.Deployment = models.NewDeployment()
|
||||
if app.Deployment == nil {
|
||||
return fmt.Errorf("failed to create deployment")
|
||||
}
|
||||
|
||||
err := am.db.QueryRow("SELECT id, url, port FROM deployments WHERE id = ?", app.DeploymentID).Scan(&app.Deployment.ID, &app.Deployment.URL, &app.Deployment.Port)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to get deployment: %v", err)
|
||||
}
|
||||
am.logger.Debugw("Found deployment", zap.Int64("id", app.Deployment.ID))
|
||||
|
||||
containerRows, err := am.db.Query("SELECT id, container_id, friendly_name, deployment_id, head FROM containers WHERE deployment_id = ?", app.DeploymentID)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to query containers: %v", err)
|
||||
}
|
||||
defer containerRows.Close()
|
||||
|
||||
for containerRows.Next() {
|
||||
var container *models.Container = new(models.Container)
|
||||
containerRows.Scan(&container.ID, &container.ContainerID, &container.FriendlyName, &container.DeploymentID, &container.Head)
|
||||
container.Deployment = app.Deployment
|
||||
|
||||
volumeRows, err := am.db.Query("SELECT id, volume_id, container_id, mountpoint FROM volumes WHERE container_id = ?", container.ID)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to query volumes: %v", err)
|
||||
}
|
||||
defer volumeRows.Close()
|
||||
|
||||
for volumeRows.Next() {
|
||||
volume := new(models.Volume)
|
||||
volumeRows.Scan(&volume.ID, &volume.VolumeID, &volume.ContainerID, &volume.Mountpoint)
|
||||
container.Volumes = append(container.Volumes, volume)
|
||||
}
|
||||
|
||||
app.Deployment.AppendContainer(container)
|
||||
}
|
||||
|
||||
// align the state of the deployment with the state of the app
|
||||
switch app.State {
|
||||
case "running":
|
||||
err = app.Deployment.Start(context.Background(), am.dockerClient)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to start deployment: %v", err)
|
||||
}
|
||||
case "stopped":
|
||||
err = app.Deployment.Stop(context.Background(), am.dockerClient)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to stop deployment: %v", err)
|
||||
}
|
||||
}
|
||||
|
||||
apps = append(apps, app)
|
||||
}
|
||||
|
||||
for _, app := range apps {
|
||||
am.AddApp(app.Id, app)
|
||||
am.logger.Debugw("Added app", zap.String("id", app.Id.String()))
|
||||
status, err := app.Deployment.Status(context.Background(), am.dockerClient, am.logger)
|
||||
if err != nil {
|
||||
am.logger.Warnw("Failed to get deployment status", zap.Error(err))
|
||||
continue
|
||||
}
|
||||
|
||||
if status != "running" {
|
||||
continue
|
||||
}
|
||||
|
||||
proxyURL, err := app.Deployment.GetInternalUrl(am.dockerClient)
|
||||
if err != nil {
|
||||
am.logger.Errorw("Failed to parse proxy url", zap.Error(err))
|
||||
continue
|
||||
}
|
||||
|
||||
proxy, err := proxyManagerService.NewDeploymentProxy(*proxyURL)
|
||||
if err != nil {
|
||||
am.logger.Errorw("Failed to create proxy", zap.Error(err))
|
||||
continue
|
||||
}
|
||||
|
||||
am.proxyManager.AddProxy(app.Deployment.URL, proxy)
|
||||
am.logger.Debugw("Created proxy", zap.String("id", app.Id.String()))
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
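For orientation, here is a minimal sketch of how a daemon entry point could wire the app manager and proxy manager together at startup. The `run` helper and the `appService`/`docker` import paths are assumptions made for illustration; only `NewAppManager`, `Init`, and the proxy package below (`NewProxyManager`, `ListenAndServe`) come from this diff.

```go
package main

import (
	"database/sql"

	"go.uber.org/zap"

	// NOTE: the app-manager and docker import paths below are assumptions;
	// only the proxy package path appears in this diff.
	"github.com/juls0730/flux/internal/docker"
	appService "github.com/juls0730/flux/internal/services/app"
	proxyManagerService "github.com/juls0730/flux/internal/services/proxy"
)

// run sketches how a daemon entry point could wire the services together.
func run(db *sql.DB, dockerClient *docker.DockerClient) error {
	logger := zap.Must(zap.NewDevelopment()).Sugar()

	// The proxy manager must exist before the app manager so that Init can
	// re-register proxies for deployments that are already running.
	proxyManager := proxyManagerService.NewProxyManager(logger)
	appManager := appService.NewAppManager(db, dockerClient, proxyManager, logger)

	// Rebuild in-memory state from the database and align it with Docker.
	if err := appManager.Init(); err != nil {
		return err
	}

	// Serve proxied traffic on the default host (0.0.0.0:7465).
	return proxyManager.ListenAndServe("")
}
```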
133
internal/services/proxy/proxy.go
Normal file
@@ -0,0 +1,133 @@
|
||||
package proxyManagerService
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"net/http"
|
||||
"net/http/httputil"
|
||||
"net/url"
|
||||
"sync/atomic"
|
||||
"time"
|
||||
|
||||
"github.com/juls0730/flux/internal/util"
|
||||
"go.uber.org/zap"
|
||||
)
|
||||
|
||||
type DeploymentId int64
|
||||
|
||||
// ProxyManager oversees proxying requests to the correct deployment, routing by the request's Host header
|
||||
type ProxyManager struct {
|
||||
util.TypedMap[string, *Proxy]
|
||||
logger *zap.SugaredLogger
|
||||
}
|
||||
|
||||
func NewProxyManager(logger *zap.SugaredLogger) *ProxyManager {
|
||||
return &ProxyManager{
|
||||
logger: logger,
|
||||
}
|
||||
}
|
||||
|
||||
func (proxyManager *ProxyManager) ListenAndServe(host string) error {
|
||||
if host == "" {
|
||||
host = "0.0.0.0:7465"
|
||||
}
|
||||
|
||||
proxyManager.logger.Infof("Proxy server starting on http://%s", host)
|
||||
if err := http.ListenAndServe(host, proxyManager); err != nil && err != http.ErrServerClosed {
|
||||
return fmt.Errorf("failed to start proxy server: %v", err)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// Stops forwarding traffic to a deployment
|
||||
func (proxyManager *ProxyManager) RemoveDeployment(host string) {
|
||||
proxyManager.Delete(host)
|
||||
}
|
||||
|
||||
// Starts forwarding traffic to a deployment. The deployment must be ready to receive requests before this is called.
|
||||
func (proxyManager *ProxyManager) AddProxy(host string, proxy *Proxy) {
|
||||
proxyManager.logger.Debugw("Adding proxy", zap.String("host", host))
|
||||
proxyManager.Store(host, proxy)
|
||||
}
|
||||
|
||||
// This function is responsible for taking an http request and forwarding it to the correct deployment
|
||||
func (proxyManager *ProxyManager) ServeHTTP(w http.ResponseWriter, r *http.Request) {
|
||||
host := r.Host
|
||||
|
||||
proxyManager.logger.Debugw("Proxying request", zap.String("host", host))
|
||||
proxy, ok := proxyManager.Load(host)
|
||||
if !ok {
|
||||
http.Error(w, "Not found", http.StatusNotFound)
|
||||
return
|
||||
}
|
||||
|
||||
proxy.proxyFunc.ServeHTTP(w, r)
|
||||
}
|
||||
|
||||
type Proxy struct {
|
||||
forwardingFor url.URL
|
||||
proxyFunc *httputil.ReverseProxy
|
||||
gracePeriod time.Duration
|
||||
activeRequests int64
|
||||
}
|
||||
|
||||
// type DeploymentProxy struct {
|
||||
// deployment *models.Deployment
|
||||
// proxy *httputil.ReverseProxy
|
||||
// gracePeriod time.Duration
|
||||
// activeRequests int64
|
||||
// }
|
||||
|
||||
// Creates a proxy for a given deployment
|
||||
func NewDeploymentProxy(forwardingFor url.URL) (*Proxy, error) {
|
||||
proxy := &Proxy{
|
||||
forwardingFor: forwardingFor,
|
||||
gracePeriod: time.Second * 30,
|
||||
activeRequests: 0,
|
||||
}
|
||||
|
||||
proxy.proxyFunc = &httputil.ReverseProxy{
|
||||
Director: func(req *http.Request) {
|
||||
req.URL = &url.URL{
|
||||
Scheme: forwardingFor.Scheme,
|
||||
Host: forwardingFor.Host,
|
||||
Path: req.URL.Path,
|
||||
}
|
||||
req.Host = forwardingFor.Host
|
||||
atomic.AddInt64(&proxy.activeRequests, 1)
|
||||
},
|
||||
Transport: &http.Transport{
|
||||
MaxIdleConns: 100,
|
||||
IdleConnTimeout: 90 * time.Second,
|
||||
MaxIdleConnsPerHost: 100,
|
||||
},
|
||||
ModifyResponse: func(resp *http.Response) error {
|
||||
atomic.AddInt64(&proxy.activeRequests, -1)
|
||||
return nil
|
||||
},
|
||||
}
|
||||
|
||||
return proxy, nil
|
||||
}
|
||||
|
||||
// Waits for in-flight requests on the proxy to drain (or for the grace period to expire), then calls shutdownFunc
|
||||
func (p *Proxy) GracefulShutdown(shutdownFunc func()) {
|
||||
ctx, cancel := context.WithTimeout(context.Background(), p.gracePeriod)
|
||||
defer cancel()
|
||||
|
||||
done := false
|
||||
for !done {
|
||||
select {
|
||||
case <-ctx.Done():
|
||||
done = true
|
||||
default:
|
||||
if atomic.LoadInt64(&p.activeRequests) == 0 {
|
||||
done = true
|
||||
}
|
||||
|
||||
time.Sleep(time.Second)
|
||||
}
|
||||
}
|
||||
|
||||
shutdownFunc()
|
||||
}
|
||||
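To illustrate how the pieces above fit together during an upgrade, here is a hedged sketch of a swap-and-drain helper: register the new proxy for the host, then let the old proxy drain in-flight requests before stopping the old containers. `swapProxy` and its `stopOld` callback are hypothetical; the real cut-over lives in `Deployment.Upgrade`, which is not part of this diff.

```go
package proxyManagerService

import "net/url"

// swapProxy is a hypothetical helper sketching how an upgrade could cut traffic
// over to a new deployment and then drain the old one; the real logic lives in
// Deployment.Upgrade, which is not shown here.
func swapProxy(pm *ProxyManager, host string, newTarget url.URL, oldProxy *Proxy, stopOld func()) error {
	newProxy, err := NewDeploymentProxy(newTarget)
	if err != nil {
		return err
	}

	// Register the new proxy first so requests for the host immediately hit
	// the new containers.
	pm.AddProxy(host, newProxy)

	// Drain in-flight requests on the old proxy, then run stopOld (e.g. stop
	// and remove the old containers) once it is idle or the 30 second grace
	// period expires.
	go oldProxy.GracefulShutdown(stopOld)

	return nil
}
```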
70
internal/util/cli/project.go
Normal file
@@ -0,0 +1,70 @@
|
||||
package cli_util
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"os"
|
||||
|
||||
"github.com/juls0730/flux/pkg"
|
||||
"github.com/juls0730/flux/pkg/API"
|
||||
)
|
||||
|
||||
type Project struct {
|
||||
Id string `json:"id"`
|
||||
Name string `json:"name"`
|
||||
}
|
||||
|
||||
func GetProject(command string, args []string, config pkg.CLIConfig) (*Project, error) {
|
||||
var projectName string
|
||||
|
||||
// we are in a project directory and the project is deployed
|
||||
if _, err := os.Stat(".fluxid"); err == nil {
|
||||
id, err := os.ReadFile(".fluxid")
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to read .fluxid: %v", err)
|
||||
}
|
||||
|
||||
app, err := GetRequest[API.App](config.DaemonURL + "/app/by-id/" + string(id))
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to get app: %v", err)
|
||||
}
|
||||
|
||||
return &Project{
|
||||
Id: app.Id.String(),
|
||||
Name: app.Name,
|
||||
}, nil
|
||||
}
|
||||
|
||||
// we are calling flux from a project directory, but the project isn't deployed yet
|
||||
if len(args) == 0 {
|
||||
if _, err := os.Stat("flux.json"); err != nil {
|
||||
return nil, fmt.Errorf("the current directory is not a flux project, please run flux %[1]s in the project directory", command)
|
||||
}
|
||||
|
||||
fluxConfigFile, err := os.Open("flux.json")
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to open flux.json: %v", err)
|
||||
}
|
||||
defer fluxConfigFile.Close()
|
||||
|
||||
var projectConfig pkg.ProjectConfig
|
||||
if err := json.NewDecoder(fluxConfigFile).Decode(&projectConfig); err != nil {
|
||||
return nil, fmt.Errorf("failed to decode flux.json: %v", err)
|
||||
}
|
||||
|
||||
projectName = projectConfig.Name
|
||||
} else {
|
||||
projectName = args[0]
|
||||
}
|
||||
|
||||
// look the app up by its project name (e.g. `flux start my-project`, or the name read from flux.json)
|
||||
app, err := GetRequest[API.App](config.DaemonURL + "/app/by-name/" + projectName)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to get app: %v", err)
|
||||
}
|
||||
|
||||
return &Project{
|
||||
Id: app.Id.String(),
|
||||
Name: app.Name,
|
||||
}, nil
|
||||
}
|
||||
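A hypothetical caller, to show the resolution order `GetProject` implements (`.fluxid` file, then `flux.json` in the working directory, then an explicit name argument). The `stopCommand` wrapper, the import path for `cli_util`, and the `/app/{id}/stop` endpoint are assumptions; only `GetProject`, `PutRequest`, and `pkg.CLIConfig` come from this diff.

```go
package main

import (
	cli_util "github.com/juls0730/flux/internal/util/cli"
	"github.com/juls0730/flux/pkg"
)

// stopCommand is a hypothetical CLI handler; the /app/{id}/stop endpoint and
// the surrounding command wiring are assumptions made for illustration.
func stopCommand(args []string, config pkg.CLIConfig) error {
	// Resolve the project from .fluxid, flux.json, or an explicit name argument.
	project, err := cli_util.GetProject("stop", args, config)
	if err != nil {
		return err
	}

	// PutRequest is defined in internal/util/cli/request.go below.
	return cli_util.PutRequest(config.DaemonURL+"/app/"+project.Id+"/stop", nil)
}
```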
86
internal/util/cli/request.go
Normal file
@@ -0,0 +1,86 @@
|
||||
package cli_util
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"io"
|
||||
"net/http"
|
||||
"strings"
|
||||
)
|
||||
|
||||
// GetRequest makes an HTTP GET request to the daemon and decodes the JSON response body into a value of type T
|
||||
func GetRequest[T any](url string) (*T, error) {
|
||||
resp, err := http.Get(url)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("http get request failed: %v", err)
|
||||
}
|
||||
defer resp.Body.Close()
|
||||
|
||||
if resp.StatusCode != http.StatusOK {
|
||||
responseBody, err := io.ReadAll(resp.Body)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("error reading response body: %v", err)
|
||||
}
|
||||
|
||||
responseBody = []byte(strings.TrimSuffix(string(responseBody), "\n"))
|
||||
|
||||
return nil, fmt.Errorf("http get request failed: %s", responseBody)
|
||||
}
|
||||
|
||||
var data T
|
||||
if err := json.NewDecoder(resp.Body).Decode(&data); err != nil {
|
||||
return nil, fmt.Errorf("failed to decode http response: %v", err)
|
||||
}
|
||||
|
||||
return &data, nil
|
||||
}
|
||||
|
||||
func DeleteRequest(url string) error {
|
||||
req, err := http.NewRequest("DELETE", url, nil)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to delete: %v", err)
|
||||
}
|
||||
resp, err := http.DefaultClient.Do(req)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to delete: %v", err)
|
||||
}
|
||||
defer resp.Body.Close()
|
||||
|
||||
if resp.StatusCode != http.StatusOK {
|
||||
responseBody, err := io.ReadAll(resp.Body)
|
||||
if err != nil {
|
||||
return fmt.Errorf("error reading response body: %v", err)
|
||||
}
|
||||
|
||||
responseBody = []byte(strings.TrimSuffix(string(responseBody), "\n"))
|
||||
|
||||
return fmt.Errorf("delete failed: %s", responseBody)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func PutRequest(url string, data io.Reader) error {
|
||||
req, err := http.NewRequest("PUT", url, data)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to put: %v", err)
|
||||
}
|
||||
resp, err := http.DefaultClient.Do(req)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to put: %v", err)
|
||||
}
|
||||
defer resp.Body.Close()
|
||||
|
||||
if resp.StatusCode != http.StatusOK {
|
||||
responseBody, err := io.ReadAll(resp.Body)
|
||||
if err != nil {
|
||||
return fmt.Errorf("error reading response body: %v", err)
|
||||
}
|
||||
|
||||
responseBody = []byte(strings.TrimSuffix(string(responseBody), "\n"))
|
||||
|
||||
return fmt.Errorf("put failed: %s", responseBody)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
@@ -1,4 +1,4 @@
|
||||
package models
|
||||
package cli_util
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
@@ -44,11 +44,14 @@ func NewCustomStdout(spinner *CustomSpinnerWriter) *CustomStdout {
|
||||
}
|
||||
}
|
||||
|
||||
// We have this custom writer because we want to have a spinner at the bottom of the terminal, but we don't want to have
|
||||
// it interfere with the output of the command
|
||||
func (w *CustomStdout) Write(p []byte) (n int, err error) {
|
||||
w.lock.Lock()
|
||||
defer w.lock.Unlock()
|
||||
|
||||
n, err = os.Stdout.Write([]byte(fmt.Sprintf("\033[2K\r%s", p)))
|
||||
// clear line and carriage return
|
||||
n, err = os.Stdout.Write(fmt.Appendf(nil, "\033[2K\r%s", p))
|
||||
if err != nil {
|
||||
return n, err
|
||||
}
|
||||
51
internal/util/lock.go
Normal file
@@ -0,0 +1,51 @@
|
||||
package util
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"sync"
|
||||
)
|
||||
|
||||
var ErrLocked = fmt.Errorf("item is locked")
|
||||
|
||||
type MutexLock[T comparable] struct {
|
||||
mu sync.Mutex
|
||||
deployed map[T]context.CancelFunc
|
||||
}
|
||||
|
||||
func NewMutexLock[T comparable]() *MutexLock[T] {
|
||||
return &MutexLock[T]{
|
||||
deployed: make(map[T]context.CancelFunc),
|
||||
}
|
||||
}
|
||||
|
||||
func (dt *MutexLock[T]) Lock(id T, ctx context.Context) (context.Context, error) {
|
||||
dt.mu.Lock()
|
||||
defer dt.mu.Unlock()
|
||||
|
||||
// Check if the object is locked
|
||||
if _, exists := dt.deployed[id]; exists {
|
||||
return nil, ErrLocked
|
||||
}
|
||||
|
||||
// Create a context that can be cancelled
|
||||
ctx, cancel := context.WithCancel(ctx)
|
||||
|
||||
// Store the cancel function
|
||||
dt.deployed[id] = cancel
|
||||
|
||||
return ctx, nil
|
||||
}
|
||||
|
||||
func (dt *MutexLock[T]) Unlock(id T) {
|
||||
dt.mu.Lock()
|
||||
defer dt.mu.Unlock()
|
||||
|
||||
// Remove the app from deployed tracking
|
||||
if cancel, exists := dt.deployed[id]; exists {
|
||||
// Cancel the context
|
||||
cancel()
|
||||
// Remove from map
|
||||
delete(dt.deployed, id)
|
||||
}
|
||||
}
|
||||
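A short usage sketch for `MutexLock`: a single shared lock keyed by app name rejects concurrent deployments of the same app and hands the caller a cancellable context. The `deployOnce` wrapper and its `doDeploy` callback are hypothetical; only `NewMutexLock`, `Lock`, `Unlock`, and `ErrLocked` come from the code above.

```go
package main

import (
	"context"
	"errors"
	"fmt"

	"github.com/juls0730/flux/internal/util"
)

// A single shared lock so that concurrent deploys of the same app are rejected.
var deployLocks = util.NewMutexLock[string]()

// deployOnce is a hypothetical wrapper; doDeploy stands in for the real deploy
// pipeline and exists only for illustration.
func deployOnce(appName string, doDeploy func(ctx context.Context) error) error {
	ctx, err := deployLocks.Lock(appName, context.Background())
	if errors.Is(err, util.ErrLocked) {
		return fmt.Errorf("app %s is already being deployed", appName)
	}
	if err != nil {
		return err
	}
	// Unlock cancels the context and frees the per-app lock.
	defer deployLocks.Unlock(appName)

	return doDeploy(ctx)
}
```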
30
internal/util/typedmap.go
Normal file
@@ -0,0 +1,30 @@
|
||||
package util
|
||||
|
||||
import "sync"
|
||||
|
||||
type TypedMap[K comparable, V any] struct {
|
||||
internal sync.Map
|
||||
}
|
||||
|
||||
func (m *TypedMap[K, V]) Load(key K) (V, bool) {
|
||||
val, ok := m.internal.Load(key)
|
||||
if !ok {
|
||||
var zero V
|
||||
return zero, false
|
||||
}
|
||||
return val.(V), true
|
||||
}
|
||||
|
||||
func (m *TypedMap[K, V]) Store(key K, value V) {
|
||||
m.internal.Store(key, value)
|
||||
}
|
||||
|
||||
func (m *TypedMap[K, V]) Delete(key K) {
|
||||
m.internal.Delete(key)
|
||||
}
|
||||
|
||||
func (m *TypedMap[K, V]) Range(f func(key K, value V) bool) {
|
||||
m.internal.Range(func(k, v any) bool {
|
||||
return f(k.(K), v.(V))
|
||||
})
|
||||
}
|
||||
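A small usage sketch for `TypedMap`, showing that the zero value is ready to use and that `Load` and `Range` return typed values with no `interface{}` assertions. The map name and contents are made up for illustration.

```go
package main

import (
	"fmt"

	"github.com/google/uuid"
	"github.com/juls0730/flux/internal/util"
)

// typedMapExample shows the type-safe wrapper in use; the map contents are
// hypothetical.
func typedMapExample() {
	// The zero value is ready to use, just like sync.Map.
	var names util.TypedMap[string, uuid.UUID]

	names.Store("my-app", uuid.New())

	// Load returns a typed value, so no .(uuid.UUID) assertion is needed.
	if id, ok := names.Load("my-app"); ok {
		fmt.Println("my-app ->", id)
	}

	names.Range(func(name string, id uuid.UUID) bool {
		fmt.Println(name, id)
		return true // keep iterating
	})
}
```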
10
pkg/API/app.go
Normal file
@@ -0,0 +1,10 @@
|
||||
package API
|
||||
|
||||
import "github.com/google/uuid"
|
||||
|
||||
type App struct {
|
||||
Id uuid.UUID `json:"id,omitempty"`
|
||||
Name string `json:"name,omitempty"`
|
||||
DeploymentID int64 `json:"deployment_id,omitempty"`
|
||||
DeploymentStatus string `json:"deployment_status,omitempty"`
|
||||
}
|
||||
19
pkg/API/deploy.go
Normal file
@@ -0,0 +1,19 @@
|
||||
package API
|
||||
|
||||
import (
|
||||
"mime/multipart"
|
||||
|
||||
"github.com/google/uuid"
|
||||
"github.com/juls0730/flux/pkg"
|
||||
)
|
||||
|
||||
type DeployRequest struct {
|
||||
Id uuid.UUID `form:"id"`
|
||||
Config pkg.ProjectConfig `form:"config"`
|
||||
Code multipart.File `form:"code"`
|
||||
}
|
||||
|
||||
type DeploymentEvent struct {
|
||||
Message any `json:"message"`
|
||||
Stage string `json:"stage"`
|
||||
}
|
||||
8
pkg/API/info.go
Normal file
@@ -0,0 +1,8 @@
|
||||
package API
|
||||
|
||||
type Info struct {
|
||||
// an int between 0 and 9 that represents the compression level, 0 being no compression, 9 being maximum compression
|
||||
CompressionLevel int `json:"compression_level"`
|
||||
// the version of the daemon (see pkg/version.go)
|
||||
Version string `json:"version"`
|
||||
}
|
||||
@@ -1,9 +1,41 @@
|
||||
package pkg
|
||||
|
||||
type ProjectConfig struct {
|
||||
type Volume struct {
|
||||
Mountpoint string `json:"mountpoint,omitempty"`
|
||||
}
|
||||
|
||||
type Container struct {
|
||||
Name string `json:"name,omitempty"`
|
||||
Url string `json:"url,omitempty"`
|
||||
Port uint16 `json:"port,omitempty"`
|
||||
EnvFile string `json:"env_file,omitempty"`
|
||||
ImageName string `json:"image,omitempty"`
|
||||
Volumes []Volume `json:"volumes,omitempty"`
|
||||
Environment []string `json:"environment,omitempty"`
|
||||
}
|
||||
|
||||
type ProjectConfig struct {
|
||||
// name of the app
|
||||
Name string `json:"name,omitempty"`
|
||||
// public url of the app
|
||||
// TODO: support multiple urls
|
||||
Url string `json:"url,omitempty"`
|
||||
// Port the web app listens on from the head container
|
||||
Port uint16 `json:"port,omitempty"`
|
||||
EnvFile string `json:"env_file,omitempty"`
|
||||
// additional environment variables
|
||||
Environment []string `json:"environment,omitempty"`
|
||||
// volumes for the head container
|
||||
Volumes []Volume `json:"volumes,omitempty"`
|
||||
// config for supplemental containers
|
||||
Containers []Container `json:"containers,omitempty"`
|
||||
}
|
||||
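For reference, a hedged sketch of what a `flux.json` matching the `ProjectConfig` shape above could look like, written as Go that marshals the struct. Every value here (app name, URL, port, the redis side container, mountpoints) is invented for illustration and not taken from a real project.

```go
package main

import (
	"encoding/json"
	"os"

	"github.com/juls0730/flux/pkg"
)

// writeExampleConfig emits a flux.json matching the ProjectConfig shape above;
// all of the concrete values are hypothetical.
func writeExampleConfig() error {
	cfg := pkg.ProjectConfig{
		Name:        "my-app",
		Url:         "my-app.example.com",
		Port:        8080,
		Environment: []string{"LOG_LEVEL=debug"},
		Volumes:     []pkg.Volume{{Mountpoint: "/data"}},
		Containers: []pkg.Container{
			{
				Name:      "cache",
				ImageName: "redis:7",
				Volumes:   []pkg.Volume{{Mountpoint: "/data"}},
			},
		},
	}

	f, err := os.Create("flux.json")
	if err != nil {
		return err
	}
	defer f.Close()

	enc := json.NewEncoder(f)
	enc.SetIndent("", "  ")
	return enc.Encode(cfg)
}
```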
|
||||
type DaemonConfig struct {
|
||||
Builder string `json:"builder"`
|
||||
DisableDeleteAll bool `json:"disable_delete_all"`
|
||||
CompressionLevel int `json:"compression_level"`
|
||||
DaemonHost string `json:"host"` // default is 0.0.0.0:5647
|
||||
ProxyHost string `json:"proxy_host"` // default is 0.0.0.0:7465
|
||||
}
|
||||
|
||||
type CLIConfig struct {
|
||||
DaemonURL string `json:"daemon_url,omitempty"`
|
||||
}
|
||||
|
||||
@@ -1,21 +0,0 @@
|
||||
package pkg
|
||||
|
||||
type App struct {
|
||||
ID int64 `json:"id,omitempty"`
|
||||
Name string `json:"name,omitempty"`
|
||||
DeploymentID int64 `json:"deployment_id,omitempty"`
|
||||
DeploymentStatus string `json:"deployment_status,omitempty"`
|
||||
}
|
||||
|
||||
type Compression struct {
|
||||
Enabled bool `json:"enabled"`
|
||||
Level int `json:"level,omitempty"`
|
||||
}
|
||||
|
||||
type Info struct {
|
||||
Compression Compression `json:"compression"`
|
||||
}
|
||||
|
||||
type DeploymentEvent struct {
|
||||
Message interface{} `json:"message"`
|
||||
}
|
||||
3
pkg/version.go
Normal file
@@ -0,0 +1,3 @@
|
||||
package pkg
|
||||
|
||||
const Version = "2025.05.04-00"
|
||||
251
server/app.go
@@ -1,251 +0,0 @@
|
||||
package server
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"sync"
|
||||
|
||||
"github.com/juls0730/flux/pkg"
|
||||
"go.uber.org/zap"
|
||||
)
|
||||
|
||||
type App struct {
|
||||
ID int64 `json:"id,omitempty"`
|
||||
Deployment *Deployment `json:"-"`
|
||||
Name string `json:"name,omitempty"`
|
||||
DeploymentID int64 `json:"deployment_id,omitempty"`
|
||||
}
|
||||
|
||||
func CreateApp(ctx context.Context, imageName string, projectPath string, projectConfig pkg.ProjectConfig) (*App, error) {
|
||||
app := &App{
|
||||
Name: projectConfig.Name,
|
||||
}
|
||||
logger.Debugw("Creating deployment", zap.String("name", app.Name))
|
||||
|
||||
deployment, err := CreateDeployment(projectConfig.Port, projectConfig.Url, Flux.db)
|
||||
app.Deployment = deployment
|
||||
if err != nil {
|
||||
logger.Errorw("Failed to create deployment", zap.Error(err))
|
||||
return nil, err
|
||||
}
|
||||
|
||||
container, err := CreateContainer(ctx, imageName, projectPath, projectConfig, true, deployment)
|
||||
if err != nil || container == nil {
|
||||
return nil, fmt.Errorf("failed to create container: %v", err)
|
||||
}
|
||||
|
||||
if appInsertStmt == nil {
|
||||
appInsertStmt, err = Flux.db.Prepare("INSERT INTO apps (name, deployment_id) VALUES ($1, $2) RETURNING id, name, deployment_id")
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to prepare statement: %v", err)
|
||||
}
|
||||
}
|
||||
|
||||
// create app in the database
|
||||
err = appInsertStmt.QueryRow(projectConfig.Name, deployment.ID).Scan(&app.ID, &app.Name, &app.DeploymentID)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to insert app: %v", err)
|
||||
}
|
||||
|
||||
err = deployment.Start(ctx)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to start deployment: %v", err)
|
||||
}
|
||||
|
||||
Flux.appManager.AddApp(app.Name, app)
|
||||
|
||||
return app, nil
|
||||
}
|
||||
|
||||
func (app *App) Upgrade(ctx context.Context, projectConfig pkg.ProjectConfig, imageName string, projectPath string) error {
|
||||
logger.Debugw("Upgrading deployment", zap.String("name", app.Name))
|
||||
|
||||
// if deploy is not started, start it
|
||||
deploymentStatus, err := app.Deployment.Status(ctx)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to get deployment status: %v", err)
|
||||
}
|
||||
|
||||
if deploymentStatus != "running" {
|
||||
err = app.Deployment.Start(ctx)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to start deployment: %v", err)
|
||||
}
|
||||
}
|
||||
|
||||
err = app.Deployment.Upgrade(ctx, projectConfig, imageName, projectPath)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to upgrade deployment: %v", err)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (app *App) Remove(ctx context.Context) error {
|
||||
Flux.appManager.RemoveApp(app.Name)
|
||||
|
||||
err := app.Deployment.Remove(ctx)
|
||||
if err != nil {
|
||||
logger.Errorw("Failed to remove deployment", zap.Error(err))
|
||||
return err
|
||||
}
|
||||
|
||||
_, err = Flux.db.Exec("DELETE FROM apps WHERE id = ?", app.ID)
|
||||
if err != nil {
|
||||
logger.Errorw("Failed to delete app", zap.Error(err))
|
||||
return err
|
||||
}
|
||||
|
||||
projectPath := filepath.Join(Flux.rootDir, "apps", app.Name)
|
||||
err = os.RemoveAll(projectPath)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to remove project directory: %v", err)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
type AppManager struct {
|
||||
sync.Map
|
||||
}
|
||||
|
||||
func (am *AppManager) GetApp(name string) *App {
|
||||
app, exists := am.Load(name)
|
||||
if !exists {
|
||||
return nil
|
||||
}
|
||||
|
||||
return app.(*App)
|
||||
}
|
||||
|
||||
func (am *AppManager) GetAllApps() []*App {
|
||||
var apps []*App
|
||||
am.Range(func(key, value interface{}) bool {
|
||||
if app, ok := value.(*App); ok {
|
||||
apps = append(apps, app)
|
||||
}
|
||||
return true
|
||||
})
|
||||
return apps
|
||||
}
|
||||
|
||||
func (am *AppManager) RemoveApp(name string) {
|
||||
am.Delete(name)
|
||||
}
|
||||
|
||||
func (am *AppManager) AddApp(name string, app *App) {
|
||||
if app.Deployment.Containers == nil || app.Deployment.Head == nil || len(app.Deployment.Containers) == 0 {
|
||||
panic("nil containers")
|
||||
}
|
||||
|
||||
am.Store(name, app)
|
||||
}
|
||||
|
||||
func (am *AppManager) DeleteApp(name string) error {
|
||||
app := am.GetApp(name)
|
||||
if app == nil {
|
||||
return fmt.Errorf("app not found")
|
||||
}
|
||||
|
||||
err := app.Remove(context.Background())
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
am.Delete(name)
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (am *AppManager) Init() {
|
||||
logger.Info("Initializing deployments")
|
||||
|
||||
if Flux.db == nil {
|
||||
logger.Panic("DB is nil")
|
||||
}
|
||||
|
||||
rows, err := Flux.db.Query("SELECT id, name, deployment_id FROM apps")
|
||||
if err != nil {
|
||||
logger.Warnw("Failed to query apps", zap.Error(err))
|
||||
return
|
||||
}
|
||||
defer rows.Close()
|
||||
|
||||
var apps []App
|
||||
for rows.Next() {
|
||||
var app App
|
||||
if err := rows.Scan(&app.ID, &app.Name, &app.DeploymentID); err != nil {
|
||||
logger.Warnw("Failed to scan app", zap.Error(err))
|
||||
return
|
||||
}
|
||||
apps = append(apps, app)
|
||||
}
|
||||
|
||||
for _, app := range apps {
|
||||
deployment := &Deployment{}
|
||||
var headContainer *Container
|
||||
Flux.db.QueryRow("SELECT id, url, port FROM deployments WHERE id = ?", app.DeploymentID).Scan(&deployment.ID, &deployment.URL, &deployment.Port)
|
||||
deployment.Containers = make([]*Container, 0)
|
||||
|
||||
rows, err = Flux.db.Query("SELECT id, container_id, deployment_id, head FROM containers WHERE deployment_id = ?", app.DeploymentID)
|
||||
if err != nil {
|
||||
logger.Warnw("Failed to query containers", zap.Error(err))
|
||||
return
|
||||
}
|
||||
defer rows.Close()
|
||||
|
||||
for rows.Next() {
|
||||
var container Container
|
||||
var containerIDString string
|
||||
rows.Scan(&container.ID, &containerIDString, &container.DeploymentID, &container.Head)
|
||||
container.Deployment = deployment
|
||||
copy(container.ContainerID[:], containerIDString)
|
||||
|
||||
if container.Head {
|
||||
if headContainer != nil {
|
||||
logger.Fatal("Several containers are marked as head")
|
||||
}
|
||||
|
||||
headContainer = &container
|
||||
}
|
||||
|
||||
rows, err := Flux.db.Query("SELECT id, volume_id, container_id, mountpoint FROM volumes WHERE container_id = ?", container.ContainerID[:])
|
||||
if err != nil {
|
||||
logger.Warnw("Failed to query volumes", zap.Error(err))
|
||||
return
|
||||
}
|
||||
defer rows.Close()
|
||||
|
||||
for rows.Next() {
|
||||
var volume Volume
|
||||
rows.Scan(&volume.ID, &volume.VolumeID, &volume.ContainerID, &volume.Mountpoint)
|
||||
container.Volumes = append(container.Volumes, volume)
|
||||
}
|
||||
|
||||
deployment.Containers = append(deployment.Containers, &container)
|
||||
}
|
||||
|
||||
if headContainer == nil {
|
||||
logger.Fatal("head container is nil!")
|
||||
}
|
||||
|
||||
deployment.Head = headContainer
|
||||
app.Deployment = deployment
|
||||
am.AddApp(app.Name, &app)
|
||||
|
||||
status, err := deployment.Status(context.Background())
|
||||
if err != nil {
|
||||
logger.Warnw("Failed to get deployment status", zap.Error(err))
|
||||
continue
|
||||
}
|
||||
|
||||
if status != "running" {
|
||||
continue
|
||||
}
|
||||
|
||||
deployment.Proxy, _ = deployment.NewDeploymentProxy()
|
||||
Flux.proxy.AddDeployment(deployment)
|
||||
}
|
||||
}
|
||||
@@ -1,390 +0,0 @@
|
||||
package server
|
||||
|
||||
import (
|
||||
"context"
|
||||
"database/sql"
|
||||
"fmt"
|
||||
"net/http"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"github.com/docker/docker/api/types/container"
|
||||
"github.com/docker/docker/api/types/mount"
|
||||
"github.com/docker/docker/api/types/volume"
|
||||
"github.com/joho/godotenv"
|
||||
"github.com/juls0730/flux/pkg"
|
||||
"go.uber.org/zap"
|
||||
)
|
||||
|
||||
var (
|
||||
volumeInsertStmt *sql.Stmt
|
||||
volumeUpdateStmt *sql.Stmt
|
||||
containerInsertStmt *sql.Stmt
|
||||
)
|
||||
|
||||
type Volume struct {
|
||||
ID int64 `json:"id"`
|
||||
VolumeID string `json:"volume_id"`
|
||||
Mountpoint string `json:"mountpoint"`
|
||||
ContainerID string `json:"container_id"`
|
||||
}
|
||||
|
||||
type Container struct {
|
||||
ID int64 `json:"id"`
|
||||
Head bool `json:"head"` // if the container is the head of the deployment
|
||||
Deployment *Deployment `json:"-"`
|
||||
Volumes []Volume `json:"volumes"`
|
||||
ContainerID [64]byte `json:"container_id"`
|
||||
DeploymentID int64 `json:"deployment_id"`
|
||||
}
|
||||
|
||||
func CreateDockerVolume(ctx context.Context) (vol *Volume, err error) {
|
||||
dockerVolume, err := Flux.dockerClient.VolumeCreate(ctx, volume.CreateOptions{
|
||||
Driver: "local",
|
||||
DriverOpts: map[string]string{},
|
||||
})
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to create volume: %v", err)
|
||||
}
|
||||
|
||||
logger.Debugw("Volume created", zap.String("volume_id", dockerVolume.Name), zap.String("mountpoint", dockerVolume.Mountpoint))
|
||||
|
||||
vol = &Volume{
|
||||
VolumeID: dockerVolume.Name,
|
||||
}
|
||||
|
||||
return vol, nil
|
||||
}
|
||||
|
||||
func CreateDockerContainer(ctx context.Context, imageName, projectPath string, projectConfig pkg.ProjectConfig, vol *Volume) (*Container, error) {
|
||||
containerName := fmt.Sprintf("%s-%s", projectConfig.Name, time.Now().Format("20060102-150405"))
|
||||
|
||||
if projectConfig.EnvFile != "" {
|
||||
envBytes, err := os.Open(filepath.Join(projectPath, projectConfig.EnvFile))
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to open env file: %v", err)
|
||||
}
|
||||
defer envBytes.Close()
|
||||
|
||||
envVars, err := godotenv.Parse(envBytes)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to parse env file: %v", err)
|
||||
}
|
||||
|
||||
for key, value := range envVars {
|
||||
projectConfig.Environment = append(projectConfig.Environment, fmt.Sprintf("%s=%s", key, value))
|
||||
}
|
||||
}
|
||||
|
||||
logger.Debugw("Creating container", zap.String("container_id", containerName))
|
||||
resp, err := Flux.dockerClient.ContainerCreate(ctx, &container.Config{
|
||||
Image: imageName,
|
||||
Env: projectConfig.Environment,
|
||||
Volumes: map[string]struct{}{
|
||||
vol.VolumeID: {},
|
||||
},
|
||||
},
|
||||
&container.HostConfig{
|
||||
RestartPolicy: container.RestartPolicy{Name: container.RestartPolicyUnlessStopped},
|
||||
NetworkMode: "bridge",
|
||||
Mounts: []mount.Mount{
|
||||
{
|
||||
Type: mount.TypeVolume,
|
||||
Source: vol.VolumeID,
|
||||
Target: vol.Mountpoint,
|
||||
ReadOnly: false,
|
||||
},
|
||||
},
|
||||
},
|
||||
nil,
|
||||
nil,
|
||||
containerName,
|
||||
)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
c := &Container{
|
||||
ContainerID: [64]byte([]byte(resp.ID)),
|
||||
Volumes: []Volume{*vol},
|
||||
}
|
||||
|
||||
return c, nil
|
||||
}
|
||||
|
||||
func CreateContainer(ctx context.Context, imageName, projectPath string, projectConfig pkg.ProjectConfig, head bool, deployment *Deployment) (c *Container, err error) {
|
||||
logger.Debugw("Creating container with image", zap.String("image", imageName))
|
||||
|
||||
if projectConfig.EnvFile != "" {
|
||||
envBytes, err := os.Open(filepath.Join(projectPath, projectConfig.EnvFile))
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to open env file: %v", err)
|
||||
}
|
||||
defer envBytes.Close()
|
||||
|
||||
envVars, err := godotenv.Parse(envBytes)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to parse env file: %v", err)
|
||||
}
|
||||
|
||||
for key, value := range envVars {
|
||||
projectConfig.Environment = append(projectConfig.Environment, fmt.Sprintf("%s=%s", key, value))
|
||||
}
|
||||
}
|
||||
|
||||
var vol *Volume
|
||||
vol, err = CreateDockerVolume(ctx)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
vol.Mountpoint = "/workspace"
|
||||
|
||||
if volumeInsertStmt == nil {
|
||||
volumeInsertStmt, err = Flux.db.Prepare("INSERT INTO volumes (volume_id, mountpoint, container_id) VALUES (?, ?, ?) RETURNING id, volume_id, mountpoint, container_id")
|
||||
if err != nil {
|
||||
logger.Errorw("Failed to prepare statement", zap.Error(err))
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
|
||||
c, err = CreateDockerContainer(ctx, imageName, projectPath, projectConfig, vol)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if containerInsertStmt == nil {
|
||||
containerInsertStmt, err = Flux.db.Prepare("INSERT INTO containers (container_id, head, deployment_id) VALUES ($1, $2, $3) RETURNING id, container_id, head, deployment_id")
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
|
||||
var containerIDString string
|
||||
err = containerInsertStmt.QueryRow(c.ContainerID[:], head, deployment.ID).Scan(&c.ID, &containerIDString, &c.Head, &c.DeploymentID)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
copy(c.ContainerID[:], containerIDString)
|
||||
|
||||
err = volumeInsertStmt.QueryRow(vol.VolumeID, vol.Mountpoint, c.ContainerID[:]).Scan(&vol.ID, &vol.VolumeID, &vol.Mountpoint, &vol.ContainerID)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
c.Deployment = deployment
|
||||
if head {
|
||||
deployment.Head = c
|
||||
}
|
||||
deployment.Containers = append(deployment.Containers, c)
|
||||
|
||||
return c, nil
|
||||
}
|
||||
|
||||
func (c *Container) Upgrade(ctx context.Context, imageName, projectPath string, projectConfig pkg.ProjectConfig) (*Container, error) {
|
||||
// Create new container with new image
|
||||
logger.Debugw("Upgrading container", zap.ByteString("container_id", c.ContainerID[:12]))
|
||||
if c.Volumes == nil {
|
||||
return nil, fmt.Errorf("no volumes found for container %s", c.ContainerID[:12])
|
||||
}
|
||||
|
||||
vol := &c.Volumes[0]
|
||||
|
||||
newContainer, err := CreateDockerContainer(ctx, imageName, projectPath, projectConfig, vol)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
newContainer.Deployment = c.Deployment
|
||||
|
||||
if containerInsertStmt == nil {
|
||||
containerInsertStmt, err = Flux.db.Prepare("INSERT INTO containers (container_id, head, deployment_id) VALUES ($1, $2, $3) RETURNING id, container_id, head, deployment_id")
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
|
||||
var containerIDString string
|
||||
err = containerInsertStmt.QueryRow(newContainer.ContainerID[:], c.Head, c.Deployment.ID).Scan(&newContainer.ID, &containerIDString, &newContainer.Head, &newContainer.DeploymentID)
|
||||
if err != nil {
|
||||
logger.Errorw("Failed to insert container", zap.Error(err))
|
||||
return nil, err
|
||||
}
|
||||
copy(newContainer.ContainerID[:], containerIDString)
|
||||
|
||||
if volumeUpdateStmt == nil {
|
||||
volumeUpdateStmt, err = Flux.db.Prepare("UPDATE volumes SET container_id = ? WHERE id = ? RETURNING id, volume_id, mountpoint, container_id")
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
|
||||
vol = &newContainer.Volumes[0]
|
||||
volumeUpdateStmt.QueryRow(newContainer.ContainerID[:], vol.ID).Scan(&vol.ID, &vol.VolumeID, &vol.Mountpoint, &vol.ContainerID)
|
||||
|
||||
logger.Debug("Upgraded container")
|
||||
|
||||
return newContainer, nil
|
||||
}
|
||||
|
||||
func (c *Container) Start(ctx context.Context) error {
|
||||
return Flux.dockerClient.ContainerStart(ctx, string(c.ContainerID[:]), container.StartOptions{})
|
||||
}
|
||||
|
||||
func (c *Container) Stop(ctx context.Context) error {
|
||||
return Flux.dockerClient.ContainerStop(ctx, string(c.ContainerID[:]), container.StopOptions{})
|
||||
}
|
||||
|
||||
func (c *Container) Remove(ctx context.Context) error {
|
||||
err := RemoveDockerContainer(ctx, string(c.ContainerID[:]))
|
||||
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to remove container (%s): %v", c.ContainerID[:12], err)
|
||||
}
|
||||
|
||||
tx, err := Flux.db.Begin()
|
||||
if err != nil {
|
||||
logger.Errorw("Failed to begin transaction", zap.Error(err))
|
||||
return err
|
||||
}
|
||||
|
||||
_, err = tx.Exec("DELETE FROM containers WHERE container_id = ?", c.ContainerID[:])
|
||||
if err != nil {
|
||||
tx.Rollback()
|
||||
return err
|
||||
}
|
||||
|
||||
for _, volume := range c.Volumes {
|
||||
if err := RemoveVolume(ctx, volume.VolumeID); err != nil {
|
||||
tx.Rollback()
|
||||
return fmt.Errorf("failed to remove volume (%s): %v", volume.VolumeID, err)
|
||||
}
|
||||
|
||||
_, err = tx.Exec("DELETE FROM volumes WHERE volume_id = ?", volume.VolumeID)
|
||||
if err != nil {
|
||||
tx.Rollback()
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
if err := tx.Commit(); err != nil {
|
||||
logger.Errorw("Failed to commit transaction", zap.Error(err))
|
||||
return err
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (c *Container) Wait(ctx context.Context, port uint16) error {
|
||||
return WaitForDockerContainer(ctx, string(c.ContainerID[:]), port)
|
||||
}
|
||||
|
||||
func (c *Container) Status(ctx context.Context) (string, error) {
|
||||
containerJSON, err := Flux.dockerClient.ContainerInspect(ctx, string(c.ContainerID[:]))
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
|
||||
return containerJSON.State.Status, nil
|
||||
}
|
||||
|
||||
// RemoveContainer stops and removes a container, but be warned that this will not remove the container from the database
|
||||
func RemoveDockerContainer(ctx context.Context, containerID string) error {
|
||||
if err := Flux.dockerClient.ContainerStop(ctx, containerID, container.StopOptions{}); err != nil {
|
||||
return fmt.Errorf("failed to stop container (%s): %v", containerID[:12], err)
|
||||
}
|
||||
|
||||
if err := Flux.dockerClient.ContainerRemove(ctx, containerID, container.RemoveOptions{}); err != nil {
|
||||
return fmt.Errorf("failed to remove container (%s): %v", containerID[:12], err)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// scuffed af "health check" for docker containers
|
||||
func WaitForDockerContainer(ctx context.Context, containerID string, containerPort uint16) error {
|
||||
ctx, cancel := context.WithTimeout(ctx, 30*time.Second)
|
||||
defer cancel()
|
||||
|
||||
for {
|
||||
select {
|
||||
case <-ctx.Done():
|
||||
return fmt.Errorf("container failed to become ready in time")
|
||||
|
||||
default:
|
||||
containerJSON, err := Flux.dockerClient.ContainerInspect(ctx, containerID)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if containerJSON.State.Running {
|
||||
resp, err := http.Get(fmt.Sprintf("http://%s:%d/", containerJSON.NetworkSettings.IPAddress, containerPort))
|
||||
if err == nil && resp.StatusCode == http.StatusOK {
|
||||
return nil
|
||||
}
|
||||
}
|
||||
|
||||
time.Sleep(time.Second)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func GracefullyRemoveDockerContainer(ctx context.Context, containerID string) error {
|
||||
timeout := 30
|
||||
err := Flux.dockerClient.ContainerStop(ctx, containerID, container.StopOptions{
|
||||
Timeout: &timeout,
|
||||
})
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to stop container: %v", err)
|
||||
}
|
||||
|
||||
ctx, cancel := context.WithTimeout(ctx, time.Duration(timeout)*time.Second)
|
||||
defer cancel()
|
||||
|
||||
for {
|
||||
select {
|
||||
case <-ctx.Done():
|
||||
return Flux.dockerClient.ContainerRemove(ctx, containerID, container.RemoveOptions{})
|
||||
default:
|
||||
containerJSON, err := Flux.dockerClient.ContainerInspect(ctx, containerID)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if !containerJSON.State.Running {
|
||||
return Flux.dockerClient.ContainerRemove(ctx, containerID, container.RemoveOptions{})
|
||||
}
|
||||
|
||||
time.Sleep(time.Second)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func RemoveVolume(ctx context.Context, volumeID string) error {
|
||||
logger.Debugw("Removed volume", zap.String("volume_id", volumeID))
|
||||
|
||||
if err := Flux.dockerClient.VolumeRemove(ctx, volumeID, true); err != nil {
|
||||
return fmt.Errorf("failed to remove volume (%s): %v", volumeID, err)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func findExistingDockerContainers(ctx context.Context, containerPrefix string) (map[string]bool, error) {
|
||||
containers, err := Flux.dockerClient.ContainerList(ctx, container.ListOptions{
|
||||
All: true,
|
||||
})
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
var existingContainers map[string]bool = make(map[string]bool)
|
||||
for _, container := range containers {
|
||||
if strings.HasPrefix(container.Names[0], fmt.Sprintf("/%s-", containerPrefix)) {
|
||||
existingContainers[container.ID] = true
|
||||
}
|
||||
}
|
||||
|
||||
return existingContainers, nil
|
||||
}
|
||||
529
server/deploy.go
@@ -1,529 +0,0 @@
|
||||
package server
|
||||
|
||||
import (
|
||||
"bufio"
|
||||
"context"
|
||||
"database/sql"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"io"
|
||||
"mime/multipart"
|
||||
"net/http"
|
||||
"os/exec"
|
||||
"sync"
|
||||
|
||||
"github.com/juls0730/flux/pkg"
|
||||
"go.uber.org/zap"
|
||||
)
|
||||
|
||||
var (
|
||||
appInsertStmt *sql.Stmt
|
||||
)
|
||||
|
||||
type DeployRequest struct {
|
||||
Config multipart.File `form:"config"`
|
||||
Code multipart.File `form:"code"`
|
||||
}
|
||||
|
||||
type DeployResponse struct {
|
||||
App App `json:"app"`
|
||||
}
|
||||
|
||||
type DeploymentLock struct {
|
||||
mu sync.Mutex
|
||||
deployed map[string]context.CancelFunc
|
||||
}
|
||||
|
||||
func NewDeploymentLock() *DeploymentLock {
|
||||
return &DeploymentLock{
|
||||
deployed: make(map[string]context.CancelFunc),
|
||||
}
|
||||
}
|
||||
|
||||
func (dt *DeploymentLock) StartDeployment(appName string, ctx context.Context) (context.Context, error) {
|
||||
dt.mu.Lock()
|
||||
defer dt.mu.Unlock()
|
||||
|
||||
// Check if the app is already being deployed
|
||||
if _, exists := dt.deployed[appName]; exists {
|
||||
return nil, fmt.Errorf("app %s is already being deployed", appName)
|
||||
}
|
||||
|
||||
// Create a context that can be cancelled
|
||||
ctx, cancel := context.WithCancel(ctx)
|
||||
|
||||
// Store the cancel function
|
||||
dt.deployed[appName] = cancel
|
||||
|
||||
return ctx, nil
|
||||
}
|
||||
|
||||
func (dt *DeploymentLock) CompleteDeployment(appName string) {
|
||||
dt.mu.Lock()
|
||||
defer dt.mu.Unlock()
|
||||
|
||||
// Remove the app from deployed tracking
|
||||
if cancel, exists := dt.deployed[appName]; exists {
|
||||
// Cancel the context
|
||||
cancel()
|
||||
// Remove from map
|
||||
delete(dt.deployed, appName)
|
||||
}
|
||||
}
|
||||
|
||||
var deploymentLock = NewDeploymentLock()
|
||||
|
||||
type DeploymentEvent struct {
|
||||
Stage string `json:"stage"`
|
||||
Message interface{} `json:"message"`
|
||||
StatusCode int `json:"status,omitempty"`
|
||||
}
|
||||
|
||||
func (s *FluxServer) DeployHandler(w http.ResponseWriter, r *http.Request) {
|
||||
if Flux.appManager == nil {
|
||||
panic("App manager is nil")
|
||||
}
|
||||
|
||||
w.Header().Set("Content-Type", "test/event-stream")
|
||||
w.Header().Set("Cache-Control", "no-cache")
|
||||
w.Header().Set("Connection", "keep-alive")
|
||||
|
||||
err := r.ParseMultipartForm(10 << 30) // 10 GiB
|
||||
if err != nil {
|
||||
logger.Errorw("Failed to parse multipart form", zap.Error(err))
|
||||
http.Error(w, err.Error(), http.StatusBadRequest)
|
||||
return
|
||||
}
|
||||
|
||||
var deployRequest DeployRequest
|
||||
deployRequest.Config, _, err = r.FormFile("config")
|
||||
if err != nil {
|
||||
http.Error(w, "No flux.json found", http.StatusBadRequest)
|
||||
return
|
||||
}
|
||||
defer deployRequest.Config.Close()
|
||||
|
||||
var projectConfig pkg.ProjectConfig
|
||||
if err := json.NewDecoder(deployRequest.Config).Decode(&projectConfig); err != nil {
|
||||
logger.Errorw("Failed to decode config", zap.Error(err))
|
||||
|
||||
http.Error(w, "Invalid flux.json", http.StatusBadRequest)
|
||||
return
|
||||
}
|
||||
|
||||
ctx, err := deploymentLock.StartDeployment(projectConfig.Name, r.Context())
|
||||
if err != nil {
|
||||
// This will happen if the app is already being deployed
|
||||
http.Error(w, err.Error(), http.StatusConflict)
|
||||
return
|
||||
}
|
||||
|
||||
go func() {
|
||||
<-ctx.Done()
|
||||
deploymentLock.CompleteDeployment(projectConfig.Name)
|
||||
}()
|
||||
|
||||
flusher, ok := w.(http.Flusher)
|
||||
if !ok {
|
||||
http.Error(w, "Streaming unsupported!", http.StatusInternalServerError)
|
||||
return
|
||||
}
|
||||
|
||||
w.WriteHeader(http.StatusMultiStatus)
|
||||
|
||||
eventChannel := make(chan DeploymentEvent, 10)
|
||||
defer close(eventChannel)
|
||||
|
||||
var wg sync.WaitGroup
|
||||
defer wg.Wait()
|
||||
|
||||
wg.Add(1)
|
||||
go func(w http.ResponseWriter, flusher http.Flusher) {
|
||||
defer wg.Done()
|
||||
|
||||
for {
|
||||
select {
|
||||
case <-ctx.Done():
|
||||
return
|
||||
case event, ok := <-eventChannel:
|
||||
if !ok {
|
||||
return
|
||||
}
|
||||
|
||||
ev := pkg.DeploymentEvent{
|
||||
Message: event.Message,
|
||||
}
|
||||
|
||||
eventJSON, err := json.Marshal(ev)
|
||||
if err != nil {
|
||||
// Write error directly to ResponseWriter
|
||||
jsonErr := json.NewEncoder(w).Encode(err)
|
||||
if jsonErr != nil {
|
||||
fmt.Fprint(w, "data: {\"message\": \"Error encoding error\"}\n\n")
|
||||
return
|
||||
}
|
||||
|
||||
fmt.Fprintf(w, "data: %s\n\n", err.Error())
|
||||
if flusher != nil {
|
||||
flusher.Flush()
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
fmt.Fprintf(w, "event: %s\n", event.Stage)
|
||||
fmt.Fprintf(w, "data: %s\n\n", eventJSON)
|
||||
if flusher != nil {
|
||||
flusher.Flush()
|
||||
}
|
||||
|
||||
if event.Stage == "error" || event.Stage == "complete" {
|
||||
return
|
||||
}
|
||||
}
|
||||
}
|
||||
}(w, flusher)
|
||||
|
||||
eventChannel <- DeploymentEvent{
|
||||
Stage: "start",
|
||||
Message: "Uploading code",
|
||||
}
|
||||
|
||||
deployRequest.Code, _, err = r.FormFile("code")
|
||||
if err != nil {
|
||||
eventChannel <- DeploymentEvent{
|
||||
Stage: "error",
|
||||
Message: "No code archive found",
|
||||
StatusCode: http.StatusBadRequest,
|
||||
}
|
||||
return
|
||||
}
|
||||
defer deployRequest.Code.Close()
|
||||
|
||||
if projectConfig.Name == "" || projectConfig.Url == "" || projectConfig.Port == 0 {
|
||||
eventChannel <- DeploymentEvent{
|
||||
Stage: "error",
|
||||
Message: "Invalid flux.json, a name, url, and port must be specified",
|
||||
StatusCode: http.StatusBadRequest,
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
logger.Infow("Deploying project", zap.String("name", projectConfig.Name), zap.String("url", projectConfig.Url))
|
||||
|
||||
projectPath, err := s.UploadAppCode(deployRequest.Code, projectConfig)
|
||||
if err != nil {
|
||||
logger.Infow("Failed to upload code", zap.Error(err))
|
||||
eventChannel <- DeploymentEvent{
|
||||
Stage: "error",
|
||||
Message: fmt.Sprintf("Failed to upload code: %s", err),
|
||||
StatusCode: http.StatusInternalServerError,
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
// Streams the each line of the pipe into the eventChannel, this closes the pipe when the function exits
|
||||
var pipeGroup sync.WaitGroup
|
||||
|
||||
streamPipe := func(pipe io.ReadCloser) {
|
||||
pipeGroup.Add(1)
|
||||
defer pipeGroup.Done()
|
||||
|
||||
scanner := bufio.NewScanner(pipe)
|
||||
for scanner.Scan() {
|
||||
line := scanner.Text()
|
||||
eventChannel <- DeploymentEvent{
|
||||
Stage: "cmd_output",
|
||||
Message: line,
|
||||
}
|
||||
}
|
||||
|
||||
if err := scanner.Err(); err != nil {
|
||||
eventChannel <- DeploymentEvent{
|
||||
Stage: "error",
|
||||
Message: fmt.Sprintf("Failed to read pipe: %s", err),
|
||||
}
|
||||
logger.Errorw("Error reading pipe", zap.Error(err))
|
||||
}
|
||||
}
|
||||
|
||||
logger.Debugw("Preparing project", zap.String("name", projectConfig.Name))
|
||||
eventChannel <- DeploymentEvent{
|
||||
Stage: "preparing",
|
||||
Message: "Preparing project",
|
||||
}
|
||||
|
||||
prepareCmd := exec.Command("go", "generate")
|
||||
prepareCmd.Dir = projectPath
|
||||
cmdOut, err := prepareCmd.StdoutPipe()
|
||||
if err != nil {
|
||||
logger.Errorw("Failed to get stdout pipe", zap.Error(err))
|
||||
eventChannel <- DeploymentEvent{
|
||||
Stage: "error",
|
||||
Message: fmt.Sprintf("Failed to get stdout pipe: %s", err),
|
||||
StatusCode: http.StatusInternalServerError,
|
||||
}
|
||||
|
||||
return
|
||||
}
|
||||
cmdErr, err := prepareCmd.StderrPipe()
|
||||
if err != nil {
|
||||
logger.Errorw("Failed to get stderr pipe", zap.Error(err))
|
||||
eventChannel <- DeploymentEvent{
|
||||
Stage: "error",
|
||||
Message: fmt.Sprintf("Failed to get stderr pipe: %s", err),
|
||||
StatusCode: http.StatusInternalServerError,
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
err = prepareCmd.Start()
|
||||
if err != nil {
|
||||
logger.Errorw("Failed to prepare project", zap.Error(err))
|
||||
eventChannel <- DeploymentEvent{
|
||||
Stage: "error",
|
||||
Message: fmt.Sprintf("Failed to prepare project: %s", err),
|
||||
StatusCode: http.StatusInternalServerError,
|
||||
}
|
||||
|
||||
return
|
||||
}
|
||||
|
||||
go streamPipe(cmdOut)
|
||||
go streamPipe(cmdErr)
|
||||
|
||||
pipeGroup.Wait()
|
||||
|
||||
err = prepareCmd.Wait()
|
||||
if err != nil {
|
||||
logger.Errorw("Failed to prepare project", zap.Error(err))
|
||||
eventChannel <- DeploymentEvent{
|
||||
Stage: "error",
|
||||
Message: fmt.Sprintf("Failed to prepare project: %s", err),
|
||||
StatusCode: http.StatusInternalServerError,
|
||||
}
|
||||
|
||||
return
|
||||
}
|
||||
|
||||
eventChannel <- DeploymentEvent{
|
||||
Stage: "building",
|
||||
Message: "Building project image",
|
||||
}
|
||||
|
||||
logger.Debugw("Building image for project", zap.String("name", projectConfig.Name))
|
||||
imageName := fmt.Sprintf("flux_%s-image", projectConfig.Name)
|
||||
buildCmd := exec.Command("pack", "build", imageName, "--builder", s.config.Builder)
|
||||
buildCmd.Dir = projectPath
|
||||
cmdOut, err = buildCmd.StdoutPipe()
|
||||
if err != nil {
|
||||
logger.Errorw("Failed to get stdout pipe", zap.Error(err))
|
||||
eventChannel <- DeploymentEvent{
|
||||
Stage: "error",
|
||||
Message: fmt.Sprintf("Failed to get stdout pipe: %s", err),
|
||||
StatusCode: http.StatusInternalServerError,
|
||||
}
|
||||
|
||||
return
|
||||
}
|
||||
cmdErr, err = buildCmd.StderrPipe()
|
||||
if err != nil {
|
||||
logger.Errorw("Failed to get stderr pipe", zap.Error(err))
|
||||
eventChannel <- DeploymentEvent{
|
||||
Stage: "error",
|
||||
Message: fmt.Sprintf("Failed to get stderr pipe: %s", err),
|
||||
StatusCode: http.StatusInternalServerError,
|
||||
}
|
||||
|
||||
return
|
||||
}
|
||||
|
||||
err = buildCmd.Start()
|
||||
if err != nil {
|
||||
logger.Errorw("Failed to build image", zap.Error(err))
|
||||
eventChannel <- DeploymentEvent{
|
||||
Stage: "error",
|
||||
Message: fmt.Sprintf("Failed to build image: %s", err),
|
||||
StatusCode: http.StatusInternalServerError,
|
||||
}
|
||||
|
||||
return
|
||||
}
|
||||
|
||||
go streamPipe(cmdOut)
|
||||
go streamPipe(cmdErr)
|
||||
|
||||
pipeGroup.Wait()
|
||||
|
||||
err = buildCmd.Wait()
|
||||
if err != nil {
|
||||
logger.Errorw("Failed to build image", zap.Error(err))
|
||||
eventChannel <- DeploymentEvent{
|
||||
Stage: "error",
|
||||
Message: fmt.Sprintf("Failed to build image: %s", err),
|
||||
StatusCode: http.StatusInternalServerError,
|
||||
}
|
||||
|
||||
return
|
||||
}
|
||||
|
||||
app := Flux.appManager.GetApp(projectConfig.Name)
|
||||
|
||||
eventChannel <- DeploymentEvent{
|
||||
Stage: "creating",
|
||||
Message: "Creating deployment",
|
||||
}
|
||||
|
||||
if app == nil {
|
||||
app, err = CreateApp(ctx, imageName, projectPath, projectConfig)
|
||||
if err != nil {
|
||||
logger.Errorw("Failed to create app", zap.Error(err))
|
||||
eventChannel <- DeploymentEvent{
|
||||
Stage: "error",
|
||||
Message: fmt.Sprintf("Failed to create app: %s", err),
|
||||
StatusCode: http.StatusInternalServerError,
|
||||
}
|
||||
|
||||
return
|
||||
}
|
||||
} else {
|
||||
err = app.Upgrade(ctx, projectConfig, imageName, projectPath)
|
||||
if err != nil {
|
||||
logger.Errorw("Failed to upgrade app", zap.Error(err))
|
||||
eventChannel <- DeploymentEvent{
|
||||
Stage: "error",
|
||||
Message: fmt.Sprintf("Failed to upgrade app: %s", err),
|
||||
StatusCode: http.StatusInternalServerError,
|
||||
}
|
||||
|
||||
return
|
||||
}
|
||||
}
|
||||
|
||||
eventChannel <- DeploymentEvent{
|
||||
Stage: "complete",
|
||||
Message: app,
|
||||
}
|
||||
|
||||
logger.Infow("App deployed successfully", zap.String("name", app.Name))
|
||||
}
|
||||
|
||||
func (s *FluxServer) StartDeployHandler(w http.ResponseWriter, r *http.Request) {
	name := r.PathValue("name")

	app := Flux.appManager.GetApp(name)
	if app == nil {
		http.Error(w, "App not found", http.StatusNotFound)
		return
	}

	status, err := app.Deployment.Status(r.Context())
	if err != nil {
		http.Error(w, err.Error(), http.StatusInternalServerError)
		return
	}

	if status == "running" {
		http.Error(w, "App is already running", http.StatusBadRequest)
		return
	}

	err = app.Deployment.Start(r.Context())
	if err != nil {
		http.Error(w, err.Error(), http.StatusInternalServerError)
		return
	}

	if app.Deployment.Proxy == nil {
		app.Deployment.Proxy, _ = app.Deployment.NewDeploymentProxy()
	}

	w.WriteHeader(http.StatusOK)
}

func (s *FluxServer) StopDeployHandler(w http.ResponseWriter, r *http.Request) {
	name := r.PathValue("name")

	app := Flux.appManager.GetApp(name)
	if app == nil {
		http.Error(w, "App not found", http.StatusNotFound)
		return
	}

	status, err := app.Deployment.Status(r.Context())
	if err != nil {
		http.Error(w, err.Error(), http.StatusInternalServerError)
		return
	}

	if status == "stopped" {
		http.Error(w, "App is already stopped", http.StatusBadRequest)
		return
	}

	err = app.Deployment.Stop(r.Context())
	if err != nil {
		http.Error(w, err.Error(), http.StatusInternalServerError)
		return
	}

	w.WriteHeader(http.StatusOK)
}

func (s *FluxServer) DeleteDeployHandler(w http.ResponseWriter, r *http.Request) {
	name := r.PathValue("name")

	logger.Debugw("Deleting deployment", zap.String("name", name))

	err := Flux.appManager.DeleteApp(name)

	if err != nil {
		logger.Errorw("Failed to delete app", zap.Error(err))
		http.Error(w, err.Error(), http.StatusNotFound)
		return
	}

	w.WriteHeader(http.StatusOK)
}

func (s *FluxServer) DeleteAllDeploymentsHandler(w http.ResponseWriter, r *http.Request) {
	for _, app := range Flux.appManager.GetAllApps() {
		err := Flux.appManager.DeleteApp(app.Name)
		if err != nil {
			logger.Errorw("Failed to remove app", zap.Error(err))
			http.Error(w, err.Error(), http.StatusInternalServerError)
			return
		}
	}

	w.WriteHeader(http.StatusOK)
}

func (s *FluxServer) ListAppsHandler(w http.ResponseWriter, r *http.Request) {
	// for each app, get the deployment status
	var apps []pkg.App
	for _, app := range Flux.appManager.GetAllApps() {
		var extApp pkg.App
		deploymentStatus, err := app.Deployment.Status(r.Context())
		if err != nil {
			logger.Errorw("Failed to get deployment status", zap.Error(err))
			http.Error(w, err.Error(), http.StatusInternalServerError)
			return
		}

		extApp.ID = app.ID
		extApp.Name = app.Name
		extApp.DeploymentID = app.DeploymentID
		extApp.DeploymentStatus = deploymentStatus
		apps = append(apps, extApp)
	}

	w.Header().Set("Content-Type", "application/json")
	json.NewEncoder(w).Encode(apps)
}

func (s *FluxServer) DaemonInfoHandler(w http.ResponseWriter, r *http.Request) {
	w.Header().Set("Content-Type", "application/json")
	json.NewEncoder(w).Encode(pkg.Info{
		Compression: s.config.Compression,
	})
}
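`ListAppsHandler` returns a JSON array in which each element carries the app's ID, name, deployment ID, and aggregated deployment status. A rough client-side sketch of consuming that response; the daemon address, route, and JSON key names are placeholders, since route registration and the `pkg.App` definition are not part of this diff:

```go
package main

import (
	"encoding/json"
	"fmt"
	"net/http"
)

// appSummary mirrors the fields ListAppsHandler fills in (ID, Name,
// DeploymentID, DeploymentStatus); the JSON tags here are guesses.
type appSummary struct {
	ID               int64  `json:"id"`
	Name             string `json:"name"`
	DeploymentID     int64  `json:"deployment_id"`
	DeploymentStatus string `json:"deployment_status"`
}

func main() {
	// Placeholder address and path; adjust to wherever fluxd serves its API.
	resp, err := http.Get("http://localhost:8080/apps")
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()

	var apps []appSummary
	if err := json.NewDecoder(resp.Body).Decode(&apps); err != nil {
		panic(err)
	}

	for _, app := range apps {
		fmt.Printf("%s (deployment %d): %s\n", app.Name, app.DeploymentID, app.DeploymentStatus)
	}
}
```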
@@ -1,220 +0,0 @@
package server

import (
	"context"
	"database/sql"
	"fmt"

	"github.com/juls0730/flux/pkg"
	"go.uber.org/zap"
)

var (
	deploymentInsertStmt *sql.Stmt
)

type Deployment struct {
	ID         int64            `json:"id"`
	Head       *Container       `json:"head,omitempty"`
	Containers []*Container     `json:"containers,omitempty"`
	Proxy      *DeploymentProxy `json:"-"`
	URL        string           `json:"url"`
	Port       uint16           `json:"port"`
}

// Creates a deployment and containers in the database
func CreateDeployment(port uint16, appUrl string, db *sql.DB) (*Deployment, error) {
	var deployment Deployment
	var err error

	if deploymentInsertStmt == nil {
		deploymentInsertStmt, err = db.Prepare("INSERT INTO deployments (url, port) VALUES ($1, $2) RETURNING id, url, port")
		if err != nil {
			logger.Errorw("Failed to prepare statement", zap.Error(err))
			return nil, err
		}
	}

	err = deploymentInsertStmt.QueryRow(appUrl, port).Scan(&deployment.ID, &deployment.URL, &deployment.Port)
	if err != nil {
		logger.Errorw("Failed to insert deployment", zap.Error(err))
		return nil, err
	}

	return &deployment, nil
}

func (deployment *Deployment) Upgrade(ctx context.Context, projectConfig pkg.ProjectConfig, imageName string, projectPath string) error {
	existingContainers, err := findExistingDockerContainers(ctx, projectConfig.Name)
	if err != nil {
		return fmt.Errorf("failed to find existing containers: %v", err)
	}

	container, err := deployment.Head.Upgrade(ctx, imageName, projectPath, projectConfig)
	if err != nil {
		logger.Errorw("Failed to upgrade container", zap.Error(err))
		return err
	}

	// copy(container.ContainerID[:], containerIDString)
	deployment.Head = container
	deployment.Containers = append(deployment.Containers, container)

	logger.Debugw("Starting container", zap.ByteString("container_id", container.ContainerID[:12]))
	err = container.Start(ctx)
	if err != nil {
		logger.Errorw("Failed to start container", zap.Error(err))
		return err
	}

	if err := container.Wait(ctx, projectConfig.Port); err != nil {
		logger.Errorw("Failed to wait for container", zap.Error(err))
		return err
	}

	if _, err := Flux.db.Exec("UPDATE deployments SET url = ?, port = ? WHERE id = ?", projectConfig.Url, projectConfig.Port, deployment.ID); err != nil {
		logger.Errorw("Failed to update deployment", zap.Error(err))
		return err
	}

	// Create a new proxy that points to the new head, and replace the old one, but ensure that the old one is gracefully shutdown
	oldProxy := deployment.Proxy
	deployment.Proxy, err = deployment.NewDeploymentProxy()
	if err != nil {
		logger.Errorw("Failed to create deployment proxy", zap.Error(err))
		return err
	}

	tx, err := Flux.db.Begin()
	if err != nil {
		logger.Errorw("Failed to begin transaction", zap.Error(err))
		return err
	}

	var containers []*Container
	var oldContainers []*Container
	for _, container := range deployment.Containers {
		if existingContainers[string(container.ContainerID[:])] {
			logger.Debugw("Deleting container from db", zap.ByteString("container_id", container.ContainerID[:12]))

			_, err = tx.Exec("DELETE FROM containers WHERE id = ?", container.ID)
			oldContainers = append(oldContainers, container)

			if err != nil {
				logger.Errorw("Failed to delete container", zap.Error(err))
				tx.Rollback()
				return err
			}

			continue
		}

		containers = append(containers, container)
	}

	if err := tx.Commit(); err != nil {
		logger.Errorw("Failed to commit transaction", zap.Error(err))
		return err
	}

	if oldProxy != nil {
		go oldProxy.GracefulShutdown(oldContainers)
	} else {
		for _, container := range oldContainers {
			err := RemoveDockerContainer(context.Background(), string(container.ContainerID[:]))
			if err != nil {
				logger.Errorw("Failed to remove container", zap.Error(err))
			}
		}
	}

	deployment.Containers = containers
	return nil
}

func (d *Deployment) Remove(ctx context.Context) error {
	for _, container := range d.Containers {
		err := container.Remove(ctx)
		if err != nil {
			logger.Errorf("Failed to remove container (%s): %v\n", container.ContainerID[:12], err)
			return err
		}
	}

	Flux.proxy.RemoveDeployment(d)

	_, err := Flux.db.Exec("DELETE FROM deployments WHERE id = ?", d.ID)
	if err != nil {
		logger.Errorw("Failed to delete deployment", zap.Error(err))
		return err
	}

	return nil
}

func (d *Deployment) Start(ctx context.Context) error {
	for _, container := range d.Containers {
		err := container.Start(ctx)
		if err != nil {
			logger.Errorf("Failed to start container (%s): %v\n", container.ContainerID[:12], err)
			return err
		}
	}

	if d.Proxy == nil {
		d.Proxy, _ = d.NewDeploymentProxy()
		Flux.proxy.AddDeployment(d)
	}

	return nil
}

func (d *Deployment) Stop(ctx context.Context) error {
	for _, container := range d.Containers {
		err := container.Stop(ctx)
		if err != nil {
			logger.Errorf("Failed to stop container (%s): %v\n", container.ContainerID[:12], err)
			return err
		}
	}

	Flux.proxy.RemoveDeployment(d)
	d.Proxy = nil

	return nil
}

func (d *Deployment) Status(ctx context.Context) (string, error) {
	var status string
	if d == nil {
		return "", fmt.Errorf("deployment is nil")
	}

	if d.Containers == nil {
		return "", fmt.Errorf("containers are nil")
	}

	for _, container := range d.Containers {
		containerStatus, err := container.Status(ctx)
		if err != nil {
			logger.Errorw("Failed to get container status", zap.Error(err))
			return "", err
		}

		// if not all containers are in the same state
		if status != "" && status != containerStatus {
			return "", fmt.Errorf("malformed deployment")
		}

		status = containerStatus
	}

	switch status {
	case "running":
		return "running", nil
	case "exited":
		return "stopped", nil
	default:
		return "pending", nil
	}
}

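`CreateDeployment` and `Upgrade` only reference a handful of columns (`deployments.id`, `url`, `port`, plus a `containers` table keyed by `id`); the real schema lives in the embedded `schema.sql`, which is not part of this hunk. Purely as a reading aid, the deployments table implied by those queries looks roughly like this (types and constraints are guesses, not the project's actual schema):

```go
package main

import "fmt"

// guessedDeploymentsTable is NOT the project's schema.sql; it is only the
// minimal shape implied by the INSERT/UPDATE/DELETE statements above.
const guessedDeploymentsTable = `
CREATE TABLE IF NOT EXISTS deployments (
    id   INTEGER PRIMARY KEY AUTOINCREMENT,
    url  TEXT NOT NULL,
    port INTEGER NOT NULL
);`

func main() {
	fmt.Println(guessedDeploymentsTable)
}
```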
117
server/proxy.go
117
server/proxy.go
@@ -1,117 +0,0 @@
package server

import (
	"context"
	"fmt"
	"net/http"
	"net/http/httputil"
	"net/url"
	"sync"
	"sync/atomic"
	"time"

	"go.uber.org/zap"
)

type Proxy struct {
	deployments sync.Map
}

func (p *Proxy) RemoveDeployment(deployment *Deployment) {
	p.deployments.Delete(deployment.URL)
}

func (p *Proxy) AddDeployment(deployment *Deployment) {
	logger.Debugw("Adding deployment", zap.String("url", deployment.URL))
	p.deployments.Store(deployment.URL, deployment)
}

func (p *Proxy) ServeHTTP(w http.ResponseWriter, r *http.Request) {
	host := r.Host

	deployment, ok := p.deployments.Load(host)
	if !ok {
		http.Error(w, "Not found", http.StatusNotFound)
		return
	}

	atomic.AddInt64(&deployment.(*Deployment).Proxy.activeRequests, 1)

	deployment.(*Deployment).Proxy.proxy.ServeHTTP(w, r)
}

type DeploymentProxy struct {
	deployment     *Deployment
	proxy          *httputil.ReverseProxy
	gracePeriod    time.Duration
	activeRequests int64
}

func (deployment *Deployment) NewDeploymentProxy() (*DeploymentProxy, error) {
	if deployment == nil {
		return nil, fmt.Errorf("deployment is nil")
	}

	containerJSON, err := Flux.dockerClient.ContainerInspect(context.Background(), string(deployment.Head.ContainerID[:]))
	if err != nil {
		return nil, err
	}

	if containerJSON.NetworkSettings.IPAddress == "" {
		return nil, fmt.Errorf("no IP address found for container %s", deployment.Head.ContainerID[:12])
	}

	containerUrl, err := url.Parse(fmt.Sprintf("http://%s:%d", containerJSON.NetworkSettings.IPAddress, deployment.Port))
	if err != nil {
		return nil, err
	}

	proxy := &httputil.ReverseProxy{
		Director: func(req *http.Request) {
			req.URL = containerUrl
			req.Host = containerUrl.Host
		},
		Transport: &http.Transport{
			MaxIdleConns:        100,
			IdleConnTimeout:     90 * time.Second,
			MaxIdleConnsPerHost: 100,
		},
		ModifyResponse: func(resp *http.Response) error {
			atomic.AddInt64(&deployment.Proxy.activeRequests, -1)
			return nil
		},
	}

	return &DeploymentProxy{
		deployment:     deployment,
		proxy:          proxy,
		gracePeriod:    time.Second * 30,
		activeRequests: 0,
	}, nil
}

func (dp *DeploymentProxy) GracefulShutdown(oldContainers []*Container) {
	ctx, cancel := context.WithTimeout(context.Background(), dp.gracePeriod)
	defer cancel()

	done := false
	for !done {
		select {
		case <-ctx.Done():
			done = true
		default:
			if atomic.LoadInt64(&dp.activeRequests) == 0 {
				done = true
			}

			time.Sleep(time.Second)
		}
	}

	for _, container := range oldContainers {
		err := RemoveDockerContainer(context.Background(), string(container.ContainerID[:]))
		if err != nil {
			logger.Errorw("Failed to remove container", zap.Error(err))
		}
	}
}

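The proxy pairs an atomic in-flight counter (incremented in `ServeHTTP`, decremented in `ModifyResponse`) with a polling loop in `GracefulShutdown` that waits until the counter hits zero or the 30-second grace period expires before removing the old containers. A stripped-down, self-contained illustration of that drain pattern:

```go
package main

import (
	"context"
	"fmt"
	"sync/atomic"
	"time"
)

// drain blocks until there are no in-flight requests or the grace period
// elapses, mirroring the wait loop in GracefulShutdown above.
func drain(active *int64, gracePeriod time.Duration) {
	ctx, cancel := context.WithTimeout(context.Background(), gracePeriod)
	defer cancel()

	for {
		select {
		case <-ctx.Done():
			return
		default:
			if atomic.LoadInt64(active) == 0 {
				return
			}
			time.Sleep(100 * time.Millisecond)
		}
	}
}

func main() {
	var active int64 = 3

	// Simulate three outstanding requests completing over time.
	go func() {
		for i := 0; i < 3; i++ {
			time.Sleep(200 * time.Millisecond)
			atomic.AddInt64(&active, -1)
		}
	}()

	drain(&active, 5*time.Second)
	fmt.Println("drained; old containers can be removed")
}
```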
253
server/server.go
253
server/server.go
@@ -1,253 +0,0 @@
package server

import (
	"archive/tar"
	"compress/gzip"
	"context"
	"database/sql"
	"encoding/json"
	"fmt"
	"io"
	"net/http"
	"os"
	"path/filepath"
	"strconv"

	_ "embed"

	"github.com/docker/docker/api/types/image"
	"github.com/docker/docker/client"
	"github.com/juls0730/flux/pkg"
	_ "github.com/mattn/go-sqlite3"
	"go.uber.org/zap"
	"go.uber.org/zap/zapcore"
)

var (
	//go:embed schema.sql
	schemaBytes   []byte
	DefaultConfig = FluxServerConfig{
		Builder: "paketobuildpacks/builder-jammy-tiny",
		Compression: pkg.Compression{
			Enabled: false,
			Level:   0,
		},
	}
	Flux   *FluxServer
	logger *zap.SugaredLogger
)

type FluxServerConfig struct {
	Builder     string          `json:"builder"`
	Compression pkg.Compression `json:"compression"`
}

type FluxServer struct {
	config       FluxServerConfig
	db           *sql.DB
	proxy        *Proxy
	rootDir      string
	appManager   *AppManager
	dockerClient *client.Client
	Logger       *zap.SugaredLogger
}

func NewFluxServer() *FluxServer {
	dockerClient, err := client.NewClientWithOpts(client.FromEnv)
	if err != nil {
		logger.Fatalw("Failed to create docker client", zap.Error(err))
	}

	rootDir := os.Getenv("FLUXD_ROOT_DIR")
	if rootDir == "" {
		rootDir = "/var/fluxd"
	}

	if err := os.MkdirAll(rootDir, 0755); err != nil {
		logger.Fatalw("Failed to create fluxd directory", zap.Error(err))
	}

	db, err := sql.Open("sqlite3", filepath.Join(rootDir, "fluxd.db"))
	if err != nil {
		logger.Fatalw("Failed to open database", zap.Error(err))
	}

	_, err = db.Exec(string(schemaBytes))
	if err != nil {
		logger.Fatalw("Failed to create database schema", zap.Error(err))
	}

	return &FluxServer{
		db:           db,
		proxy:        &Proxy{},
		appManager:   new(AppManager),
		rootDir:      rootDir,
		dockerClient: dockerClient,
	}
}

func (s *FluxServer) Stop() {
	s.Logger.Sync()
}

func NewServer() *FluxServer {
	verbosity, err := strconv.Atoi(os.Getenv("FLUXD_VERBOSITY"))
	if err != nil {
		verbosity = 0
	}

	config := zap.NewProductionConfig()

	if os.Getenv("DEBUG") == "true" {
		config = zap.NewDevelopmentConfig()
		verbosity = -1
	}

	config.Level = zap.NewAtomicLevelAt(zapcore.Level(verbosity))

	lameLogger, err := config.Build()
	logger = lameLogger.Sugar()

	if err != nil {
		logger.Fatalw("Failed to create logger", zap.Error(err))
	}

	Flux = NewFluxServer()
	Flux.Logger = logger

	var serverConfig FluxServerConfig

	// parse config, if it doesn't exist, create it and use the default config
	configPath := filepath.Join(Flux.rootDir, "config.json")
	if _, err := os.Stat(configPath); err != nil {
		if err := os.MkdirAll(Flux.rootDir, 0755); err != nil {
			logger.Fatalw("Failed to create fluxd directory", zap.Error(err))
		}

		configBytes, err := json.Marshal(DefaultConfig)
		if err != nil {
			logger.Fatalw("Failed to marshal default config", zap.Error(err))
		}

		logger.Debugw("Config file not found creating default config file at", zap.String("path", configPath))
		if err := os.WriteFile(configPath, configBytes, 0644); err != nil {
			logger.Fatalw("Failed to write config file", zap.Error(err))
		}
	}

	configFile, err := os.ReadFile(configPath)
	if err != nil {
		logger.Fatalw("Failed to read config file", zap.Error(err))
	}

	if err := json.Unmarshal(configFile, &serverConfig); err != nil {
		logger.Fatalw("Failed to parse config file", zap.Error(err))
	}

	Flux.config = serverConfig

	logger.Infof("Pulling builder image %s this may take a while...", serverConfig.Builder)
	events, err := Flux.dockerClient.ImagePull(context.Background(), fmt.Sprintf("%s:latest", serverConfig.Builder), image.PullOptions{})
	if err != nil {
		logger.Fatalw("Failed to pull builder image", zap.Error(err))
	}

	// blocking wait for the image to be pulled
	io.Copy(io.Discard, events)

	logger.Infow("Successfully pulled builder image", zap.String("image", serverConfig.Builder))

	if err := os.MkdirAll(filepath.Join(Flux.rootDir, "apps"), 0755); err != nil {
		logger.Fatalw("Failed to create apps directory", zap.Error(err))
	}

	Flux.appManager.Init()

	port := os.Getenv("FLUXD_PROXY_PORT")
	if port == "" {
		port = "7465"
	}

	go func() {
		logger.Infof("Proxy server starting on http://127.0.0.1:%s", port)
		if err := http.ListenAndServe(fmt.Sprintf(":%s", port), Flux.proxy); err != nil && err != http.ErrServerClosed {
			logger.Fatalw("Proxy server error", zap.Error(err))
		}
	}()

	return Flux
}

func (s *FluxServer) UploadAppCode(code io.Reader, projectConfig pkg.ProjectConfig) (string, error) {
	var err error
	projectPath := filepath.Join(s.rootDir, "apps", projectConfig.Name)
	if err = os.MkdirAll(projectPath, 0755); err != nil {
		logger.Errorw("Failed to create project directory", zap.Error(err))
		return "", err
	}

	var gzReader *gzip.Reader
	defer func() {
		if gzReader != nil {
			gzReader.Close()
		}
	}()

	if s.config.Compression.Enabled {
		gzReader, err = gzip.NewReader(code)
		if err != nil {
			logger.Infow("Failed to create gzip reader", zap.Error(err))
			return "", err
		}
	}
	var tarReader *tar.Reader

	if gzReader != nil {
		tarReader = tar.NewReader(gzReader)
	} else {
		tarReader = tar.NewReader(code)
	}

	logger.Infow("Extracting files for project", zap.String("project", projectPath))
	for {
		header, err := tarReader.Next()
		if err == io.EOF {
			break
		}
		if err != nil {
			logger.Debugw("Failed to read tar header", zap.Error(err))
			return "", err
		}

		// Construct full path
		path := filepath.Join(projectPath, header.Name)

		// Handle different file types
		switch header.Typeflag {
		case tar.TypeDir:
			if err = os.MkdirAll(path, 0755); err != nil {
				logger.Debugw("Failed to extract directory", zap.Error(err))
				return "", err
			}
		case tar.TypeReg:
			if err = os.MkdirAll(filepath.Dir(path), 0755); err != nil {
				logger.Debugw("Failed to extract directory", zap.Error(err))
				return "", err
			}

			outFile, err := os.Create(path)
			if err != nil {
				logger.Debugw("Failed to extract file", zap.Error(err))
				return "", err
			}
			defer outFile.Close()

			if _, err = io.Copy(outFile, tarReader); err != nil {
				logger.Debugw("Failed to copy file during extraction", zap.Error(err))
				return "", err
			}
		}
	}

	return projectPath, nil
}

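`UploadAppCode` consumes the uploaded project as a tar stream, wrapped in gzip only when compression is enabled in the daemon config. For illustration, a client could produce such an archive roughly like this (the helper below always gzips and is only a sketch, not the flux CLI's actual packaging code):

```go
package main

import (
	"archive/tar"
	"compress/gzip"
	"io"
	"os"
	"path/filepath"
)

// tarGzDir writes a gzipped tarball of dir to w, storing paths relative to dir.
func tarGzDir(dir string, w io.Writer) error {
	gz := gzip.NewWriter(w)
	defer gz.Close()
	tw := tar.NewWriter(gz)
	defer tw.Close()

	return filepath.Walk(dir, func(path string, info os.FileInfo, err error) error {
		if err != nil {
			return err
		}

		rel, err := filepath.Rel(dir, path)
		if err != nil || rel == "." {
			return err
		}

		header, err := tar.FileInfoHeader(info, "")
		if err != nil {
			return err
		}
		header.Name = filepath.ToSlash(rel)

		if err := tw.WriteHeader(header); err != nil {
			return err
		}
		if info.IsDir() {
			return nil
		}

		f, err := os.Open(path)
		if err != nil {
			return err
		}
		defer f.Close()
		_, err = io.Copy(tw, f)
		return err
	})
}

func main() {
	// Write the archive outside the project directory so it is not included
	// in its own tarball.
	out, err := os.Create(filepath.Join(os.TempDir(), "app.tar.gz"))
	if err != nil {
		panic(err)
	}
	defer out.Close()

	if err := tarGzDir(".", out); err != nil {
		panic(err)
	}
}
```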
@@ -1,13 +1,14 @@
 {
   "name": "Flux",
   "version": "0.0.1",
-  "description": "Flux is a lightweight self-hosted pseudo-paas for golang web apps.",
+  "description": "Flux is a lightweight self-hosted micro-paas for golang web apps.",
   "author": "juls0730",
   "license": "MIT",
   "scripts": {
-    "build:daemon": "go build -o fluxd cmd/fluxd/main.go",
-    "build:cli": "go build -o flux cmd/flux/main.go",
-    "run:daemon": "go run cmd/fluxd/main.go",
+    "build:daemon": "go build -o fluxd cmd/daemon/main.go",
+    "build:cli": "go build -o flux cmd/cli/main.go",
+    "build:all": "go build -o fluxd cmd/daemon/main.go && go build -o flux cmd/cli/main.go",
+    "run:daemon": "go run cmd/daemon/main.go",
     "run:cli": "go run cmd/flux/main.go"
   },
   "pattern": "**/*.go",