feat(ota): upgrade container
Parent: a88f8646e6
Commit: e2c43be24a
19 changed files with 708 additions and 28 deletions
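Overview: this commit adds a three-step over-the-air (OTA) upgrade path for the Docker deployment. Step 1 (docker.UpgradeStepOne, run inside the current nginx-ui container) resolves the latest release tag for the chosen channel, pulls uozi/nginx-ui:<tag>, and starts a temporary nginx-ui-temp-<timestamp> container whose command is "./nginx-ui upgrade-docker-step2". Step 2 (docker.UpgradeStepTwo, run inside that temp container) stops the old container, renames it with an _old suffix, recreates it from the new image under the original name, and rolls back if creation or startup fails. Step 3 (docker.UpgradeStepThree, run at boot of the upgraded container via CheckAndCleanupOTAContainers) removes the _old container and any leftover temp containers. Only part of the 19 changed files is reproduced in this excerpt; in particular, the caller that triggers step 1 is not shown. As a rough sketch only (package and function names below are illustrative, not taken from the commit), such a trigger could look like:

// Hypothetical sketch, not part of this excerpt: a caller that kicks off step 1.
// The handler that actually does this is among the 19 changed files but is not
// shown here; the package and function names below are illustrative.
package api

import (
	"github.com/0xJacky/Nginx-UI/internal/docker"
	"github.com/uozi-tech/cosy/logger"
)

// triggerDockerOTA starts the Docker OTA upgrade for the given release channel.
func triggerDockerOTA(channel string) {
	if err := docker.UpgradeStepOne(channel); err != nil {
		logger.Error("Docker OTA upgrade step 1 failed:", err)
		return
	}
	// From here on, the temp container performs step 2 and the upgraded
	// container performs step 3 (cleanup) on its next boot.
	logger.Info("Docker OTA upgrade step 1 completed; temp container is taking over")
}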
@@ -26,10 +26,11 @@ func NewAppCmd() *cli.Command {
 			},
 		},
 		{
 			Name:   "reset-password",
 			Usage:  "Reset the initial user password",
 			Action: user.ResetInitUserPassword,
 		},
+		UpgradeDockerStep2Command,
 	},
 	Flags: []cli.Flag{
 		&cli.StringFlag{
internal/cmd/upgrade_docker.go (new file, 23 lines)
@@ -0,0 +1,23 @@
package cmd

import (
	"context"

	"github.com/0xJacky/Nginx-UI/internal/docker"
	"github.com/uozi-tech/cosy/logger"
	"github.com/urfave/cli/v3"
)

// Command to be executed in the temporary container
var UpgradeDockerStep2Command = &cli.Command{
	Name:   "upgrade-docker-step2",
	Usage:  "Execute the second step of Docker container upgrade (to be run inside the temp container)",
	Action: UpgradeDockerStep2,
}

// UpgradeDockerStep2 executes the second step in the temporary container
func UpgradeDockerStep2(ctx context.Context, command *cli.Command) error {
	logger.Info("Starting Docker OTA upgrade step 2 from CLI...")

	return docker.UpgradeStepTwo(ctx)
}
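The temp container created in step 1 (see internal/docker/ota.go below) is started with the command "./nginx-ui upgrade-docker-step2", which dispatches to the command defined above. For readers unfamiliar with urfave/cli v3, here is a standalone sketch of how such a subcommand is wired and run; only the command name matches the commit, the stub action and everything else is illustrative:

// Standalone illustration of urfave/cli v3 subcommand dispatch; the real
// command calls docker.UpgradeStepTwo(ctx) instead of printing.
package main

import (
	"context"
	"fmt"
	"os"

	"github.com/urfave/cli/v3"
)

func main() {
	root := &cli.Command{
		Name: "nginx-ui",
		Commands: []*cli.Command{
			{
				Name:  "upgrade-docker-step2",
				Usage: "illustrative stand-in for UpgradeDockerStep2Command",
				Action: func(ctx context.Context, cmd *cli.Command) error {
					fmt.Println("step 2 would run here")
					return nil
				},
			},
		},
	}
	// Running "./nginx-ui upgrade-docker-step2" ends up in the Action above.
	if err := root.Run(context.Background(), os.Args); err != nil {
		fmt.Fprintln(os.Stderr, err)
		os.Exit(1)
	}
}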
@@ -12,4 +12,9 @@ var (
 	ErrContainerStatusUnknown            = e.New(500006, "container status unknown")
 	ErrInspectContainer                  = e.New(500007, "failed to inspect container: {0}")
 	ErrNginxNotRunningInAnotherContainer = e.New(500008, "nginx is not running in another container")
+	ErrFailedToGetHostname               = e.New(500009, "failed to get hostname: {0}")
+	ErrFailedToPullImage                 = e.New(500010, "failed to pull image: {0}")
+	ErrFailedToInspectCurrentContainer   = e.New(500011, "failed to inspect current container: {0}")
+	ErrFailedToCreateTempContainer       = e.New(500012, "failed to create temp container: {0}")
+	ErrFailedToStartTempContainer        = e.New(500013, "failed to start temp container: {0}")
 )
internal/docker/ota.go (new file, 329 lines)
@@ -0,0 +1,329 @@
package docker

import (
	"context"
	"fmt"
	"io"
	"os"
	"strings"
	"time"

	"github.com/0xJacky/Nginx-UI/internal/upgrader"
	"github.com/docker/docker/api/types/container"
	"github.com/docker/docker/api/types/image"
	"github.com/docker/docker/client"
	"github.com/pkg/errors"
	"github.com/uozi-tech/cosy"
	"github.com/uozi-tech/cosy/logger"
)

const (
	ImageName  = "uozi/nginx-ui"
	TempPrefix = "nginx-ui-temp-"
	OldSuffix  = "_old"
)

// getTimestampedTempName returns a temporary container name with timestamp
func getTimestampedTempName() string {
	return fmt.Sprintf("%s%d", TempPrefix, time.Now().Unix())
}

// removeAllTempContainers removes all containers with the TempPrefix
func removeAllTempContainers(ctx context.Context, cli *client.Client) (err error) {
	containers, err := cli.ContainerList(ctx, container.ListOptions{All: true})
	if err != nil {
		return
	}

	for _, c := range containers {
		for _, name := range c.Names {
			processedName := strings.TrimPrefix(name, "/")
			if strings.HasPrefix(processedName, TempPrefix) {
				err = cli.ContainerRemove(ctx, c.ID, container.RemoveOptions{Force: true})
				if err != nil {
					logger.Error("Failed to remove temp container:", err)
				} else {
					logger.Info("Successfully removed temp container:", processedName)
				}
				break
			}
		}
	}

	return nil
}

// UpgradeStepOne Trigger in the OTA upgrade
func UpgradeStepOne(channel string) (err error) {
	ctx := context.Background()

	// 1. Get the tag of the latest release
	release, err := upgrader.GetRelease(channel)
	if err != nil {
		return err
	}
	tag := release.TagName

	// 2. Pull the image
	cli, err := initClient()
	if err != nil {
		return cosy.WrapErrorWithParams(ErrClientNotInitialized, err.Error())
	}
	defer cli.Close()

	// Pull the image with the specified tag
	out, err := cli.ImagePull(ctx, fmt.Sprintf("%s:%s", ImageName, tag), image.PullOptions{})
	if err != nil {
		return cosy.WrapErrorWithParams(ErrFailedToPullImage, err.Error())
	}
	defer out.Close()

	// Wait for pull to complete by reading the output
	io.Copy(os.Stdout, out)

	// 3. Create a temp container
	// Clean up any existing temp containers
	err = removeAllTempContainers(ctx, cli)
	if err != nil {
		logger.Error("Failed to clean up existing temp containers:", err)
		// Continue execution despite cleanup errors
	}

	// Generate timestamped temp container name
	tempContainerName := getTimestampedTempName()

	// Get current container name
	hostname, err := os.Hostname()
	if err != nil {
		return cosy.WrapErrorWithParams(ErrFailedToGetHostname, err.Error())
	}
	containerInfo, err := cli.ContainerInspect(ctx, hostname)
	if err != nil {
		return cosy.WrapErrorWithParams(ErrFailedToInspectCurrentContainer, err.Error())
	}
	currentContainerName := strings.TrimPrefix(containerInfo.Name, "/")

	// Set up the command for the temp container to execute step 2
	upgradeCmd := []string{"./nginx-ui", "upgrade-docker-step2"}

	// Add old container name as environment variable
	containerEnv := containerInfo.Config.Env
	containerEnv = append(containerEnv, fmt.Sprintf("NGINX_UI_CONTAINER_NAME=%s", currentContainerName))

	// Create temp container using new image
	_, err = cli.ContainerCreate(
		ctx,
		&container.Config{
			Image: fmt.Sprintf("%s:%s", ImageName, tag),
			Cmd:   upgradeCmd, // Use upgrade command instead of original command
			Env:   containerEnv,
		},
		&container.HostConfig{
			Binds:         containerInfo.HostConfig.Binds,
			PortBindings:  containerInfo.HostConfig.PortBindings,
			RestartPolicy: containerInfo.HostConfig.RestartPolicy,
		},
		nil,
		nil,
		tempContainerName,
	)
	if err != nil {
		return cosy.WrapErrorWithParams(ErrFailedToCreateTempContainer, err.Error())
	}

	// Start the temp container to execute step 2
	err = cli.ContainerStart(ctx, tempContainerName, container.StartOptions{})
	if err != nil {
		return cosy.WrapErrorWithParams(ErrFailedToStartTempContainer, err.Error())
	}

	// Output status information
	logger.Info("Docker OTA upgrade step 1 completed. Temp container started to execute step 2.")

	return nil
}

// UpgradeStepTwo Trigger in the temp container
func UpgradeStepTwo(ctx context.Context) (err error) {
	// 1. Copy the old config
	cli, err := initClient()
	if err != nil {
		return
	}
	defer cli.Close()

	// Get old container name from environment variable, fallback to settings if not available
	currentContainerName := os.Getenv("NGINX_UI_CONTAINER_NAME")
	if currentContainerName == "" {
		return errors.New("could not find old container name")
	}
	// Get the current running temp container name
	// Since we can't directly get our own container name from inside, we'll search all temp containers
	containers, err := cli.ContainerList(ctx, container.ListOptions{All: true})
	if err != nil {
		return errors.Wrap(err, "failed to list containers")
	}

	// Find containers with the temp prefix
	var tempContainerName string
	for _, c := range containers {
		for _, name := range c.Names {
			processedName := strings.TrimPrefix(name, "/")
			if strings.HasPrefix(processedName, TempPrefix) {
				tempContainerName = processedName
				break
			}
		}
		if tempContainerName != "" {
			break
		}
	}

	if tempContainerName == "" {
		return errors.New("could not find temp container")
	}

	// Get temp container info to get the new image
	tempContainerInfo, err := cli.ContainerInspect(ctx, tempContainerName)
	if err != nil {
		return errors.Wrap(err, "failed to inspect temp container")
	}
	newImage := tempContainerInfo.Config.Image

	// Get current container info
	oldContainerInfo, err := cli.ContainerInspect(ctx, currentContainerName)
	if err != nil {
		return errors.Wrap(err, "failed to inspect current container")
	}

	// 2. Stop the old container and rename to _old
	err = cli.ContainerStop(ctx, currentContainerName, container.StopOptions{})
	if err != nil {
		return errors.Wrap(err, "failed to stop current container")
	}

	// Rename the old container with _old suffix
	err = cli.ContainerRename(ctx, currentContainerName, currentContainerName+OldSuffix)
	if err != nil {
		return errors.Wrap(err, "failed to rename old container")
	}

	// 3. Use the old config to create and start a new container with the updated image
	// Create new container with original config but using the new image
	newContainerEnv := oldContainerInfo.Config.Env
	// Pass the old container name to the new container
	newContainerEnv = append(newContainerEnv, fmt.Sprintf("NGINX_UI_CONTAINER_NAME=%s", currentContainerName))

	_, err = cli.ContainerCreate(
		ctx,
		&container.Config{
			Image:        newImage,
			Cmd:          oldContainerInfo.Config.Cmd,
			Env:          newContainerEnv,
			Entrypoint:   oldContainerInfo.Config.Entrypoint,
			Labels:       oldContainerInfo.Config.Labels,
			ExposedPorts: oldContainerInfo.Config.ExposedPorts,
			Volumes:      oldContainerInfo.Config.Volumes,
			WorkingDir:   oldContainerInfo.Config.WorkingDir,
		},
		&container.HostConfig{
			Binds:         oldContainerInfo.HostConfig.Binds,
			PortBindings:  oldContainerInfo.HostConfig.PortBindings,
			RestartPolicy: oldContainerInfo.HostConfig.RestartPolicy,
			NetworkMode:   oldContainerInfo.HostConfig.NetworkMode,
			Mounts:        oldContainerInfo.HostConfig.Mounts,
			Privileged:    oldContainerInfo.HostConfig.Privileged,
		},
		nil,
		nil,
		currentContainerName,
	)
	if err != nil {
		// If creation fails, try to recover
		recoverErr := cli.ContainerRename(ctx, currentContainerName+OldSuffix, currentContainerName)
		if recoverErr == nil {
			// Start old container
			recoverErr = cli.ContainerStart(ctx, currentContainerName, container.StartOptions{})
			if recoverErr == nil {
				return errors.Wrap(err, "failed to create new container, recovered to old container")
			}
		}
		return errors.Wrap(err, "failed to create new container and failed to recover")
	}

	// Start the new container
	err = cli.ContainerStart(ctx, currentContainerName, container.StartOptions{})
	if err != nil {
		// If startup fails, try to recover
		// First remove the failed new container
		removeErr := cli.ContainerRemove(ctx, currentContainerName, container.RemoveOptions{Force: true})
		if removeErr != nil {
			logger.Error("Failed to remove failed new container:", removeErr)
		}

		// Rename the old container back to original
		recoverErr := cli.ContainerRename(ctx, currentContainerName+OldSuffix, currentContainerName)
		if recoverErr == nil {
			// Start old container
			recoverErr = cli.ContainerStart(ctx, currentContainerName, container.StartOptions{})
			if recoverErr == nil {
				return errors.Wrap(err, "failed to start new container, recovered to old container")
			}
		}
		return errors.Wrap(err, "failed to start new container and failed to recover")
	}

	logger.Info("Docker OTA upgrade step 2 completed successfully. New container is running.")
	return nil
}

// UpgradeStepThree Trigger in the new container
func UpgradeStepThree() error {
	ctx := context.Background()
	// Remove the old container
	cli, err := initClient()
	if err != nil {
		return cosy.WrapErrorWithParams(ErrClientNotInitialized, err.Error())
	}
	defer cli.Close()

	// Get old container name from environment variable, fallback to settings if not available
	currentContainerName := os.Getenv("NGINX_UI_CONTAINER_NAME")
	if currentContainerName == "" {
		logger.Warn("Old container name not found in environment, skipping cleanup")
		return nil
	}
	oldContainerName := currentContainerName + OldSuffix

	// Check if old container exists and remove it if it does
	containers, err := cli.ContainerList(ctx, container.ListOptions{All: true})
	if err != nil {
		return errors.Wrap(err, "failed to list containers")
	}

	for _, c := range containers {
		for _, name := range c.Names {
			processedName := strings.TrimPrefix(name, "/")
			// Remove old container
			if processedName == oldContainerName {
				err = cli.ContainerRemove(ctx, c.ID, container.RemoveOptions{Force: true})
				if err != nil {
					logger.Error("Failed to remove old container:", err)
					// Continue execution, don't interrupt because of failure to remove old container
				} else {
					logger.Info("Successfully removed old container:", oldContainerName)
				}
				break
			}
		}
	}

	// Clean up all temp containers
	err = removeAllTempContainers(ctx, cli)
	if err != nil {
		logger.Error("Failed to clean up temp containers:", err)
		// Continue execution despite cleanup errors
	}

	return nil
}
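Not part of the commit, but a quick way to confirm from the host that steps 2 and 3 finished cleanly is to list containers with the same Docker SDK calls ota.go uses and check that no nginx-ui-temp-* or *_old containers are left behind. A hypothetical checker, assuming the standard Docker environment variables are available on the host:

// Hypothetical verification helper, not part of this commit: lists containers
// the same way ota.go does and reports any leftover temp or _old containers.
package main

import (
	"context"
	"fmt"
	"strings"

	"github.com/docker/docker/api/types/container"
	"github.com/docker/docker/client"
)

func main() {
	ctx := context.Background()
	cli, err := client.NewClientWithOpts(client.FromEnv, client.WithAPIVersionNegotiation())
	if err != nil {
		panic(err)
	}
	defer cli.Close()

	containers, err := cli.ContainerList(ctx, container.ListOptions{All: true})
	if err != nil {
		panic(err)
	}
	for _, c := range containers {
		for _, name := range c.Names {
			n := strings.TrimPrefix(name, "/")
			if strings.HasPrefix(n, "nginx-ui-temp-") || strings.HasSuffix(n, "_old") {
				fmt.Printf("leftover container: %s (image %s)\n", n, c.Image)
			}
		}
	}
}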
@@ -12,6 +12,7 @@ import (
 	"github.com/0xJacky/Nginx-UI/internal/cert"
 	"github.com/0xJacky/Nginx-UI/internal/cluster"
 	"github.com/0xJacky/Nginx-UI/internal/cron"
+	"github.com/0xJacky/Nginx-UI/internal/docker"
 	"github.com/0xJacky/Nginx-UI/internal/passkey"
 	"github.com/0xJacky/Nginx-UI/internal/validation"
 	"github.com/0xJacky/Nginx-UI/model"

@@ -35,6 +36,7 @@ func Boot() {
 		InitCryptoSecret,
 		validation.Init,
 		cache.Init,
+		CheckAndCleanupOTAContainers,
 	}
 
 	syncs := []func(){

@@ -129,3 +131,14 @@ func InitJsExtensionType() {
 	// See https://github.com/golang/go/issues/32350
 	_ = mime.AddExtensionType(".js", "text/javascript; charset=utf-8")
 }
+
+// CheckAndCleanupOTAContainers Check and cleanup OTA update temporary containers
+func CheckAndCleanupOTAContainers() {
+	// Execute the third step cleanup operation at startup
+	err := docker.UpgradeStepThree()
+	if err != nil {
+		logger.Error("Failed to cleanup OTA containers:", err)
+	} else {
+		logger.Info("OTA container cleanup completed successfully")
+	}
+}
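The Boot hunk above appends CheckAndCleanupOTAContainers to the kernel's list of init functions, so the step-3 cleanup runs on every start of the upgraded container (and skips cleanup when NGINX_UI_CONTAINER_NAME is not set, i.e. outside the OTA flow). A minimal sketch of that boot pattern, with illustrative names; the real init list lives in the kernel package:

// Minimal sketch of the init-list pattern the kernel hunk extends.
package main

import "fmt"

func main() {
	inits := []func(){
		func() { fmt.Println("InitCryptoSecret") },
		func() { fmt.Println("validation.Init") },
		func() { fmt.Println("cache.Init") },
		func() { fmt.Println("CheckAndCleanupOTAContainers") }, // added by this commit
	}
	for _, init := range inits {
		init()
	}
}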