-
Notifications
You must be signed in to change notification settings - Fork 22
Implement graceful termination #39
base: main
Are you sure you want to change the base?
Changes from all commits
File filter
Filter by extension
Conversations
Jump to
Diff view
Diff view
There are no files selected for viewing
Original file line number | Diff line number | Diff line change |
---|---|---|
|
@@ -86,12 +86,13 @@ func (s *serverImpl) RunWithLifecycle(lifecycle service.Lifecycle) error { | |
s.wg.Add(1) | ||
go s.handleConnection(tcpConn) | ||
} | ||
|
||
lifecycle.Stopping() | ||
s.shuttingDown = true | ||
allClientsExited := make(chan struct{}) | ||
shutdownHandlerExited := make(chan struct{}, 1) | ||
s.disconnectClients(lifecycle, allClientsExited) | ||
go s.shutdownHandlers.Shutdown(lifecycle.ShutdownContext()) | ||
go s.disconnectClients(lifecycle, allClientsExited) | ||
There was a problem hiding this comment. Choose a reason for hiding this comment. The reason will be displayed to describe this comment to others. Learn more. If this does not work, this is actually a bug: the shutdown handlers should get a chance to run before the clients are disconnected forcefully. There was a problem hiding this comment. Choose a reason for hiding this comment. The reason will be displayed to describe this comment to others. Learn more. This seems to work the way it's intended currently: I delay the shutdown handlers until all clients are disconnected, because running them means shutting down things like the metrics server, which may be needed if the delay is too long — we want to know what happened during that time. There was a problem hiding this comment. Choose a reason for hiding this comment. The reason will be displayed to describe this comment to others. Learn more. In the future it may be beneficial to add a message when a shutdown is triggered, prompting all users to re-connect. |
||
go s.shutdownHandler(lifecycle, shutdownHandlerExited) | ||
|
||
s.wg.Wait() | ||
|
@@ -322,6 +323,7 @@ func (s *serverImpl) createConfiguration( | |
ServerVersion: s.cfg.ServerVersion.String(), | ||
BannerCallback: func(conn ssh.ConnMetadata) string { return s.cfg.Banner }, | ||
} | ||
|
||
for _, key := range s.hostKeys { | ||
serverConfig.AddHostKey(key) | ||
} | ||
|
Original file line number | Diff line number | Diff line change |
---|---|---|
|
@@ -182,10 +182,10 @@ func startServices(cfg config.AppConfig, loggerFactory log.LoggerFactory) error | |
return err | ||
} | ||
|
||
return startPool(pool, lifecycle) | ||
return startPool(pool, lifecycle, cfg) | ||
} | ||
|
||
func startPool(pool Service, lifecycle service.Lifecycle) error { | ||
func startPool(pool Service, lifecycle service.Lifecycle, cfg config.AppConfig) error { | ||
starting := make(chan struct{}) | ||
lifecycle.OnStarting( | ||
func(s service.Service, l service.Lifecycle) { | ||
|
@@ -204,12 +204,15 @@ func startPool(pool Service, lifecycle service.Lifecycle) error { | |
rotateSignals := make(chan os.Signal, 1) | ||
signal.Notify(exitSignals, exitSignalList...) | ||
signal.Notify(rotateSignals, rotateSignalList...) | ||
|
||
deadline := cfg.SSH.GracefulTerminationDeadline + 5 * time.Second | ||
There was a problem hiding this comment. Choose a reason for hiding this comment. The reason will be displayed to describe this comment to others. Learn more. Adding an extra 5 seconds is a very arbitrary choice; we should come up with something better. |
||
|
||
go func() { | ||
if _, ok := <-exitSignals; ok { | ||
// ok means the channel wasn't closed | ||
shutdownContext, cancelFunc := context.WithTimeout( | ||
context.Background(), | ||
20*time.Second, | ||
deadline, | ||
) | ||
defer cancelFunc() | ||
lifecycle.Stop( | ||
|
There was a problem hiding this comment.
Choose a reason for hiding this comment.
The reason will be displayed to describe this comment to others. Learn more.
ContainerSSH will not always run SSH only; we plan a web client too. This deadline should not be tied to SSH.