Skip to content

Commit

Permalink
libpod: restart always reconfigure the netns
Browse files Browse the repository at this point in the history
Always teardown the network, trying to reuse the netns has caused
a significant amount of bugs in this code here. It also never worked
for containers with user namespaces. So once and for all simplify this
by never reusing the netns. Originally this was done to have a faster
restart of containers but with netavark now we are much faster so it
shouldn't be that noticeable in practice. It also makes more sense to
reconfigure the netns as it is likely that the container exited due
to some broken network state in which case reusing would just cause more
harm than good.

The main motivation for this change was the pasta change to use
--dns-forward by default. As the restarted container had no idea what
nameserver to use as pasta just kept running.

Signed-off-by: Paul Holzinger <[email protected]>
  • Loading branch information
Luap99 committed Mar 19, 2024
1 parent dc1795b commit 15b8bb7
Show file tree
Hide file tree
Showing 2 changed files with 11 additions and 38 deletions.
28 changes: 11 additions & 17 deletions libpod/container_internal.go
Original file line number Diff line number Diff line change
Expand Up @@ -301,29 +301,23 @@ func (c *Container) handleRestartPolicy(ctx context.Context) (_ bool, retErr err
}
}()

// Now this is a bit of a mess, normally we try to reuse the netns but if a userns
// is used this is not possible as it must be owned by the userns which is created
// by the oci runtime. Thus we need to teardown the netns so that the runtime
// creates the users+netns and then we setup in completeNetworkSetup() again.
if c.config.PostConfigureNetNS {
if err := c.cleanupNetwork(); err != nil {
return false, err
}
// Always teardown the network, trying to reuse the netns has caused
// a significant amount of bugs in this code here. It also never worked
// for containers with user namespaces. So once and for all simplify this
// by never reusing the netns. Originally this was done to have a faster
// restart of containers but with netavark now we are much faster so it
// shouldn't be that noticeable in practice. It also makes more sense to
// reconfigure the netns as it is likely that the container exited due
// to some broken network state in which case reusing would just cause more
// harm than good.
if err := c.cleanupNetwork(); err != nil {
return false, err
}

if err := c.prepare(); err != nil {
return false, err
}

// only do this if the container is not in a userns, if we are the cleanupNetwork()
// was called above and a proper network setup is needed which is part of the init() below.
if !c.config.PostConfigureNetNS {
// set up slirp4netns again because slirp4netns will die when conmon exits
if err := c.setupRootlessNetwork(); err != nil {
return false, err
}
}

if c.state.State == define.ContainerStateStopped {
// Reinitialize the container if we need to
if err := c.reinit(ctx, true); err != nil {
Expand Down
21 changes: 0 additions & 21 deletions libpod/container_internal_linux.go
Original file line number Diff line number Diff line change
Expand Up @@ -413,27 +413,6 @@ func (c *Container) getOCICgroupPath() (string, error) {
}
}

// If the container is rootless, set up the slirp4netns network
func (c *Container) setupRootlessNetwork() error {
// set up slirp4netns again because slirp4netns will die when conmon exits
if c.config.NetMode.IsSlirp4netns() {
err := c.runtime.setupSlirp4netns(c, c.state.NetNS)
if err != nil {
return err
}
}

// set up rootlesskit port forwarder again since it dies when conmon exits
// we use rootlesskit port forwarder only as rootless and when bridge network is used
if rootless.IsRootless() && c.config.NetMode.IsBridge() && len(c.config.PortMappings) > 0 {
err := c.runtime.setupRootlessPortMappingViaRLK(c, c.state.NetNS, c.state.NetworkStatus)
if err != nil {
return err
}
}
return nil
}

func openDirectory(path string) (fd int, err error) {
return unix.Open(path, unix.O_RDONLY|unix.O_PATH, 0)
}
Expand Down

0 comments on commit 15b8bb7

Please sign in to comment.