// Copyright (c) 2018 HyperHQ Inc.
//
// SPDX-License-Identifier: Apache-2.0
//

package containerdshim

import (
	"context"
	"os"
	"path"
	"time"

	"github.com/containerd/containerd/api/events"
	"github.com/containerd/containerd/api/types/task"
	"github.com/containerd/containerd/mount"
	"github.com/sirupsen/logrus"
	"google.golang.org/grpc/codes"

	"github.com/kata-containers/kata-containers/src/runtime/virtcontainers/pkg/oci"
)
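
// defaultCheckInterval is how long watchOOMEvents waits before retrying
// after a failed GetOOMEvent call.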
const defaultCheckInterval = 1 * time.Second
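
// wait blocks until the container (or, when execID is non-empty, the given
// exec process) has exited, records its exit status and exit time, triggers
// the corresponding sandbox or container cleanup, and returns the exit code.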
func wait(ctx context.Context, s *service, c *container, execID string) (int32, error) {
	var execs *exec
	var err error

	processID := c.id
	if execID == "" {
		// Wait until the IO is closed, then wait for the container.
		<-c.exitIOch
	} else {
		execs, err = c.getExec(execID)
		if err != nil {
			return exitCode255, err
		}
		<-execs.exitIOch
		// This wait can be triggered before the exec has started, and
		// starting is what assigns the exec's ID. The assignment must
		// therefore happen after the exec has exited, to make sure the
		// ID has been set.
		processID = execs.id
	}

	ret, err := s.sandbox.WaitProcess(ctx, c.id, processID)
	if err != nil {
		shimLog.WithError(err).WithFields(logrus.Fields{
			"container": c.id,
			"pid":       processID,
		}).Error("Wait for process failed")
	}

	timeStamp := time.Now()

	s.mu.Lock()
	if execID == "" {
		// Take care of the use case where it is a sandbox.
		// Right after the container representing the sandbox has
		// been deleted, let's make sure we stop and delete the
		// sandbox.
		if c.cType.IsSandbox() {
			// Cancel the watcher.
			if s.monitor != nil {
				s.monitor <- nil
			}
			if err = s.sandbox.Stop(ctx, true); err != nil {
				shimLog.WithField("sandbox", s.sandbox.ID()).Error("failed to stop sandbox")
			}

			if err = s.sandbox.Delete(ctx); err != nil {
				shimLog.WithField("sandbox", s.sandbox.ID()).Error("failed to delete sandbox")
			}
		} else {
			if _, err = s.sandbox.StopContainer(ctx, c.id, false); err != nil {
				shimLog.WithError(err).WithField("container", c.id).Warn("stop container failed")
			}
		}
		c.status = task.StatusStopped
		c.exit = uint32(ret)
		c.exitTime = timeStamp

		c.exitCh <- uint32(ret)
	} else {
		execs.status = task.StatusStopped
		execs.exitCode = ret
		execs.exitTime = timeStamp

		execs.exitCh <- uint32(ret)
	}
	s.mu.Unlock()

	go cReap(s, int(ret), c.id, execID, timeStamp)

	return ret, nil
}
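
// watchSandbox waits on the service's monitor channel and, if the sandbox
// fails, cleans up as much as possible: it stops and deletes the sandbox
// and unmounts each mounted container rootfs.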
func watchSandbox(ctx context.Context, s *service) {
	if s.monitor == nil {
		return
	}
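	// Block here until the monitor reports; a nil value means the sandbox
	// was stopped deliberately (see wait above), so there is nothing to do.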
	err := <-s.monitor
	if err == nil {
		return
	}
	s.monitor = nil

	s.mu.Lock()
	defer s.mu.Unlock()

	// The sandbox is malfunctioning; clean up as much as we can.
	shimLog.WithError(err).Warn("sandbox stopped unexpectedly")
	err = s.sandbox.Stop(ctx, true)
	if err != nil {
		shimLog.WithError(err).Warn("stop sandbox failed")
	}

	err = s.sandbox.Delete(ctx)
	if err != nil {
		shimLog.WithError(err).Warn("delete sandbox failed")
	}

	for _, c := range s.containers {
		if !c.mounted {
			continue
		}
		rootfs := path.Join(c.bundle, "rootfs")
		shimLog.WithField("rootfs", rootfs).WithField("container", c.id).Debug("container umount rootfs")
		if err := mount.UnmountAll(rootfs, 0); err != nil {
			shimLog.WithError(err).Warn("failed to cleanup rootfs mount")
		}
	}

	// Existing containers/execs will be cleaned up by their waiters.
	// No need to send async events here.
}
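
// watchOOMEvents polls the sandbox for OOM events until the shim's context
// is cancelled. For each event it writes an "oom" file into the bundle of
// CRI-O managed containers and publishes a TaskOOM event to containerd.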
func watchOOMEvents(ctx context.Context, s *service) {
	if s.sandbox == nil {
		return
	}
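
	// Poll the sandbox for OOM events until the shim's context is done.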
	for {
		select {
		case <-s.ctx.Done():
			return
		default:
			containerID, err := s.sandbox.GetOOMEvent(ctx)
			if err != nil {
				shimLog.WithError(err).Warn("failed to get OOM event from sandbox")
				// If the GetOOMEvent call is not implemented, the agent is
				// most likely an older version; stop attempting to get OOM
				// events. The Rust agent returns a NotFound response code
				// in that case.
				if isGRPCErrorCode(codes.NotFound, err) || err.Error() == "Dead agent" {
					return
				}
				time.Sleep(defaultCheckInterval)
				continue
			}

			// Write the oom file for CRI-O.
			if c, ok := s.containers[containerID]; ok && oci.IsCRIOContainerManager(c.spec) {
				oomPath := path.Join(c.bundle, "oom")
				shimLog.Infof("write oom file to notify CRI-O: %s", oomPath)
				f, err := os.OpenFile(oomPath, os.O_CREATE, 0666)
				if err != nil {
					shimLog.WithError(err).Warnf("failed to write oom file %s", oomPath)
				} else {
					f.Close()
				}
			}

			// Publish the event for containerd.
			s.send(&events.TaskOOM{
				ContainerID: containerID,
			})
		}
	}
}
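
// A minimal sketch of how these helpers are typically wired together
// (illustrative only; the service `s`, container `c`, and context values
// below are assumptions, and the real call sites live elsewhere in this
// package):
//
//	go watchSandbox(ctx, s)
//	go watchOOMEvents(ctx, s)
//
//	// After starting a task, collect its exit status asynchronously.
//	go func() {
//		if _, err := wait(ctx, s, c, ""); err != nil {
//			shimLog.WithError(err).Warn("wait failed")
//		}
//	}()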