Ensure IO limits are written after first workspace is stopped #9404

Merged: 1 commit, Apr 19, 2022

components/ws-daemon/pkg/cgroup/cgroup.go (2 changes: 1 addition & 1 deletion)
@@ -81,7 +81,7 @@ func (host *PluginHost) WorkspaceAdded(ctx context.Context, ws *dispatch.Workspa

cgroupPath, err := disp.Runtime.ContainerCGroupPath(context.Background(), ws.ContainerID)
if err != nil {
return xerrors.Errorf("cannot start governer: %w", err)
return xerrors.Errorf("cannot get cgroup path for container %s: %w", ws.ContainerID, err)
}

for _, plg := range host.Plugins {
components/ws-daemon/pkg/cgroup/plugin_iolimit_v2.go (47 changes: 33 additions & 14 deletions)
@@ -15,11 +15,35 @@ import (
"github.com/gitpod-io/gitpod/common-go/log"
)

-type IOLimiterV2 struct {
+var clearLimits = ioLimitOptions{
+WriteBytesPerSecond: 0,
+ReadBytesPerSecond: 0,
+WriteIOPs: 0,
+ReadIOPs: 0,
+}
+
+type ioLimitOptions struct {
WriteBytesPerSecond int64
ReadBytesPerSecond int64
-ReadIOPs int64
WriteIOPs int64
+ReadIOPs int64
}

+type IOLimiterV2 struct {
+limits ioLimitOptions
+}

+func NewIOLimiterV2(writeBytesPerSecond, readBytesPerSecond, writeIOPs, readIOPs int64) *IOLimiterV2 {
+limits := ioLimitOptions{
+WriteBytesPerSecond: writeBytesPerSecond,
+ReadBytesPerSecond: readBytesPerSecond,
+WriteIOPs: writeIOPs,
+ReadIOPs: readIOPs,
+}
+
+return &IOLimiterV2{
+limits: limits,
+}
+}

func (c *IOLimiterV2) Name() string { return "iolimiter-v2" }
@@ -42,19 +66,15 @@ func (c *IOLimiterV2) Apply(ctx context.Context, basePath, cgroupPath string) er
// Prior to shutting down though, we need to reset the IO limits to ensure we don't have
// processes stuck in the uninterruptable "D" (disk sleep) state. This would prevent the
// workspace pod from shutting down.
-c.WriteBytesPerSecond = 0
-c.ReadBytesPerSecond = 0
-c.WriteIOPs = 0
-c.ReadIOPs = 0

-err := c.writeIOMax(ioMaxFile)
+err := c.writeIOMax(ioMaxFile, clearLimits)
if err != nil {
log.WithError(err).WithField("cgroupPath", cgroupPath).Error("cannot write IO limits")
}
log.WithField("cgroupPath", cgroupPath).Debug("stopping io limiting")
return
case <-ticker.C:
-err := c.writeIOMax(ioMaxFile)
+err := c.writeIOMax(ioMaxFile, c.limits)
if err != nil {
log.WithError(err).WithField("cgroupPath", cgroupPath).Error("cannot write IO limits")
}
@@ -64,7 +84,7 @@ func (c *IOLimiterV2) Apply(ctx context.Context, basePath, cgroupPath string) er
return nil
}

-func (c *IOLimiterV2) writeIOMax(cgroupPath string) error {
+func (c *IOLimiterV2) writeIOMax(cgroupPath string, options ioLimitOptions) error {
iostat, err := os.ReadFile(filepath.Join(string(cgroupPath), "io.stat"))
if os.IsNotExist(err) {
// cgroup gone is ok due to the dispatch/container race
@@ -80,7 +100,6 @@ func (c *IOLimiterV2) writeIOMax(cgroupPath string) error {
// 9 block Metadisk (RAID) devices
// source https://www.kernel.org/doc/Documentation/admin-guide/devices.txt
var classesToLimit = []string{"8", "9"}
-
var devs []string
for _, line := range strings.Split(string(iostat), "\n") {
fields := strings.Fields(line)
@@ -100,10 +119,10 @@ func (c *IOLimiterV2) writeIOMax(cgroupPath string) error {
limit := fmt.Sprintf(
"%s wbps=%s rbps=%s wiops=%s riops=%s",
dev,
-getLimit(c.WriteBytesPerSecond),
-getLimit(c.ReadBytesPerSecond),
-getLimit(c.WriteIOPs),
-getLimit(c.ReadIOPs),
+getLimit(options.WriteBytesPerSecond),
+getLimit(options.ReadBytesPerSecond),
+getLimit(options.WriteIOPs),
+getLimit(options.ReadIOPs),
)

log.WithField("limit", limit).WithField("ioMaxPath", ioMaxPath).Debug("creating io.max limit")
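
For context, here is a minimal, self-contained sketch (not part of this PR) of the kind of line writeIOMax composes and writes into a cgroup v2 io.max file. The device number "8:0" (first class-8 SCSI/SATA disk), the cgroup path, and the limit values are assumptions for illustration. Presumably the zero values in clearLimits are rendered as "max" by the getLimit helper (not visible in this diff), which is how cgroup v2 removes an existing io.max limit.

package main

import (
	"fmt"
	"os"
	"path/filepath"
)

func main() {
	// Illustrative limit values; these are not Gitpod defaults.
	var (
		writeBytesPerSecond int64 = 100 * 1024 * 1024 // 100 MiB/s
		readBytesPerSecond  int64 = 200 * 1024 * 1024 // 200 MiB/s
		writeIOPs           int64 = 2000
		readIOPs            int64 = 4000
	)

	// One entry per device, mirroring the "%s wbps=%s rbps=%s wiops=%s riops=%s"
	// format built in writeIOMax above.
	line := fmt.Sprintf("8:0 wbps=%d rbps=%d wiops=%d riops=%d",
		writeBytesPerSecond, readBytesPerSecond, writeIOPs, readIOPs)

	// Hypothetical cgroup path; writing the line into io.max applies the limit.
	cgroupPath := "/sys/fs/cgroup/workspace-example"
	if err := os.WriteFile(filepath.Join(cgroupPath, "io.max"), []byte(line), 0644); err != nil {
		fmt.Fprintln(os.Stderr, "cannot write io.max:", err)
	}
}
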
components/ws-daemon/pkg/daemon/daemon.go (7 changes: 1 addition & 6 deletions)
@@ -57,12 +57,7 @@ func NewDaemon(config Config, reg prometheus.Registerer) (*Daemon, error) {
&cgroup.CacheReclaim{},
&cgroup.FuseDeviceEnablerV1{},
&cgroup.FuseDeviceEnablerV2{},
&cgroup.IOLimiterV2{
WriteBytesPerSecond: config.IOLimit.WriteBWPerSecond.Value(),
ReadBytesPerSecond: config.IOLimit.ReadBWPerSecond.Value(),
WriteIOPs: config.IOLimit.WriteIOPS,
ReadIOPs: config.IOLimit.ReadIOPS,
},
cgroup.NewIOLimiterV2(config.IOLimit.WriteBWPerSecond.Value(), config.IOLimit.ReadBWPerSecond.Value(), config.IOLimit.WriteIOPS, config.IOLimit.ReadIOPS),
)
if err != nil {
return nil, err
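
As a closing usage note, here is a hypothetical wiring sketch (a fragment, not code from this PR) showing how the new constructor is meant to be driven; the numeric values and cgroup paths are placeholders, and the cgroup package import is assumed.

ctx, cancel := context.WithCancel(context.Background())

limiter := cgroup.NewIOLimiterV2(
	250*1024*1024, // writeBytesPerSecond (placeholder)
	300*1024*1024, // readBytesPerSecond (placeholder)
	3000,          // writeIOPs (placeholder)
	6000,          // readIOPs (placeholder)
)

// Apply starts the periodic io.max writes shown in the diff above; the base
// path and workspace cgroup path here are illustrative only.
if err := limiter.Apply(ctx, "/sys/fs/cgroup", "kubepods/workspace-example"); err != nil {
	// handle the error
}

// When the workspace stops, cancelling the context makes the plugin write
// clearLimits (all zeros) before it exits, so no process is left throttled
// in the uninterruptible "D" state during pod shutdown.
cancel()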