From 766e0d3479725cccbe291bb80fc415670fb64157 Mon Sep 17 00:00:00 2001
From: yekelu
Date: Mon, 30 Sep 2024 10:36:25 +0800
Subject: [PATCH] add testcase for CPI service

---
 pkg/services/cpi/cpi_test.go        | 497 +++++++++++++++++
 pkg/services/cpi/dataSeries_test.go | 283 ++++++++++
 pkg/services/cpi/podStatus_test.go  | 833 ++++++++++++++++++++++++++++
 3 files changed, 1613 insertions(+)
 create mode 100644 pkg/services/cpi/cpi_test.go
 create mode 100644 pkg/services/cpi/dataSeries_test.go
 create mode 100644 pkg/services/cpi/podStatus_test.go

diff --git a/pkg/services/cpi/cpi_test.go b/pkg/services/cpi/cpi_test.go
new file mode 100644
index 0000000..5254c73
--- /dev/null
+++ b/pkg/services/cpi/cpi_test.go
@@ -0,0 +1,497 @@
+// Copyright (c) Huawei Technologies Co., Ltd. 2024. All rights reserved.
+// rubik licensed under the Mulan PSL v2.
+// You can use this software according to the terms and conditions of the Mulan PSL v2.
+// You may obtain a copy of Mulan PSL v2 at:
+//     http://license.coscl.org.cn/MulanPSL2
+// THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR FIT FOR A PARTICULAR
+// PURPOSE.
+// See the Mulan PSL v2 for more details.
+// Author: Kelu Ye
+// Date: 2024-09-19
+// Description: This file is used for testing cpi.go
+
+// Package cpi is for CPU Interference Detection Service
+package cpi
+
+import (
+    "context"
+    "os"
+    "path"
+    "testing"
+    "time"
+
+    "isula.org/rubik/pkg/common/constant"
+    "isula.org/rubik/pkg/common/perf"
+    "isula.org/rubik/pkg/core/typedef"
+    "isula.org/rubik/pkg/core/typedef/cgroup"
+    "isula.org/rubik/pkg/podmanager"
+)
+
+var (
+    fooOnlineCon = &typedef.ContainerInfo{
+        Name: "onlineCon",
+        ID:   "onlineCon",
+        Hierarchy: cgroup.Hierarchy{
+            MountPoint: "/",
+            Path:       "online/testCon1"},
+        LimitResources: make(typedef.ResourceMap),
+    }
+    fooOfflineCon = &typedef.ContainerInfo{
+        Name: "offlineCon",
+        ID:   "offlineCon",
+        Hierarchy: cgroup.Hierarchy{
+            MountPoint: "/",
+            Path:       "offline/testCon1"},
+        LimitResources: make(typedef.ResourceMap),
+    }
+    fooOnlinePod = &typedef.PodInfo{
+        Name: "onlinePod",
+        UID:  "onlinePod",
+        Hierarchy: cgroup.Hierarchy{
+            MountPoint: "/",
+            Path:       "online",
+        },
+        Annotations: map[string]string{
+            constant.CpiAnnotationKey: "online",
+        },
+        IDContainersMap: map[string]*typedef.ContainerInfo{
+            fooOnlineCon.ID: fooOnlineCon,
+        },
+    }
+    fooOfflinePod = &typedef.PodInfo{
+        Name: "offlinePod",
+        UID:  "offlinePod",
+        Hierarchy: cgroup.Hierarchy{
+            MountPoint: "/",
+            Path:       "offline",
+        },
+        Annotations: map[string]string{
+            constant.CpiAnnotationKey: "offline",
+        },
+        IDContainersMap: map[string]*typedef.ContainerInfo{
+            fooOfflineCon.ID: fooOfflineCon,
+        },
+    }
+    podWithNoAnno = &typedef.PodInfo{
+        Name: "pod",
+        UID:  "pod",
+        Hierarchy: cgroup.Hierarchy{
+            MountPoint: "/",
+            Path:       "pod",
+        },
+        IDContainersMap: map[string]*typedef.ContainerInfo{
+            fooOfflineCon.ID: fooOfflineCon,
+        },
+    }
+    podWithErrorAnno = &typedef.PodInfo{
+        Name: "offlinePod",
+        UID:  "offlinePod",
+        Hierarchy: cgroup.Hierarchy{
+            MountPoint: "/",
+            Path:       "offline",
+        },
+        Annotations: map[string]string{
+            constant.CpiAnnotationKey: "error",
+        },
+        IDContainersMap: map[string]*typedef.ContainerInfo{
+            fooOfflineCon.ID: fooOfflineCon,
+        },
+    }
+)
+
+// TestCpiServiceRun tests Run function
+func TestCpiServiceRun(t *testing.T) {
+    const name = "cpi"
+    tests := []struct {
+        name string
+    }{
+        {
+            name: "Test CPI service run and cancel",
+        },
+    }
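+    // The loop below is a smoke test: Run is started in a goroutine and the
+    // context is cancelled shortly afterwards. The 200ms sleep is a test
+    // heuristic that gives the service loop time to start; it is not a
+    // timing guarantee made by the service itself.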
+    for _, tt := range tests {
+        t.Run(tt.name, func(t *testing.T) {
+            qt := newCpiService(name)
+            pm := &podmanager.PodManager{
+                Pods: &podmanager.PodCache{
+                    Pods: map[string]*typedef.PodInfo{
+                        fooOnlinePod.UID: fooOnlinePod,
+                    },
+                },
+            }
+            ctx, cancel := context.WithCancel(context.Background())
+            qt.Viewer = pm
+            go qt.Run(ctx)
+            const sleepTime = time.Millisecond * 200
+            time.Sleep(sleepTime)
+            cancel()
+        })
+    }
+}
+
+// TestCpiServicePreStart tests PreStart function
+func TestCpiServicePreStart(t *testing.T) {
+    _, err := perf.CgroupStat(cgroup.AbsoluteCgroupPath("perf_event", "", ""), time.Millisecond, cpiConf)
+    if err != nil {
+        return
+    }
+    createFile := func(dir string, fileName string, content string) {
+        if err := os.MkdirAll(dir, constant.DefaultDirMode); err != nil {
+            t.Errorf("error creating temp dir: %v", err)
+        }
+        file, err := os.Create(path.Join(dir, fileName))
+        if err != nil {
+            t.Errorf("error creating quota file: %v", err)
+            return
+        }
+        file.Chmod(constant.DefaultFileMode)
+        if _, err := file.Write([]byte(content)); err != nil {
+            t.Errorf("error writing to quota file: %v", err)
+        }
+        file.Close()
+    }
+    var (
+        pm = &podmanager.PodManager{
+            Pods: &podmanager.PodCache{
+                Pods: map[string]*typedef.PodInfo{
+                    fooOnlinePod.UID:  fooOnlinePod,
+                    fooOfflinePod.UID: fooOfflinePod,
+                },
+            },
+        }
+        name     = "Cpi"
+        service  = newCpiService(name)
+        fileName = "cpu.cfs_quota_us"
+    )
+
+    createFile(path.Join("/cpu", fooOfflinePod.MountPoint, fooOfflinePod.Path), fileName, "10000")
+    createFile(path.Join("/cpu", fooOfflineCon.MountPoint, fooOfflineCon.Path), fileName, "10000")
+    defer os.RemoveAll("/cpu")
+    testName := "cpi test PreStart"
+    t.Run(testName, func(t *testing.T) {
+        service.PreStart(pm)
+        if len(service.onlineTasks) != 1 || len(service.offlineTasks) != 1 {
+            t.Errorf("PreStart failed: expected 1 online task and 1 offline task, got %d online tasks and %d offline tasks", len(service.onlineTasks), len(service.offlineTasks))
+        }
+    })
+}
+
+// TestCpiServiceAddPod tests AddPod function
+func TestCpiServiceAddPod(t *testing.T) {
+    tests := []struct {
+        name    string
+        podInfo *typedef.PodInfo
+        wantErr bool
+    }{
+        {
+            name:    "cpi test add onlinePod",
+            podInfo: fooOnlinePod,
+            wantErr: false,
+        },
+        {
+            name:    "cpi test add offlinePod",
+            podInfo: fooOfflinePod,
+            wantErr: false,
+        },
+        {
+            name:    "cpi test add errorAnnoPod",
+            podInfo: podWithErrorAnno,
+            wantErr: false,
+        },
+        {
+            name:    "cpi test add no annotation pod",
+            podInfo: podWithNoAnno,
+            wantErr: false,
+        },
+    }
+    for _, tt := range tests {
+        t.Run(tt.name, func(t *testing.T) {
+            service := newCpiService("Cpi")
+            if err := service.AddPod(tt.podInfo); (err != nil) != tt.wantErr {
+                t.Errorf("CpiService.AddPod() error = %v, wantErr %v", err, tt.wantErr)
+            }
+        })
+    }
+}
+
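+// As the cases above suggest, AddPod appears to classify pods by the value of
+// constant.CpiAnnotationKey ("online"/"offline") and to tolerate missing or
+// unrecognized values without returning an error (inferred from wantErr being
+// false for every case, not from the service documentation).
+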
t.Errorf("CpiService.AddPod() error = %v, wantErr %v", err, tt.wantErr) + } + }) + } +} + +// TestCpiServiceCollectData tests CollectData function +func TestCpiServiceCollectData(t *testing.T) { + onlinePod := &typedef.PodInfo{ + Name: "onlinePod", + UID: "onlinePod", + Hierarchy: cgroup.Hierarchy{}, + Annotations: map[string]string{ + constant.CpiAnnotationKey: "online", + }, + } + offlinePod := &typedef.PodInfo{ + Name: "offlinePod", + UID: "offlinePod", + Hierarchy: cgroup.Hierarchy{}, + Annotations: map[string]string{ + constant.CpiAnnotationKey: "offline", + }, + } + t.Run("cpu test collect data", func(t *testing.T) { + service := newCpiService("Cpi") + service.AddPod(onlinePod) + service.AddPod(offlinePod) + service.collectData(defaultSampleDur) + }) +} + +// TestCpiServiceIdentifyAntagonists tests IdentifyAntagonists function +func TestCpiServiceIdentifyAntagonists(t *testing.T) { + createFile := func(dir string, fileName string, content string) { + if err := os.MkdirAll(dir, constant.DefaultDirMode); err != nil { + t.Errorf("error creating temp dir: %v", err) + } + file, err := os.Create(path.Join(dir, fileName)) + file.Chmod(constant.DefaultFileMode) + if err != nil { + t.Errorf("error creating quota file: %v", err) + } + if _, err := file.Write([]byte(content)); err != nil { + t.Errorf("error writing to quota file: %v", err) + } + file.Close() + } + var fileName = "cpu.cfs_quota_us" + tests := []struct { + name string + onlineTasks map[string]*podStatus + offlineTasks map[string]*podStatus + limitedIndex int + }{ + { + name: "without outlier", + onlineTasks: map[string]*podStatus{ + "testOnline1": { + Hierarchy: &cgroup.Hierarchy{ + MountPoint: "/", + Path: "online", + }, + isOnline: true, + cpiSeries: &dataSeries{ + timeline: []int64{1, 2, 3, 4, 5, 6, 7, 8, 9, 10}, + data: map[int64]float64{1: 2.8, 2: 2.8, 3: 2.8, 4: 2.8, 5: 2.8, 6: 2.8, 7: 2.8, 8: 2.8, 9: 2.8, 10: 2.8}, + }, + cpuUsageSeries: &dataSeries{ + timeline: []int64{1, 2, 3, 4, 5, 6, 7, 8, 9, 10}, + data: map[int64]float64{1: 3, 2: 3, 3: 3, 4: 3, 5: 3, 6: 3, 7: 3, 8: 3, 9: 3, 10: 3}, + }, + cpiMean: 2.8, + stdDev: 0, + count: 11, + }, + }, + limitedIndex: -1, + }, + { + name: "with out offlineTasks", + onlineTasks: map[string]*podStatus{ + "testOnline1": { + Hierarchy: &cgroup.Hierarchy{ + MountPoint: "/", + Path: "online", + }, + isOnline: true, + cpiSeries: &dataSeries{ + timeline: []int64{1, 2, 3, 4, 5, 6, 7, 8, 9, 10}, + data: map[int64]float64{1: 2.8, 2: 2.8, 3: 2.8, 4: 2.8, 5: 2.8, 6: 2.8, 7: 2.8, 8: 2.8, 9: 2.8, 10: 2.8}, + }, + cpuUsageSeries: &dataSeries{ + timeline: []int64{1, 2, 3, 4, 5, 6, 7, 8, 9, 10}, + data: map[int64]float64{1: 3, 2: 3, 3: 3, 4: 3, 5: 3, 6: 3, 7: 3, 8: 3, 9: 3, 10: 3}, + }, + cpiMean: 2.8, + stdDev: 0, + count: 11, + }, + }, + limitedIndex: -1, + }, + { + name: "No antagonist, but offline tasks exist.", + onlineTasks: map[string]*podStatus{ + "testOnline1": { + Hierarchy: &cgroup.Hierarchy{ + MountPoint: "/", + Path: "online", + }, + isOnline: true, + cpiSeries: &dataSeries{ + timeline: []int64{1, 2, 3, 4, 5, 6, 7, 8, 9, 10}, + data: map[int64]float64{1: 2.1, 2: 2.1, 3: 2.1, 4: 2.1, 5: 2.1, 6: 2.1, 7: 2.1, 8: 2.1, 9: 2.1, 10: 2.1}, + }, + cpuUsageSeries: &dataSeries{ + timeline: []int64{1, 2, 3, 4, 5, 6, 7, 8, 9, 10}, + data: map[int64]float64{1: 3, 2: 3, 3: 3, 4: 3, 5: 3, 6: 3, 7: 3, 8: 3, 9: 3, 10: 3}, + }, + cpiMean: 2.0, + stdDev: 0, + count: 11, + }, + }, + offlineTasks: map[string]*podStatus{ + "maxCpuUsagePod": { + Hierarchy: &cgroup.Hierarchy{ + MountPoint: "/", + Path: 
"offline", + }, + isOnline: false, + cpuUsageSeries: &dataSeries{ + timeline: []int64{1, 2, 3, 4, 5, 6, 7, 8, 9, 10}, + data: map[int64]float64{1: 6, 2: 6, 3: 6, 4: 6, 5: 6, 6: 6, 7: 6, 8: 6, 9: 6, 10: 6}, + }, + }, + }, + limitedIndex: 0, + }, + { + name: "Antagonist detected.", + onlineTasks: map[string]*podStatus{ + "testOnline1": { + Hierarchy: &cgroup.Hierarchy{ + MountPoint: "/", + Path: "online", + }, + isOnline: true, + cpiSeries: &dataSeries{ + timeline: []int64{1, 2, 3, 4, 5, 6, 7, 8, 9, 10}, + data: map[int64]float64{1: 2.1, 2: 2.1, 3: 2.1, 4: 3.1, 5: 2.1, 6: 2.1, 7: 2.1, 8: 2.1, 9: 2.1, 10: 2.1}, + }, + cpuUsageSeries: &dataSeries{ + timeline: []int64{1, 2, 3, 4, 5, 6, 7, 8, 9, 10}, + data: map[int64]float64{1: 3, 2: 3, 3: 3, 4: 3, 5: 3, 6: 3, 7: 3, 8: 3, 9: 3, 10: 3}, + }, + cpiMean: 2.0, + stdDev: 0, + count: 11, + }, + }, + offlineTasks: map[string]*podStatus{ + "maxCpuUsagePod": { + Hierarchy: &cgroup.Hierarchy{ + MountPoint: "/", + Path: "offline1", + }, + isOnline: false, + cpuUsageSeries: &dataSeries{ + timeline: []int64{1, 2, 3, 4, 5, 6, 7, 8, 9, 10}, + data: map[int64]float64{1: 6, 2: 6, 3: 6, 4: 6, 5: 6, 6: 6, 7: 6, 8: 6, 9: 6, 10: 6}, + }, + }, + "antagonistPod": { + Hierarchy: &cgroup.Hierarchy{ + MountPoint: "/", + Path: "offline2", + }, + isOnline: false, + cpuUsageSeries: &dataSeries{ + timeline: []int64{1, 2, 3, 4, 5, 6, 7, 8, 9, 10}, + data: map[int64]float64{1: 1, 2: 1, 3: 1, 4: 10, 5: 1, 6: 1, 7: 1, 8: 1, 9: 1, 10: 1}, + }, + }, + }, + limitedIndex: 1, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + service := &CpiService{ + onlineTasks: tt.onlineTasks, + offlineTasks: tt.offlineTasks, + } + now := time.Now() + for podUID, onlinePod := range tt.onlineTasks { + newOnline, _ := newPodStatus(&typedef.PodInfo{Hierarchy: *onlinePod.Hierarchy}, true, podUID) + + for _, minutes := range onlinePod.cpiSeries.timeline { + newOnline.cpiSeries.add(onlinePod.cpiSeries.data[minutes], now.Add(time.Duration(minutes-10)*time.Minute).UnixNano()) + } + + for _, minutes := range onlinePod.cpuUsageSeries.timeline { + newOnline.cpuUsageSeries.add(onlinePod.cpuUsageSeries.data[minutes], now.Add(time.Duration(minutes-10)*time.Minute).UnixNano()) + } + + newOnline.cpiMean = onlinePod.cpiMean + newOnline.stdDev = onlinePod.stdDev + newOnline.count = onlinePod.count + + service.onlineTasks[podUID] = newOnline + } + + for podUID, offlinePod := range tt.offlineTasks { + createFile(path.Join("/cpu", offlinePod.MountPoint, offlinePod.Path), fileName, "10000") + defer os.RemoveAll("/cpu") + newOffline := &podStatus{ + isOnline: false, + Hierarchy: offlinePod.Hierarchy, + cpuUsageSeries: newDataSeries(), + } + for _, minutes := range offlinePod.cpuUsageSeries.timeline { + newOffline.cpuUsageSeries.add(offlinePod.cpuUsageSeries.data[minutes], now.Add(time.Duration(minutes-10)*time.Minute).UnixNano()) + } + service.offlineTasks[podUID] = newOffline + } + + service.identifyAntagonists(time.Minute*10, time.Minute*5, time.Second*2, defaultAntagonistMetric, defaultLimitQuota) + + if tt.limitedIndex == -1 { + return + } + var podUid string + if tt.limitedIndex == 0 { + podUid = "maxCpuUsagePod" + } else { + podUid = "antagonistPod" + } + limitedPod := service.offlineTasks[podUid] + quota := limitedPod.GetCgroupAttr(cpuQuotaKey).Value + if quota != defaultLimitQuota { + t.Errorf("Pod has not been limited as expected") + } + }) + } +} diff --git a/pkg/services/cpi/dataSeries_test.go b/pkg/services/cpi/dataSeries_test.go new file mode 100644 index 0000000..c716b0a --- 
--- /dev/null
+++ b/pkg/services/cpi/dataSeries_test.go
@@ -0,0 +1,283 @@
+// Copyright (c) Huawei Technologies Co., Ltd. 2024. All rights reserved.
+// rubik licensed under the Mulan PSL v2.
+// You can use this software according to the terms and conditions of the Mulan PSL v2.
+// You may obtain a copy of Mulan PSL v2 at:
+//     http://license.coscl.org.cn/MulanPSL2
+// THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR FIT FOR A PARTICULAR
+// PURPOSE.
+// See the Mulan PSL v2 for more details.
+// Author: Kelu Ye
+// Date: 2024-09-19
+// Description: This file is used for testing dataSeries.go
+
+// Package cpi is for CPU Interference Detection Service
+package cpi
+
+import (
+    "math"
+    "testing"
+    "time"
+)
+
+const epsilon = 1e-9
+
+// TestNewDataSeries tests newDataSeries function
+func TestNewDataSeries(t *testing.T) {
+    t.Run("test newDataSeries", func(t *testing.T) {
+        if got := newDataSeries(); got == nil {
+            t.Errorf("newDataSeries() returns nil")
+        } else if got.timeline == nil || got.data == nil {
+            t.Errorf("newDataSeries() hasn't been initialized correctly")
+        }
+    })
+}
+
+// TestDataSeriesAdd tests add function
+func TestDataSeriesAdd(t *testing.T) {
+    testSeries := newDataSeries()
+    type args struct {
+        value float64
+        nano  int64
+    }
+    tests := []struct {
+        name    string
+        args    args
+        wantErr bool
+    }{
+        {
+            name: "nano is negative",
+            args: args{
+                value: 0.1,
+                nano:  -1 * time.Now().UnixNano(),
+            },
+            wantErr: true,
+        },
+        {
+            name: "valid timestamp",
+            args: args{
+                value: 0.1,
+                nano:  time.Now().UnixNano(),
+            },
+            wantErr: false,
+        },
+        {
+            name: "new timestamp is earlier than the previous timestamp",
+            args: args{
+                value: 0.1,
+                nano:  time.Now().Add(-1 * time.Minute).UnixNano(),
+            },
+            wantErr: true,
+        },
+    }
+    for _, tt := range tests {
+        t.Run(tt.name, func(t *testing.T) {
+            if err := testSeries.add(tt.args.value, tt.args.nano); (err != nil) != tt.wantErr {
+                t.Fatalf("testSeries.add() error = %v, wantErr %v", err, tt.wantErr)
+            }
+            if !tt.wantErr {
+                lastKey := testSeries.timeline[len(testSeries.timeline)-1]
+                lastValue, _ := testSeries.getData(lastKey)
+                if lastKey != tt.args.nano {
+                    t.Errorf("testSeries.add() added an incorrect key: got %v, want %v", lastKey, tt.args.nano)
+                }
+                if lastValue != tt.args.value {
+                    t.Errorf("testSeries.add() added an incorrect value: got %v, want %v", lastValue, tt.args.value)
+                }
+            }
+        })
+    }
+}
+
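+// The add cases above pin down an ordering invariant: timestamps must be
+// positive and non-decreasing, so add rejects a point older than the last one
+// recorded (behavior inferred from the wantErr expectations, not from
+// dataSeries.go documentation).
+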
+// TestDataSeriesRangeSearch tests rangeSearch function
+func TestDataSeriesRangeSearch(t *testing.T) {
+    testSeries, now := generateTestSeries()
+    type args struct {
+        start int64
+        end   int64
+    }
+    tests := []struct {
+        name    string
+        args    args
+        wantNil bool
+    }{
+        {
+            name: "startTime is greater than endTime",
+            args: args{
+                start: now.Add(-4 * time.Minute).UnixNano(),
+                end:   now.Add(-6 * time.Minute).UnixNano(),
+            },
+            wantNil: true,
+        },
+        {
+            name: "start is greater than all timestamps",
+            args: args{
+                start: now.Add(time.Minute).UnixNano(),
+                end:   now.UnixNano(),
+            },
+            wantNil: true,
+        },
+        {
+            name: "end is smaller than all timestamps",
+            args: args{
+                start: now.Add(time.Minute).UnixNano(),
+                end:   now.Add(-10 * time.Minute).UnixNano(),
+            },
+            wantNil: true,
+        },
+        {
+            name: "everything is fine",
+            args: args{
+                start: now.Add(-6 * time.Minute).UnixNano(),
+                end:   now.Add(-4 * time.Minute).UnixNano(),
+            },
+            wantNil: false,
+        },
+    }
+    for _, tt := range tests {
+        t.Run(tt.name, func(t *testing.T) {
+            got := testSeries.rangeSearch(tt.args.start, tt.args.end)
+            if (got == nil) != tt.wantNil {
+                t.Errorf("testSeries.rangeSearch() = %v, want nil: %v", got, tt.wantNil)
+            }
+            if got != nil && !tt.wantNil {
+                targetSeries := newDataSeries()
+                for i := 9; i >= 0; i-- {
+                    timePoint := now.Add(time.Duration(-i) * time.Minute).UnixNano()
+                    if (timePoint >= tt.args.start) && (timePoint <= tt.args.end) {
+                        targetSeries.add(float64(i), timePoint)
+                    }
+                }
+                if !dataSeriesEqual(got, targetSeries) {
+                    t.Fatalf("rangeSearch returned incorrect values")
+                }
+            }
+        })
+    }
+}
+
+// TestDataSeriesNormalize tests normalize function
+func TestDataSeriesNormalize(t *testing.T) {
+    testSeries, now := generateTestSeries()
+    tests := []struct {
+        name string
+        want *dataSeries
+    }{
+        {
+            name: "everything is fine",
+            want: newDataSeries(),
+        },
+    }
+    for _, tt := range tests {
+        t.Run(tt.name, func(t *testing.T) {
+            got := testSeries.normalize()
+            var sum float64
+            for i := 9; i >= 0; i-- {
+                sum += float64(i)
+            }
+            now = now.Add(-10 * time.Minute)
+            for i := 9; i >= 0; i-- {
+                now = now.Add(time.Minute)
+                tt.want.add(float64(i)/sum, now.UnixNano())
+            }
+            if !dataSeriesEqual(got, tt.want) {
+                t.Fatalf("normalize returned incorrect values")
+            }
+        })
+    }
+}
+
+// TestDataSeriesExpire tests expire function
+func TestDataSeriesExpire(t *testing.T) {
+    testSeries, now := generateTestSeries()
+    tempStore := cloneDataSeries(testSeries)
+    tests := []struct {
+        name       string
+        beforeNano int64
+        want       *dataSeries
+    }{
+        {
+            name:       "all expired",
+            beforeNano: now.Add(time.Minute).UnixNano(),
+            want:       newDataSeries(),
+        },
+        {
+            name:       "nothing expired",
+            beforeNano: now.Add(-10 * time.Minute).UnixNano(),
+            want:       newDataSeries(),
+        },
+        {
+            name:       "partially expired",
+            beforeNano: now.Add(-4 * time.Minute).UnixNano(),
+            want:       newDataSeries(),
+        },
+    }
+    for _, tt := range tests {
+        t.Run(tt.name, func(t *testing.T) {
+            for _, nano := range testSeries.timeline {
+                if nano < tt.beforeNano {
+                    value, ok := testSeries.getData(nano)
+                    if !ok {
+                        continue
+                    }
+                    tt.want.add(value, nano)
+                } else {
+                    break
+                }
+            }
+            got := testSeries.expire(tt.beforeNano)
+
+            if !dataSeriesEqual(got, tt.want) {
+                t.Fatalf("expire function returned incorrect values")
+            }
+            testSeries = cloneDataSeries(tempStore)
+        })
+    }
+}
+
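+// Note on the cases above: expire(before) is exercised as "remove and return
+// every point strictly older than before", so each case builds its expected
+// return set from the points below beforeNano and restores the series
+// afterwards (an interpretation drawn from the test data, not from
+// dataSeries.go itself).
+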
+// generateTestSeries is used to generate dataSeries for test
+func generateTestSeries() (*dataSeries, time.Time) {
+    now := time.Now().Add(-10 * time.Minute)
+    testSeries := newDataSeries()
+    for i := 9; i >= 0; i-- {
+        now = now.Add(time.Minute)
+        testSeries.add(float64(i), now.UnixNano())
+    }
+    return testSeries, now
+}
+
+// cloneDataSeries creates a deep copy of the given dataSeries, replicating its timeline and data values.
+func cloneDataSeries(origin *dataSeries) *dataSeries {
+    res := newDataSeries()
+    for _, nano := range origin.timeline {
+        value, ok := origin.getData(nano)
+        if !ok {
+            continue
+        }
+        res.add(value, nano)
+    }
+    return res
+}
+
+// dataSeriesEqual compares two dataSeries for equality, checking that both have the same timeline and data values within a small tolerance.
+func dataSeriesEqual(first *dataSeries, second *dataSeries) bool {
+    if first == nil || second == nil {
+        return first == nil && second == nil
+    }
+    if len(first.timeline) != len(second.timeline) {
+        return false
+    }
+    for _, nano := range second.timeline {
+        secondValue, _ := second.getData(nano)
+        firstValue, ok := first.getData(nano)
+        if !ok || math.Abs(firstValue-secondValue) > epsilon {
+            return false
+        }
+    }
+    return true
+}
diff --git a/pkg/services/cpi/podStatus_test.go b/pkg/services/cpi/podStatus_test.go
new file mode 100644
index 0000000..c8b79d5
--- /dev/null
+++ b/pkg/services/cpi/podStatus_test.go
@@ -0,0 +1,833 @@
+// Copyright (c) Huawei Technologies Co., Ltd. 2024. All rights reserved.
+// rubik licensed under the Mulan PSL v2.
+// You can use this software according to the terms and conditions of the Mulan PSL v2.
+// You may obtain a copy of Mulan PSL v2 at:
+//     http://license.coscl.org.cn/MulanPSL2
+// THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR FIT FOR A PARTICULAR
+// PURPOSE.
+// See the Mulan PSL v2 for more details.
+// Author: Kelu Ye
+// Date: 2024-09-19
+// Description: This file is used for testing podStatus.go
+
+// Package cpi is for CPU Interference Detection Service
+package cpi
+
+import (
+    "math"
+    "os"
+    "path"
+    "testing"
+    "time"
+
+    "isula.org/rubik/pkg/common/constant"
+    "isula.org/rubik/pkg/common/perf"
+    "isula.org/rubik/pkg/core/typedef/cgroup"
+)
+
+// TestPodStatusUpdateThreshold tests updateCPIStatistic function
+func TestPodStatusUpdateThreshold(t *testing.T) {
+    type fields struct {
+        isOnline bool
+        cpiMean  float64
+        count    int64
+        stdDev   float64
+    }
+    type target struct {
+        targetMean   float64
+        targetStdDev float64
+        targetCount  int64
+    }
+    tests := []struct {
+        name    string
+        fields  fields
+        expired *dataSeries
+        target  target
+    }{
+        {
+            name: "pre count = 0",
+            fields: fields{
+                isOnline: true,
+                cpiMean:  0.0,
+                count:    0,
+                stdDev:   0.0,
+            },
+            expired: &dataSeries{
+                timeline: []int64{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11},
+                data: map[int64]float64{
+                    1: 2.0, 2: 2.1, 3: 2.1, 4: 2.0, 5: 2.1, 6: 2.0, 7: 2.1, 8: 2.1, 9: 2.0, 10: 2.1, 11: 2.7,
+                },
+            },
+            target: target{
+                targetMean:   2.118181818181818,
+                targetCount:  11,
+                targetStdDev: 0.1898237547074646,
+            },
+        },
+        {
+            name: "pre count = 5",
+            fields: fields{
+                isOnline: true,
+                cpiMean:  2.0,
+                count:    5,
+                stdDev:   0.1,
+            },
+            expired: &dataSeries{
+                timeline: []int64{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11},
+                data: map[int64]float64{
+                    1: 2.0, 2: 2.1, 3: 2.1, 4: 2.0, 5: 2.1, 6: 2.0, 7: 2.1, 8: 2.1, 9: 2.0, 10: 2.1, 11: 2.7,
+                },
+            },
+            target: target{
+                targetMean:   2.08125,
+                targetCount:  16,
+                targetStdDev: 0.17577951388031546,
+            },
+        },
+        {
+            name: "pod is offlinePod",
+            fields: fields{
+                isOnline: false,
+                cpiMean:  0.0,
+                count:    0,
+                stdDev:   0.0,
+            },
+            expired: &dataSeries{
+                timeline: []int64{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11},
+                data: map[int64]float64{
+                    1: 2.0, 2: 2.1, 3: 2.1, 4: 2.0, 5: 2.1, 6: 2.0, 7: 2.1, 8: 2.1, 9: 2.0, 10: 2.1, 11: 2.7,
+                },
+            },
+            target: target{
+                targetMean:   0.0,
+                targetCount:  0,
+                targetStdDev: 0.0,
+            },
+        },
+    }
+    for _, tt := range tests {
+        t.Run(tt.name, func(t *testing.T) {
+            podStatus := &podStatus{
+                isOnline: tt.fields.isOnline,
+                cpiMean:  tt.fields.cpiMean,
+                count:    tt.fields.count,
+                stdDev:   tt.fields.stdDev,
+            }
+            if !podStatus.isOnline {
+                return
+            }
+            podStatus.updateCPIStatistic(tt.expired)
+            if math.Abs(podStatus.cpiMean-tt.target.targetMean) > epsilon || math.Abs(podStatus.stdDev-tt.target.targetStdDev) > epsilon || podStatus.count != tt.target.targetCount {
+                t.Errorf("updateCPIStatistic() got mean=%v stdDev=%v count=%v, want mean=%v stdDev=%v count=%v",
+                    podStatus.cpiMean, podStatus.stdDev, podStatus.count,
+                    tt.target.targetMean, tt.target.targetStdDev, tt.target.targetCount)
+            }
+        })
+    }
+}
+
+// TestPodStatusExpireAndUpdateThreshold tests expireAndUpdateThreshold function
+func TestPodStatusExpireAndUpdateThreshold(t *testing.T) {
+    _, err := perf.CgroupStat(cgroup.AbsoluteCgroupPath("perf_event", "", ""), time.Millisecond, cpiConf)
+    if err != nil {
+        return
+    }
+    type fields struct {
+        isOnline       bool
+        cpiSeries      *dataSeries
+        cpuUsageSeries *dataSeries
+        cpiMean        float64
+        count          int64
+        stdDev         float64
+    }
+    type target struct {
+        targetMean      float64
+        targetStdDev    float64
+        targetCpiSeries *dataSeries
+        targetCount     int64
+    }
+    tests := []struct {
+        name       string
+        fields     fields
+        expireNano int64
+        target     target
+    }{
+        {
+            name: "offlinePod only expires cpuUsage",
+            fields: fields{
+                isOnline: false,
+                cpuUsageSeries: &dataSeries{
+                    timeline: []int64{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11},
+                    data: map[int64]float64{
+                        1: 2.0, 2: 2.1, 3: 2.1, 4: 2.0, 5: 2.1, 6: 2.0, 7: 2.1, 8: 2.1, 9: 2.0, 10: 2.1, 11: 2.7,
+                    },
+                },
+                cpiMean: 0,
+                count:   0,
+                stdDev:  0,
+            },
+            expireNano: 4,
+        },
+        {
+            name: "onlinePod only expires",
+            fields: fields{
+                isOnline: true,
+                cpuUsageSeries: &dataSeries{
+                    timeline: []int64{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11},
+                    data: map[int64]float64{
+                        1: 2.0, 2: 2.1, 3: 2.1, 4: 2.0, 5: 2.1, 6: 2.0, 7: 2.1, 8: 2.1, 9: 2.0, 10: 2.1, 11: 2.7,
+                    },
+                },
+                cpiSeries: &dataSeries{
+                    timeline: []int64{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11},
+                    data: map[int64]float64{
+                        1: 2.0, 2: 2.1, 3: 2.1, 4: 2.0, 5: 2.1, 6: 2.0, 7: 2.1, 8: 2.1, 9: 2.0, 10: 2.1, 11: 2.7,
+                    },
+                },
+                cpiMean: 0,
+                count:   0,
+                stdDev:  0,
+            },
+            expireNano: 4,
+            target: target{
+                targetMean:   2.0666666666666664,
+                targetStdDev: 0.047140452079103216,
+                targetCount:  3,
+            },
+        },
+        {
+            name: "onlinePod expires and flushes cpuUsage < 0.25",
+            fields: fields{
+                isOnline: true,
+                cpuUsageSeries: &dataSeries{
+                    timeline: []int64{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11},
+                    data: map[int64]float64{
+                        1: 0.20, 2: 2.1, 3: 2.0, 4: 2.0, 5: 2.1, 6: 2.0, 7: 2.1, 8: 2.1, 9: 2.0, 10: 2.1, 11: 2.7,
+                    },
+                },
+                cpiSeries: &dataSeries{
+                    timeline: []int64{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11},
+                    data: map[int64]float64{
+                        1: 2.0, 2: 2.0, 3: 2.1, 4: 2.0, 5: 2.1, 6: 2.0, 7: 2.1, 8: 2.1, 9: 2.0, 10: 2.1, 11: 2.7,
+                    },
+                },
+                cpiMean: 0,
+                count:   0,
+                stdDev:  0,
+            },
+            expireNano: 4,
+            target: target{
+                targetMean:   2.05,
+                targetStdDev: 0.05,
+                targetCount:  2,
+            },
+        },
+        {
+            name: "onlinePod expires and flushes cpi > cpiMean + 2 * stdDev",
+            fields: fields{
+                isOnline: true,
+                cpuUsageSeries: &dataSeries{
+                    timeline: []int64{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11},
+                    data: map[int64]float64{
+                        1: 2.0, 2: 2.1, 3: 2.0, 4: 2.0, 5: 2.1, 6: 2.0, 7: 2.1, 8: 2.1, 9: 2.0, 10: 2.1, 11: 2.7,
+                    },
+                },
+                cpiSeries: &dataSeries{
+                    timeline: []int64{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11},
+                    data: map[int64]float64{
+                        1: 4, 2: 2.0, 3: 2.1, 4: 2.0, 5: 2.1, 6: 2.0, 7: 2.1, 8: 2.1, 9: 2.0, 10: 2.1, 11: 2.7,
+                    },
+                },
+                cpiMean: 2,
+                count:   10,
+                stdDev:  0.5,
+            },
+            expireNano: 4,
+            target: target{
+                targetMean:   2.0083333333333333,
+                targetStdDev: 0.4572714972772983,
+                targetCount:  12,
+            },
+        },
+    }
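+    // The targets above encode an incremental update: expired CPI points are
+    // folded into cpiMean/stdDev via a running count, after dropping samples
+    // whose cpuUsage is below 0.25 and, once count exceeds 10, samples beyond
+    // cpiMean + 2*stdDev (thresholds inferred from the test data, not from
+    // the service documentation).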
+    for _, tt := range tests {
+        t.Run(tt.name, func(t *testing.T) {
+            podStatus := &podStatus{
+                isOnline:       tt.fields.isOnline,
+                cpiSeries:      tt.fields.cpiSeries,
+                cpuUsageSeries: tt.fields.cpuUsageSeries,
+                cpiMean:        tt.fields.cpiMean,
+                count:          tt.fields.count,
+                stdDev:         tt.fields.stdDev,
+            }
+            targetPodStatus := podStatus.clone()
+            targetPodStatus.cpuUsageSeries.expire(tt.expireNano)
+
+            podStatus.expireAndUpdateThreshold(tt.expireNano)
+
+            if !dataSeriesEqual(targetPodStatus.cpuUsageSeries, podStatus.cpuUsageSeries) {
+                t.Errorf("expireAndUpdateThreshold cpuUsageSeries errors")
+            }
+            if tt.target.targetCount != podStatus.count || math.Abs(tt.target.targetMean-podStatus.cpiMean) > epsilon || math.Abs(tt.target.targetStdDev-podStatus.stdDev) > epsilon {
+                t.Errorf("expireAndUpdateThreshold threshold errors")
+            }
+            if !podStatus.isOnline {
+                return
+            }
+            targetPodStatus.cpiSeries.expire(tt.expireNano)
+            if tt.fields.isOnline && !dataSeriesEqual(targetPodStatus.cpiSeries, podStatus.cpiSeries) {
+                t.Errorf("expireAndUpdateThreshold cpiSeries errors")
+            }
+        })
+    }
+}
+
+// clone creates and returns a deep copy of the current podStatus instance, including its series and metrics.
+func (pod *podStatus) clone() *podStatus {
+    target := &podStatus{
+        isOnline:       pod.isOnline,
+        Hierarchy:      pod.Hierarchy,
+        cpiSeries:      newDataSeries(),
+        cpuUsageSeries: newDataSeries(),
+        containers:     pod.containers,
+    }
+    if pod.cpiSeries != nil {
+        for _, nano := range pod.cpiSeries.timeline {
+            value, ok := pod.cpiSeries.data[nano]
+            if ok {
+                target.cpiSeries.add(value, nano)
+            }
+        }
+    }
+    if pod.cpuUsageSeries != nil {
+        for _, nano := range pod.cpuUsageSeries.timeline {
+            value, ok := pod.cpuUsageSeries.data[nano]
+            if ok {
+                target.cpuUsageSeries.add(value, nano)
+            }
+        }
+    }
+    target.count = pod.count
+    target.cpiMean = pod.cpiMean
+    target.stdDev = pod.stdDev
+    return target
+}
+
+// TestPodStatusCheckOutlier tests checkOutlier function
+func TestPodStatusCheckOutlier(t *testing.T) {
+    _, err := perf.CgroupStat(cgroup.AbsoluteCgroupPath("perf_event", "", ""), time.Millisecond, cpiConf)
+    if err != nil {
+        return
+    }
+    type fields struct {
+        isOnline       bool
+        cpiSeries      *dataSeries
+        cpuUsageSeries *dataSeries
+        cpiMean        float64
+        count          int64
+        stdDev         float64
+    }
+    type args struct {
+        now      time.Time
+        duration time.Duration
+    }
+    tests := []struct {
+        name   string
+        fields fields
+        args   args
+        want   bool
+    }{
+        {
+            name: "cpi is bigger than threshold three times",
+            fields: fields{
+                isOnline: true,
+                cpuUsageSeries: &dataSeries{
+                    timeline: []int64{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11},
+                    data: map[int64]float64{
+                        1: 2.0, 2: 2.1, 3: 2.1, 4: 2.0, 5: 2.1, 6: 2.0, 7: 2.1, 8: 2.1, 9: 2.0, 10: 2.0, 11: 2.0,
+                    },
+                },
+                cpiSeries: &dataSeries{
+                    timeline: []int64{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11},
+                    data: map[int64]float64{
+                        1: 2.0, 2: 2.1, 3: 2.1, 4: 2.0, 5: 2.1, 6: 2.0, 7: 2.1, 8: 2.1, 9: 3.1, 10: 3.1, 11: 3.1,
+                    },
+                },
+                cpiMean: 2.0,
+                count:   11,
+                stdDev:  0.5,
+            },
+            args: args{
+                now:      time.Unix(0, 11),
+                duration: time.Nanosecond * 5,
+            },
+            want: true,
+        },
+        {
+            name: "cpi is bigger than threshold three times but cpuUsage < 0.25",
+            fields: fields{
+                isOnline: true,
+                cpuUsageSeries: &dataSeries{
+                    timeline: []int64{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11},
+                    data: map[int64]float64{
+                        1: 2.0, 2: 2.1, 3: 2.1, 4: 2.0, 5: 2.1, 6: 2.0, 7: 2.1, 8: 2.1, 9: 2.0, 10: 0.20, 11: 2.0,
+                    },
+                },
+                cpiSeries: &dataSeries{
+                    timeline: []int64{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11},
+                    data: map[int64]float64{
+                        1: 2.0, 2: 2.1, 3: 2.1, 4: 2.0, 5: 2.1, 6: 2.0, 7: 2.1, 8: 2.1, 9: 3.1, 10: 3.1, 11: 3.1,
+                    },
+                },
+                cpiMean: 2.0,
+                count:   11,
+                stdDev:  0.5,
+            },
+            args: args{
+                now:      time.Unix(0, 11),
+                duration: time.Nanosecond * 5,
+            },
+            want: false,
+        },
+        {
+            name: "cpi is bigger than threshold three times but cpiMean count <= 10",
+            fields: fields{
+                isOnline: true,
+                cpuUsageSeries: &dataSeries{
+                    timeline: []int64{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11},
+                    data: map[int64]float64{
+                        1: 2.0, 2: 2.1, 3: 2.1, 4: 2.0, 5: 2.1, 6: 2.0, 7: 2.1, 8: 2.1, 9: 2.0, 10: 2.0, 11: 2.0,
+                    },
+                },
+                cpiSeries: &dataSeries{
+                    timeline: []int64{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11},
+                    data: map[int64]float64{
+                        1: 2.0, 2: 2.1, 3: 2.1, 4: 2.0, 5: 2.1, 6: 2.0, 7: 2.1, 8: 2.1, 9: 3.1, 10: 3.1, 11: 3.1,
+                    },
+                },
+                cpiMean: 2.0,
+                count:   10,
+                stdDev:  0.5,
+            },
+            args: args{
+                now:      time.Unix(0, 11),
+                duration: time.Nanosecond * 5,
+            },
+            want: false,
+        },
+    }
+    for _, tt := range tests {
+        t.Run(tt.name, func(t *testing.T) {
+            podStatus := &podStatus{
+                isOnline:       tt.fields.isOnline,
+                cpiMean:        tt.fields.cpiMean,
+                count:          tt.fields.count,
+                stdDev:         tt.fields.stdDev,
+                cpiSeries:      tt.fields.cpiSeries,
+                cpuUsageSeries: tt.fields.cpuUsageSeries,
+            }
+            if got := podStatus.checkOutlier(tt.args.now, tt.args.duration); got != tt.want {
+                t.Errorf("podStatus.checkOutlier() = %v, want %v", got, tt.want)
+            }
+        })
+    }
+}
+
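+// The outlier rule exercised above appears to require a sufficient sample
+// count (more than 10) and a CPU usage of at least 0.25 before a CPI point
+// above cpiMean + 2*stdDev counts; repeated spikes within the window then
+// flag the pod. This reading is inferred from the cases, not from cpi.go
+// itself.
+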
+// TestPodStatusCheckAntagonists tests checkAntagonists function
+func TestPodStatusCheckAntagonists(t *testing.T) {
+    type fields struct {
+        cpiSeries      *dataSeries
+        cpuUsageSeries *dataSeries
+        cpiMean        float64
+        stdDev         float64
+    }
+    type args struct {
+        now              time.Time
+        window           time.Duration
+        antagonistMetric float64
+    }
+    tests := []struct {
+        name   string
+        fields fields
+        args   args
+        want   bool
+    }{
+        {
+            name: "everything is ok",
+            fields: fields{
+                cpuUsageSeries: &dataSeries{
+                    timeline: []int64{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11},
+                    data: map[int64]float64{
+                        1: 2.0, 2: 2.1, 3: 2.1, 4: 2.0, 5: 2.1, 6: 2.0, 7: 2.2, 8: 2.5, 9: 3.2,
+                    },
+                },
+                cpiSeries: &dataSeries{
+                    timeline: []int64{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11},
+                    data: map[int64]float64{
+                        1: 3.1, 2: 3.5, 3: 2.8, 4: 2.9, 5: 3.1, 6: 3.1, 7: 2.8, 8: 3.1, 9: 3.1,
+                    },
+                },
+                cpiMean: 2.0,
+                stdDev:  0.2,
+            },
+            args: args{
+                now:              time.Unix(0, 11),
+                window:           time.Nanosecond * 10,
+                antagonistMetric: defaultAntagonistMetric,
+            },
+            want: true,
+        },
+        {
+            name: "some cpi is filtered",
+            fields: fields{
+                cpuUsageSeries: &dataSeries{
+                    timeline: []int64{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11},
+                    data: map[int64]float64{
+                        1: 2.0, 2: 2.1, 3: 2.1, 4: 2.0, 5: 2.1, 6: 2.0, 7: 2.2, 8: 2.5, 9: 3.2, 10: 3.3, 11: 3.5,
+                    },
+                },
+                cpiSeries: &dataSeries{
+                    timeline: []int64{1, 2, 3, 4, 5, 6, 7, 8, 11},
+                    data: map[int64]float64{
+                        1: 2.4, 2: 2.4, 3: 2.4, 4: 2.4, 5: 2.4, 6: 2.4, 7: 2.4, 8: 2.4, 11: 2.4,
+                    },
+                },
+                cpiMean: 2.0,
+                stdDev:  0.2,
+            },
+            args: args{
+                now:              time.Unix(0, 11),
+                window:           time.Nanosecond * 10,
+                antagonistMetric: defaultAntagonistMetric,
+            },
+            want: false,
+        },
+        {
+            name: "some cpu usage is not collected",
+            fields: fields{
+                cpuUsageSeries: &dataSeries{
+                    timeline: []int64{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11},
+                    data: map[int64]float64{
+                        1: 2.0, 2: 2.1, 3: 2.1, 4: 2.0, 5: 2.1, 8: 2.5, 9: 3.2, 10: 3.3, 11: 3.5,
+                    },
+                },
+                cpiSeries: &dataSeries{
+                    timeline: []int64{1, 2, 3, 4, 5, 6, 7, 8, 11},
+                    data: map[int64]float64{
+                        1: 2.4, 2: 2.4, 3: 2.4, 4: 2.4, 5: 2.4, 6: 2.4, 7: 2.4, 8: 2.4, 11: 2.4,
+                    },
+                },
+                cpiMean: 2.0,
+                stdDev:  0.2,
+            },
+            args: args{
+                now:              time.Unix(0, 11),
+                window:           time.Nanosecond * 10,
+                antagonistMetric: defaultAntagonistMetric,
+            },
+            want: false,
+        },
+    }
+    for _, tt := range tests {
+        t.Run(tt.name, func(t *testing.T) {
+            onlinePod := &podStatus{
+                cpiSeries: tt.fields.cpiSeries,
+                cpiMean:   tt.fields.cpiMean,
+                stdDev:    tt.fields.stdDev,
+            }
+            offlinePod := &podStatus{
+                cpuUsageSeries: tt.fields.cpuUsageSeries,
+            }
+            if got := onlinePod.checkAntagonists(tt.args.now, tt.args.window, offlinePod, tt.args.antagonistMetric); got != tt.want {
+                t.Errorf("podStatus.checkAntagonists() = %v, want %v", got, tt.want)
+            }
+        })
+    }
+}
+
+// TestPodStatusCollectData tests collectData function
+func TestPodStatusCollectData(t *testing.T) {
+    _, err := perf.CgroupStat(cgroup.AbsoluteCgroupPath("perf_event", "", ""), time.Millisecond, cpiConf)
+    if err != nil {
+        return
+    }
+    sampleDur := time.Second
+    type fields struct {
+        isOnline       bool
+        Hierarchy      *cgroup.Hierarchy
+        cpiSeries      *dataSeries
+        cpuUsageSeries *dataSeries
+    }
+    type args struct {
+        sampleDur time.Duration
+        nowNano   int64
+    }
+    tests := []struct {
+        name     string
+        fields   fields
+        args     args
+        dataSize int
+    }{
+        {
+            name: "collect online pod",
+            fields: fields{
+                isOnline: true,
+                Hierarchy: &cgroup.Hierarchy{
+                    Path: "",
+                },
+                cpiSeries:      newDataSeries(),
+                cpuUsageSeries: newDataSeries(),
+            },
+            args: args{
+                sampleDur: sampleDur,
+                nowNano:   time.Now().UnixNano(),
+            },
+            dataSize: 1,
+        },
+        {
+            name: "collect offline pod",
+            fields: fields{
+                isOnline: false,
+                Hierarchy: &cgroup.Hierarchy{
+                    Path: "",
+                },
+                cpiSeries:      newDataSeries(),
+                cpuUsageSeries: newDataSeries(),
+            },
+            args: args{
+                sampleDur: sampleDur,
+                nowNano:   time.Now().UnixNano(),
+            },
+            dataSize: 1,
+        },
+        {
+            name: "cgroupPath does not exist",
+            fields: fields{
+                isOnline: true,
+                Hierarchy: &cgroup.Hierarchy{
+                    Path: "errorPath",
+                },
+                cpiSeries:      newDataSeries(),
+                cpuUsageSeries: newDataSeries(),
+            },
+            args: args{
+                sampleDur: sampleDur,
+                nowNano:   time.Now().UnixNano(),
+            },
+            dataSize: 0,
+        },
+    }
+    for _, tt := range tests {
+        t.Run(tt.name, func(t *testing.T) {
+            podStatus := &podStatus{
+                isOnline:       tt.fields.isOnline,
+                Hierarchy:      tt.fields.Hierarchy,
+                cpiSeries:      tt.fields.cpiSeries,
+                cpuUsageSeries: tt.fields.cpuUsageSeries,
+            }
+            podStatus.collectData(tt.args.sampleDur, tt.args.nowNano)
+            time.Sleep(defaultSampleDur)
+            if podStatus.isOnline && (len(podStatus.cpiSeries.data) != tt.dataSize || len(podStatus.cpuUsageSeries.data) != tt.dataSize) {
+                t.Errorf("want dataSize = %v, got %v", tt.dataSize, len(podStatus.cpiSeries.data))
+            }
+            if !podStatus.isOnline && len(podStatus.cpuUsageSeries.data) != tt.dataSize {
+                t.Errorf("want dataSize = %v, got %v", tt.dataSize, len(podStatus.cpuUsageSeries.data))
+            }
+        })
+    }
+}
+
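+// TestPodStatusLimit below checks the throttle/restore cycle: limit writes
+// the reduced quota to the pod and container cpu.cfs_quota_us files and arms
+// a delayed restore of the previous quota after limitDur; the extra second of
+// sleep is slack for that timer (an assumption of this test, not a documented
+// guarantee).
+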
+// TestPodStatusLimit tests limit function
+func TestPodStatusLimit(t *testing.T) {
+    _, err := perf.CgroupStat(cgroup.AbsoluteCgroupPath("perf_event", "", ""), time.Millisecond, cpiConf)
+    if err != nil {
+        return
+    }
+    type fields struct {
+        Hierarchy   *cgroup.Hierarchy
+        containers  map[string]*containerStatus
+        isLimited   bool
+        originQuota string
+    }
+    type args struct {
+        quota    string
+        limitDur time.Duration
+    }
+
+    tests := []struct {
+        name         string
+        fields       fields
+        args         args
+        wantErr      bool
+        delayerIsNil bool
+    }{
+        {
+            name: "pod is limited",
+            fields: fields{
+                Hierarchy: &cgroup.Hierarchy{
+                    MountPoint: "/",
+                    Path:       "tmp",
+                },
+                containers: map[string]*containerStatus{
+                    "container1": {
+                        Hierarchy: &cgroup.Hierarchy{
+                            MountPoint: "/",
+                            Path:       "tmp/container1",
+                        },
+                        preCpuQuota: "10000",
+                    },
+                    "container2": {
+                        Hierarchy: &cgroup.Hierarchy{
+                            MountPoint: "/",
+                            Path:       "tmp/container2",
+                        },
+                        preCpuQuota: "10000",
+                    },
+                },
+                isLimited:   true,
+                originQuota: "10000",
+            },
+            args: args{
+                quota:    "1000",
+                limitDur: time.Second * 2,
+            },
+            wantErr:      false,
+            delayerIsNil: false,
+        },
+        {
+            name: "pod is not limited but delayer is not nil",
+            fields: fields{
+                Hierarchy: &cgroup.Hierarchy{
+                    MountPoint: "/",
+                    Path:       "tmp",
+                },
+                containers: map[string]*containerStatus{
+                    "container1": {
+                        Hierarchy: &cgroup.Hierarchy{
+                            MountPoint: "/",
+                            Path:       "tmp/container1",
+                        },
+                        preCpuQuota: "10000",
+                    },
+                    "container2": {
+                        Hierarchy: &cgroup.Hierarchy{
+                            MountPoint: "/",
+                            Path:       "tmp/container2",
+                        },
+                        preCpuQuota: "10000",
+                    },
+                },
+                isLimited:   false,
+                originQuota: "10000",
+            },
+            args: args{
+                quota:    "1000",
+                limitDur: time.Second * 2,
+            },
+            wantErr:      false,
+            delayerIsNil: false,
+        },
+        {
+            name: "pod is not limited and delayer is nil",
+            fields: fields{
+                Hierarchy: &cgroup.Hierarchy{
+                    MountPoint: "/",
+                    Path:       "tmp",
+                },
+                containers: map[string]*containerStatus{
+                    "container1": {
+                        Hierarchy: &cgroup.Hierarchy{
+                            MountPoint: "/",
+                            Path:       "tmp/container1",
+                        },
+                        preCpuQuota: "10000",
+                    },
+                    "container2": {
+                        Hierarchy: &cgroup.Hierarchy{
+                            MountPoint: "/",
+                            Path:       "tmp/container2",
+                        },
+                        preCpuQuota: "10000",
+                    },
+                },
+                isLimited:   false,
+                originQuota: "10000",
+            },
+            args: args{
+                quota:    "1000",
+                limitDur: time.Second * 2,
+            },
+            wantErr:      false,
+            delayerIsNil: true,
+        },
+    }
+    for _, tt := range tests {
+        createFile := func(dir string, fileName string, content string) {
+            if err := os.MkdirAll(dir, constant.DefaultDirMode); err != nil {
+                t.Errorf("error creating temp dir: %v", err)
+            }
+            file, err := os.Create(path.Join(dir, fileName))
+            if err != nil {
+                t.Errorf("error creating quota file: %v", err)
+                return
+            }
+            file.Chmod(constant.DefaultFileMode)
+            if _, err := file.Write([]byte(content)); err != nil {
+                t.Errorf("error writing to quota file: %v", err)
+            }
+            file.Close()
+        }
+
+        t.Run(tt.name, func(t *testing.T) {
+            podDir := "/cpu/tmp"
+            fileName := "cpu.cfs_quota_us"
+            createFile(podDir, fileName, tt.fields.originQuota)
+            containerDir1 := "/cpu/tmp/container1"
+            containerDir2 := "/cpu/tmp/container2"
+            createFile(containerDir1, fileName, tt.fields.originQuota)
+            createFile(containerDir2, fileName, tt.fields.originQuota)
+            defer os.RemoveAll(podDir)
+
+            podStatus := &podStatus{
+                Hierarchy:   tt.fields.Hierarchy,
+                preCpuQuota: tt.fields.originQuota,
+                containers:  tt.fields.containers,
+            }
+            if !tt.delayerIsNil {
+                podStatus.limit(tt.args.quota, tt.args.limitDur)
+            }
+            if !tt.fields.isLimited {
+                time.Sleep(tt.args.limitDur)
+            }
+            err := podStatus.limit(tt.args.quota, tt.args.limitDur)
+            if (err != nil) != tt.wantErr {
+                t.Errorf("podStatus.limit() error = %v, wantErr %v", err, tt.wantErr)
+            }
+            if err == nil {
+                quota := podStatus.GetCgroupAttr(cpuQuotaKey).Value
+                if quota != tt.args.quota {
+                    t.Errorf("limit pod quota errors")
+                }
+                for _, container := range podStatus.containers {
+                    containerQuota := container.GetCgroupAttr(cpuQuotaKey).Value
+                    if containerQuota != tt.args.quota {
+                        t.Errorf("limit container quota errors")
+                    }
+                }
+
+                time.Sleep(tt.args.limitDur + time.Second)
+
+                quota = podStatus.GetCgroupAttr(cpuQuotaKey).Value
+                if quota != tt.fields.originQuota {
+                    t.Errorf("recover pod quota errors")
+                }
+                for _, container := range podStatus.containers {
+                    containerQuota := container.GetCgroupAttr(cpuQuotaKey).Value
+                    if containerQuota != container.preCpuQuota {
+                        t.Errorf("recover container quota errors")
+                    }
+                }
+            }
+        })
+    }
+}
--
Gitee