
Commit

Add tests for ListHosts() and DeleteHost() (#3604)
Signed-off-by: BruceAko <[email protected]>
BruceAko authored Oct 23, 2024
1 parent 770d6c9 commit dc10763
Showing 2 changed files with 197 additions and 4 deletions.
193 changes: 193 additions & 0 deletions scheduler/service/service_v2_test.go
@@ -888,6 +888,199 @@ func TestServiceV2_AnnounceHost(t *testing.T) {
    }
}

func TestServiceV2_ListHosts(t *testing.T) {
    tests := []struct {
        name   string
        mock   func(host *resource.Host, hostManager resource.HostManager, mr *resource.MockResourceMockRecorder, mh *resource.MockHostManagerMockRecorder)
        expect func(t *testing.T, host *resource.Host, resp []*commonv2.Host, err error)
    }{
        {
            name: "host loading unsuccessful",
            mock: func(host *resource.Host, hostManager resource.HostManager, mr *resource.MockResourceMockRecorder, mh *resource.MockHostManagerMockRecorder) {
                gomock.InOrder(
                    mr.HostManager().Return(hostManager).Times(1),
                    mh.Range(gomock.Any()).Do(func(f func(key, value any) bool) {
                        f(nil, nil)
                    }).Return().Times(1),
                )
            },
            expect: func(t *testing.T, host *resource.Host, resp []*commonv2.Host, err error) {
                assert := assert.New(t)
                assert.NoError(err)
                assert.Equal(len(resp), 0)
            },
        },
        {
            name: "host loading successful",
            mock: func(host *resource.Host, hostManager resource.HostManager, mr *resource.MockResourceMockRecorder, mh *resource.MockHostManagerMockRecorder) {
                gomock.InOrder(
                    mr.HostManager().Return(hostManager).Times(1),
                    mh.Range(gomock.Any()).Do(func(f func(key, value any) bool) {
                        f(nil, host)
                    }).Return().Times(1),
                )
            },
            expect: func(t *testing.T, host *resource.Host, resp []*commonv2.Host, err error) {
                assert := assert.New(t)
                assert.NoError(err)
                assert.Equal(len(resp), 1)
                assert.EqualValues(resp[0], &commonv2.Host{
                    Id:           mockHostID,
                    Type:         uint32(pkgtypes.HostTypeNormal),
                    Hostname:     "foo",
                    Ip:           "127.0.0.1",
                    Port:         8003,
                    DownloadPort: mockRawHost.DownloadPort,
                    Cpu: &commonv2.CPU{
                        LogicalCount:   mockCPU.LogicalCount,
                        PhysicalCount:  mockCPU.PhysicalCount,
                        Percent:        mockCPU.Percent,
                        ProcessPercent: mockCPU.ProcessPercent,
                        Times: &commonv2.CPUTimes{
                            User:      mockCPU.Times.User,
                            System:    mockCPU.Times.System,
                            Idle:      mockCPU.Times.Idle,
                            Nice:      mockCPU.Times.Nice,
                            Iowait:    mockCPU.Times.Iowait,
                            Irq:       mockCPU.Times.Irq,
                            Softirq:   mockCPU.Times.Softirq,
                            Steal:     mockCPU.Times.Steal,
                            Guest:     mockCPU.Times.Guest,
                            GuestNice: mockCPU.Times.GuestNice,
                        },
                    },
                    Memory: &commonv2.Memory{
                        Total:              mockMemory.Total,
                        Available:          mockMemory.Available,
                        Used:               mockMemory.Used,
                        UsedPercent:        mockMemory.UsedPercent,
                        ProcessUsedPercent: mockMemory.ProcessUsedPercent,
                        Free:               mockMemory.Free,
                    },
                    Network: &commonv2.Network{
                        TcpConnectionCount:       mockNetwork.TCPConnectionCount,
                        UploadTcpConnectionCount: mockNetwork.UploadTCPConnectionCount,
                        Location:                 &mockNetwork.Location,
                        Idc:                      &mockNetwork.IDC,
                        DownloadRate:             mockNetwork.DownloadRate,
                        DownloadRateLimit:        mockNetwork.DownloadRateLimit,
                        UploadRate:               mockNetwork.UploadRate,
                        UploadRateLimit:          mockNetwork.UploadRateLimit,
                    },
                    Disk: &commonv2.Disk{
                        Total:             mockDisk.Total,
                        Free:              mockDisk.Free,
                        Used:              mockDisk.Used,
                        UsedPercent:       mockDisk.UsedPercent,
                        InodesTotal:       mockDisk.InodesTotal,
                        InodesUsed:        mockDisk.InodesUsed,
                        InodesFree:        mockDisk.InodesFree,
                        InodesUsedPercent: mockDisk.InodesUsedPercent,
                    },
                    Build: &commonv2.Build{
                        GitVersion: mockBuild.GitVersion,
                        GitCommit:  &mockBuild.GitCommit,
                        GoVersion:  &mockBuild.GoVersion,
                        Platform:   &mockBuild.Platform,
                    },
                })
            },
        },
    }

    for _, tc := range tests {
        t.Run(tc.name, func(t *testing.T) {
            ctl := gomock.NewController(t)
            defer ctl.Finish()
            scheduling := schedulingmocks.NewMockScheduling(ctl)
            res := resource.NewMockResource(ctl)
            dynconfig := configmocks.NewMockDynconfigInterface(ctl)
            storage := storagemocks.NewMockStorage(ctl)
            hostManager := resource.NewMockHostManager(ctl)
            host := resource.NewHost(
                mockRawHost.ID, mockRawHost.IP, mockRawHost.Hostname, mockRawHost.Port, mockRawHost.DownloadPort, mockRawHost.Type,
                resource.WithCPU(mockCPU), resource.WithMemory(mockMemory), resource.WithNetwork(mockNetwork), resource.WithDisk(mockDisk), resource.WithBuild(mockBuild))
            svc := NewV2(&config.Config{Scheduler: mockSchedulerConfig, Metrics: config.MetricsConfig{EnableHost: true}}, res, scheduling, dynconfig, storage)

            tc.mock(host, hostManager, res.EXPECT(), hostManager.EXPECT())
            resp, err := svc.ListHosts(context.Background())
            tc.expect(t, host, resp.Hosts, err)
        })
    }
}

func TestServiceV2_DeleteHost(t *testing.T) {
    tests := []struct {
        name   string
        mock   func(host *resource.Host, mockPeer *resource.Peer, hostManager resource.HostManager, mr *resource.MockResourceMockRecorder, mh *resource.MockHostManagerMockRecorder)
        expect func(t *testing.T, peer *resource.Peer, err error)
    }{
        {
            name: "host not found",
            mock: func(host *resource.Host, mockPeer *resource.Peer, hostManager resource.HostManager, mr *resource.MockResourceMockRecorder, mh *resource.MockHostManagerMockRecorder) {
                gomock.InOrder(
                    mr.HostManager().Return(hostManager).Times(1),
                    mh.Load(gomock.Any()).Return(nil, false).Times(1),
                )
            },
            expect: func(t *testing.T, peer *resource.Peer, err error) {
                assert := assert.New(t)
                assert.Error(err)
            },
        },
        {
            name: "host has not peers",
            mock: func(host *resource.Host, mockPeer *resource.Peer, hostManager resource.HostManager, mr *resource.MockResourceMockRecorder, mh *resource.MockHostManagerMockRecorder) {
                gomock.InOrder(
                    mr.HostManager().Return(hostManager).Times(1),
                    mh.Load(gomock.Any()).Return(host, true).Times(1),
                )
            },
            expect: func(t *testing.T, peer *resource.Peer, err error) {
                assert := assert.New(t)
                assert.NoError(err)
            },
        },
        {
            name: "peer leaves succeeded",
            mock: func(host *resource.Host, mockPeer *resource.Peer, hostManager resource.HostManager, mr *resource.MockResourceMockRecorder, mh *resource.MockHostManagerMockRecorder) {
                host.Peers.Store(mockPeer.ID, mockPeer)
                mockPeer.FSM.SetState(resource.PeerStatePending)
                gomock.InOrder(
                    mr.HostManager().Return(hostManager).Times(1),
                    mh.Load(gomock.Any()).Return(host, true).Times(1),
                )
            },
            expect: func(t *testing.T, peer *resource.Peer, err error) {
                assert := assert.New(t)
                assert.NoError(err)
                assert.Equal(peer.FSM.Current(), resource.PeerStateLeave)
            },
        },
    }

    for _, tc := range tests {
        t.Run(tc.name, func(t *testing.T) {
            ctl := gomock.NewController(t)
            defer ctl.Finish()
            scheduling := schedulingmocks.NewMockScheduling(ctl)
            res := resource.NewMockResource(ctl)
            dynconfig := configmocks.NewMockDynconfigInterface(ctl)
            storage := storagemocks.NewMockStorage(ctl)
            hostManager := resource.NewMockHostManager(ctl)
            host := resource.NewHost(
                mockRawHost.ID, mockRawHost.IP, mockRawHost.Hostname,
                mockRawHost.Port, mockRawHost.DownloadPort, mockRawHost.Type)
            mockTask := resource.NewTask(mockTaskID, mockTaskURL, mockTaskTag, mockTaskApplication, commonv2.TaskType_STANDARD, mockTaskFilteredQueryParams, mockTaskHeader, mockTaskBackToSourceLimit, resource.WithDigest(mockTaskDigest), resource.WithPieceLength(mockTaskPieceLength))
            mockPeer := resource.NewPeer(mockSeedPeerID, mockTask, host)
            svc := NewV2(&config.Config{Scheduler: mockSchedulerConfig, Metrics: config.MetricsConfig{EnableHost: true}}, res, scheduling, dynconfig, storage)

            tc.mock(host, mockPeer, hostManager, res.EXPECT(), hostManager.EXPECT())
            tc.expect(t, mockPeer, svc.DeleteHost(context.Background(), &schedulerv2.DeleteHostRequest{HostId: mockHostID}))
        })
    }
}

func TestServiceV2_handleRegisterPeerRequest(t *testing.T) {
    dgst := mockTaskDigest.String()

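For orientation, here is a minimal caller-side sketch (not part of the commit) of the two service methods these tests exercise. It assumes NewV2 returns a *V2 and that the snippet lives alongside the service in the scheduler/service package with its existing imports; the method signatures, resp.Hosts, and the Host fields are taken from the assertions above, everything else is illustrative.

// Hypothetical usage sketch; error handling and logging are deliberately minimal.
func exampleListAndDeleteHosts(ctx context.Context, svc *V2) error {
    // ListHosts returns the hosts tracked by the host manager, as the
    // mocked HostManager.Range expectations above suggest.
    resp, err := svc.ListHosts(ctx)
    if err != nil {
        return err
    }

    for _, host := range resp.Hosts {
        fmt.Printf("host %s (%s:%d)\n", host.Id, host.Ip, host.Port)
    }

    // DeleteHost returns an error for an unknown host id ("host not found")
    // and otherwise marks the host's peers as leaving ("peer leaves succeeded").
    return svc.DeleteHost(ctx, &schedulerv2.DeleteHostRequest{HostId: "example-host-id"})
}
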
8 changes: 4 additions & 4 deletions test/e2e/v2/host_test.go
@@ -44,7 +44,7 @@ var _ = Describe("Clients Leaving", func() {
Expect(err).NotTo(HaveOccurred())

// Add taint to master node to prevent new client from starting.
- out, err := util.KubeCtlCommand("-n", util.DragonflyNamespace, "taint", "nodes", "kind-control-plane", "master:NoSchedule").CombinedOutput()
+ out, err := util.KubeCtlCommand("-n", util.DragonflyNamespace, "taint", "nodes", "kind-control-plane", "E2E:NoSchedule").CombinedOutput()
fmt.Println(string(out))
Expect(err).NotTo(HaveOccurred())

@@ -58,7 +58,7 @@ var _ = Describe("Clients Leaving", func() {
Expect(calculateNormalHostCountFromScheduler(schedulerClient)).To(Equal(hostCount - 1))

// Remove taint in master node.
- out, err = util.KubeCtlCommand("-n", util.DragonflyNamespace, "taint", "nodes", "kind-control-plane", "master:NoSchedule-").CombinedOutput()
+ out, err = util.KubeCtlCommand("-n", util.DragonflyNamespace, "taint", "nodes", "kind-control-plane", "E2E:NoSchedule-").CombinedOutput()
fmt.Println(string(out))
Expect(err).NotTo(HaveOccurred())

@@ -79,7 +79,7 @@ var _ = Describe("Clients Leaving", func() {
Expect(err).NotTo(HaveOccurred())

// Add taint to master node to prevent new client from starting.
- out, err := util.KubeCtlCommand("-n", util.DragonflyNamespace, "taint", "nodes", "kind-control-plane", "master:NoSchedule").CombinedOutput()
+ out, err := util.KubeCtlCommand("-n", util.DragonflyNamespace, "taint", "nodes", "kind-control-plane", "E2E:NoSchedule").CombinedOutput()
fmt.Println(string(out))
Expect(err).NotTo(HaveOccurred())

@@ -93,7 +93,7 @@ var _ = Describe("Clients Leaving", func() {
Expect(calculateNormalHostCountFromScheduler(schedulerClient)).To(Equal(hostCount - 1))

// Remove taint in master node.
- out, err = util.KubeCtlCommand("-n", util.DragonflyNamespace, "taint", "nodes", "kind-control-plane", "master:NoSchedule-").CombinedOutput()
+ out, err = util.KubeCtlCommand("-n", util.DragonflyNamespace, "taint", "nodes", "kind-control-plane", "E2E:NoSchedule-").CombinedOutput()
fmt.Println(string(out))
Expect(err).NotTo(HaveOccurred())

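The four e2e hunks repeat the same add/remove taint pair with the renamed E2E key. A small helper along these lines could fold that repetition; this is a sketch rather than part of the commit, and it reuses only the util.KubeCtlCommand call and util.DragonflyNamespace constant shown in the diff above.

// taintControlPlane adds the E2E:NoSchedule taint to the kind control-plane
// node when add is true, and removes it (kubectl's trailing "-") otherwise.
// Hypothetical helper for test/e2e/v2; it would rely on the existing imports.
func taintControlPlane(add bool) error {
    taint := "E2E:NoSchedule"
    if !add {
        taint += "-"
    }

    out, err := util.KubeCtlCommand("-n", util.DragonflyNamespace, "taint", "nodes", "kind-control-plane", taint).CombinedOutput()
    fmt.Println(string(out))
    return err
}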
