@@ -8,6 +8,7 @@
 	"github.com/docker/swarmkit/manager/state"
 	"github.com/docker/swarmkit/manager/state/store"
 	"github.com/stretchr/testify/assert"
+	"github.com/stretchr/testify/require"
 )
 
 func TestConstraintEnforcer(t *testing.T) {
@@ -168,3 +169,112 @@ func TestConstraintEnforcer(t *testing.T) {
 	assert.Equal(t, "id4", shutdown3.ID)
 	assert.Equal(t, api.TaskStateRejected, shutdown3.Status.State)
 }
+
+// TestOutdatedTaskPlacementConstraints tests the following scenario: if a
+// task is associated with a service, we must use the constraints from the
+// current service spec rather than the constraints from the task spec,
+// because the latter may be outdated. This will happen if the service was
+// previously updated in a way which only changed the placement constraints
+// and the node matched those constraints both before and after the update.
+// In the case of such updates, the tasks are not considered "dirty" and are
+// not restarted, but it does mean that the task spec's placement
+// constraints are outdated. Consider this example:
+//   - A service is created with no constraints and a task is scheduled
+//     to a node.
+//   - The node is updated to add a label. This doesn't affect the task
+//     on that node because the task has no constraints.
+//   - The service is updated to add a node label constraint which
+//     matches the label that was just added to the node. The updater
+//     does not shut down the task because only the constraints have
+//     changed and the node still matches the updated constraints.
+// This test initializes a new in-memory store with the expected state from
+// above, starts a new constraint enforcer, and then updates the node to
+// remove the node label. Since the node no longer satisfies the placement
+// constraints of the service spec, the task should be shut down despite the
+// fact that the task's own spec still has the original placement constraints.
+func TestOutdatedTaskPlacementConstraints(t *testing.T) {
+	node := &api.Node{
+		ID: "id0",
+		Spec: api.NodeSpec{
+			Annotations: api.Annotations{
+				Name: "node1",
+				Labels: map[string]string{
+					"foo": "bar",
+				},
+			},
+			Availability: api.NodeAvailabilityActive,
+		},
+		Status: api.NodeStatus{
+			State: api.NodeStatus_READY,
+		},
+		Role: api.NodeRoleWorker,
+	}
+
+	service := &api.Service{
+		ID: "id1",
+		Spec: api.ServiceSpec{
+			Annotations: api.Annotations{
+				Name: "service1",
+			},
+			Task: api.TaskSpec{
+				Placement: &api.Placement{
+					Constraints: []string{
+						"node.labels.foo == bar",
+					},
+				},
+			},
+		},
+	}
+
+	task := &api.Task{
+		ID: "id2",
+		Spec: api.TaskSpec{
+			Placement: nil, // Note: no placement constraints.
+		},
+		ServiceID: service.ID,
+		NodeID:    node.ID,
+		Status: api.TaskStatus{
+			State: api.TaskStateRunning,
+		},
+		DesiredState: api.TaskStateRunning,
+	}
+
+	s := store.NewMemoryStore(nil)
+	require.NotNil(t, s)
+	defer s.Close()
+
+	require.NoError(t, s.Update(func(tx store.Tx) error {
+		// Prepopulate the node, service, and task.
+		for _, err := range []error{
+			store.CreateNode(tx, node),
+			store.CreateService(tx, service),
+			store.CreateTask(tx, task),
+		} {
+			if err != nil {
+				return err
+			}
+		}
+		return nil
+	}))
+
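+	// Watch for task updates before starting the enforcer so that the
+	// rejection event cannot be missed.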
+	watch, cancel := state.Watch(s.WatchQueue(), api.EventUpdateTask{})
+	defer cancel()
+
+	constraintEnforcer := New(s)
+	defer constraintEnforcer.Stop()
+
+	go constraintEnforcer.Run()
+
+	// Update the node to remove the node label.
+	require.NoError(t, s.Update(func(tx store.Tx) error {
+		node = store.GetNode(tx, node.ID)
+		delete(node.Spec.Annotations.Labels, "foo")
+		return store.UpdateNode(tx, node)
+	}))
+
+	// The task should be rejected immediately.
+	task = testutils.WatchTaskUpdate(t, watch)
+	assert.Equal(t, api.TaskStateRejected, task.Status.State)
+}
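
For context, here is a minimal sketch (not part of this diff) of the constraint-resolution logic the new test exercises: for a service-owned task, the enforcer must read the placement constraints from the current service spec, falling back to the task's own spec only for orphaned tasks. The helper name effectiveConstraints is hypothetical, not swarmkit's actual API; it assumes only the store's GetService accessor, the ReadTx type, and the api types used above.

```go
func effectiveConstraints(tx store.ReadTx, task *api.Task) []string {
	// Service-owned task: the task spec's copy of the constraints may
	// predate a constraints-only service update, so consult the service.
	if task.ServiceID != "" {
		if service := store.GetService(tx, task.ServiceID); service != nil {
			if p := service.Spec.Task.Placement; p != nil {
				return p.Constraints
			}
			return nil
		}
	}
	// Orphaned task: the task's own spec is all there is to consult.
	if p := task.Spec.Placement; p != nil {
		return p.Constraints
	}
	return nil
}
```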