/*
Copyright 2020 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package core

import (
	"context"
	"fmt"
	"sync"
	"time"

	gochache "github.com/patrickmn/go-cache"
	corev1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/labels"
	"k8s.io/apimachinery/pkg/types"
	informerv1 "k8s.io/client-go/informers/core/v1"
	listerv1 "k8s.io/client-go/listers/core/v1"
	klog "k8s.io/klog/v2"
	"k8s.io/kubernetes/pkg/scheduler/framework"
	"sigs.k8s.io/controller-runtime/pkg/client"

	"sigs.k8s.io/scheduler-plugins/apis/scheduling/v1alpha1"
	"sigs.k8s.io/scheduler-plugins/pkg/logger"
	"sigs.k8s.io/scheduler-plugins/pkg/util"
)

type Status string

const (
	// PodGroupNotSpecified denotes no PodGroup is specified in the Pod spec.
	PodGroupNotSpecified Status = "PodGroup not specified"
	// PodGroupNotFound denotes the PodGroup specified in the Pod spec is
	// not found in the API server.
	PodGroupNotFound Status = "PodGroup not found"
	Success          Status = "Success"
	Wait             Status = "Wait"

	permitStateKey = "PermitFluence"
)

// TODO: we should eventually store the group name here so we can reassociate
// pods with groups on reload.

// FluxStateData holds the node name that Fluxion assigned to a pod. It is
// written to the CycleState in PreFilter and read back later in the cycle.
type FluxStateData struct {
	NodeName string
}

// PermitState records whether the sibling pods of a group should be activated.
type PermitState struct {
	Activate bool
}

// Clone returns a copy of the PermitState so the framework can snapshot it.
func (s *PermitState) Clone() framework.StateData {
	return &PermitState{Activate: s.Activate}
}

// Clone returns a copy of the FluxStateData so the framework can snapshot it.
func (s *FluxStateData) Clone() framework.StateData {
	return &FluxStateData{NodeName: s.NodeName}
}

// Manager defines the interfaces for PodGroup management.
type Manager interface {
	PreFilter(context.Context, *corev1.Pod, *framework.CycleState) error
	GetPodNode(*corev1.Pod) string
	GetPodGroup(context.Context, *corev1.Pod) (string, *v1alpha1.PodGroup)
	GetCreationTimestamp(*corev1.Pod, time.Time) metav1.MicroTime
	DeletePermittedPodGroup(string)
	Permit(context.Context, *framework.CycleState, *corev1.Pod) Status
	CalculateAssignedPods(string, string) int
	ActivateSiblings(pod *corev1.Pod, state *framework.CycleState)
	BackoffPodGroup(string, time.Duration)
}

// PodGroupManager defines the scheduling operations for PodGroups.
type PodGroupManager struct {
	// client is a generic controller-runtime client to manipulate both core resources and PodGroups.
	client client.Client
	// snapshotSharedLister is the shared lister over the scheduler's snapshot.
	snapshotSharedLister framework.SharedLister
	// scheduleTimeout is the default timeout for podgroup scheduling.
	// If a podgroup's scheduleTimeoutSeconds is set, it is used instead.
	scheduleTimeout *time.Duration
	// permittedpodGroup stores the names of podgroups that have passed the pre-resource check.
	permittedpodGroup *gochache.Cache
	// backedOffpodGroup stores the names of podgroups that failed scheduling recently.
	backedOffpodGroup *gochache.Cache
	// podLister lists pods for group membership queries.
	podLister listerv1.PodLister
	// This isn't a great way to save state, but we can improve upon it.
	// We should have a way to load job ids into this map if fluence is recreated.
	// If we can annotate them in Fluxion and query for that, we can!
	groupToJobId map[string]uint64
	podToNode    map[string]string
	// TODO: the embedded RWMutex and the named mutex are redundant; we should
	// settle on one of them.
	sync.RWMutex
	mutex sync.Mutex
	log   *logger.DebugLogger
}
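
// Compile-time check (an illustrative addition, not in the original file)
// that *PodGroupManager satisfies the Manager interface above.
var _ Manager = &PodGroupManager{}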

// NewPodGroupManager creates a new operation object.
func NewPodGroupManager(
	client client.Client,
	snapshotSharedLister framework.SharedLister,
	scheduleTimeout *time.Duration,
	podInformer informerv1.PodInformer,
	log *logger.DebugLogger,
) *PodGroupManager {
	podGroupManager := &PodGroupManager{
		client:               client,
		snapshotSharedLister: snapshotSharedLister,
		scheduleTimeout:      scheduleTimeout,
		podLister:            podInformer.Lister(),
		permittedpodGroup:    gochache.New(3*time.Second, 3*time.Second),
		backedOffpodGroup:    gochache.New(10*time.Second, 10*time.Second),
		groupToJobId:         map[string]uint64{},
		podToNode:            map[string]string{},
		log:                  log,
	}
	return podGroupManager
}
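
// A minimal wiring sketch, assuming a plugin New function with a
// framework.Handle and a configured controller-runtime client (the names
// k8sClient and log here are illustrative, not part of this package):
//
//	timeout := 10 * time.Second
//	pgMgr := NewPodGroupManager(
//		k8sClient,
//		handle.SnapshotSharedLister(),
//		&timeout,
//		handle.SharedInformerFactory().Core().V1().Pods(),
//		log,
//	)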

// BackoffPodGroup marks a group as recently failed so PreFilter rejects it
// for the duration of the backoff window.
func (podGroupManager *PodGroupManager) BackoffPodGroup(groupName string, backoff time.Duration) {
	if backoff == time.Duration(0) {
		return
	}
	podGroupManager.backedOffpodGroup.Add(groupName, nil, backoff)
}

// ActivateSiblings stashes the pods belonging to the same PodGroup as the given pod
// in the given state, under the reserved key "kubernetes.io/pods-to-activate".
func (podGroupManager *PodGroupManager) ActivateSiblings(pod *corev1.Pod, state *framework.CycleState) {
	groupName := util.GetPodGroupLabel(pod)
	if groupName == "" {
		return
	}

	// Only proceed if it's explicitly requested to activate sibling pods.
	if c, err := state.Read(permitStateKey); err != nil {
		return
	} else if s, ok := c.(*PermitState); !ok || !s.Activate {
		return
	}

	pods, err := podGroupManager.podLister.Pods(pod.Namespace).List(
		labels.SelectorFromSet(labels.Set{v1alpha1.PodGroupLabel: groupName}),
	)
	if err != nil {
		klog.ErrorS(err, "Failed to obtain pods belonging to a PodGroup", "podGroup", groupName)
		return
	}

	// Remove the triggering pod itself from the list of siblings.
	for i := range pods {
		if pods[i].UID == pod.UID {
			pods = append(pods[:i], pods[i+1:]...)
			break
		}
	}

	if len(pods) != 0 {
		if c, err := state.Read(framework.PodsToActivateKey); err == nil {
			if s, ok := c.(*framework.PodsToActivate); ok {
				s.Lock()
				for _, sibling := range pods {
					namespacedName := GetNamespacedName(sibling)
					s.Map[namespacedName] = sibling
				}
				s.Unlock()
			}
		}
	}
}
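
// Callers opt in by stashing a PermitState before invoking this method; for
// example (illustrative only; Permit below writes this key when the first
// assigned pod of a group arrives):
//
//	state.Write(permitStateKey, &PermitState{Activate: true})
//	podGroupManager.ActivateSiblings(pod, state)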

// GetStatuses returns a string of pod phases (one per pod) to show for debugging purposes.
func (podGroupManager *PodGroupManager) GetStatuses(
	pods []*corev1.Pod,
) string {
	statuses := ""
	// We need to distinguish 0 from the default and not finding anything
	for _, pod := range pods {
		statuses += " " + string(pod.Status.Phase)
	}
	return statuses
}

// GetPodNode is a quick lookup to see if we have a node assigned for the pod.
func (podGroupManager *PodGroupManager) GetPodNode(pod *corev1.Pod) string {
	// Guard the shared map; PreFilter writes to it under the same mutex.
	podGroupManager.mutex.Lock()
	defer podGroupManager.mutex.Unlock()
	return podGroupManager.podToNode[pod.Name]
}

// Permit decides whether a pod may proceed: once the number of assigned pods
// in the group reaches MinMember, it returns Success; otherwise the pod waits.
func (podGroupManager *PodGroupManager) Permit(ctx context.Context, state *framework.CycleState, pod *corev1.Pod) Status {
	groupName, podGroup := podGroupManager.GetPodGroup(ctx, pod)
	if groupName == "" {
		return PodGroupNotSpecified
	}
	if podGroup == nil {
		// A Pod with a podGroup name but without a PodGroup found is denied.
		return PodGroupNotFound
	}

	assigned := podGroupManager.CalculateAssignedPods(podGroup.Name, podGroup.Namespace)
	// The number of pods that have been assigned nodes is calculated from the snapshot.
	// The current pod is not included in the snapshot during the current scheduling cycle.
	if int32(assigned)+1 >= podGroup.Spec.MinMember {
		return Success
	}

	if assigned == 0 {
		// Given that we've reached Permit(), all PreFilter checks (minMember & minResource)
		// have already passed, so if assigned == 0 it could be because:
		// - minResource got satisfied
		// - new pods were added
		// In either case, we should (and only should) use this 0-th pod to trigger
		// activating its siblings.
		// It would be inefficient to trigger activating siblings unconditionally.
		// See https://github.com/kubernetes-sigs/scheduler-plugins/issues/682
		state.Write(permitStateKey, &PermitState{Activate: true})
	}

	return Wait
}
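
// A hypothetical Permit plugin hook (names illustrative; not part of this
// file) might map these statuses to framework codes roughly as follows:
//
//	switch pgMgr.Permit(ctx, state, pod) {
//	case core.Success:
//		return framework.NewStatus(framework.Success, ""), 0
//	case core.Wait:
//		return framework.NewStatus(framework.Wait, ""), *scheduleTimeout
//	default:
//		return framework.NewStatus(framework.Unschedulable, "pod group not ready"), 0
//	}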

// PreFilter filters out a pod if
// 1. it belongs to a podgroup that was recently denied, or
// 2. the total number of pods in the podgroup is less than the minimum number
//    of pods that is required to be scheduled.
func (podGroupManager *PodGroupManager) PreFilter(
	ctx context.Context,
	pod *corev1.Pod,
	state *framework.CycleState,
) error {
	podGroupManager.log.Info("[PodGroup PreFilter] pod %s", klog.KObj(pod))
	groupName, podGroup := podGroupManager.GetPodGroup(ctx, pod)
	if podGroup == nil {
		return nil
	}

	_, exists := podGroupManager.backedOffpodGroup.Get(groupName)
	if exists {
		return fmt.Errorf("podGroup %v failed recently", groupName)
	}

	pods, err := podGroupManager.podLister.Pods(pod.Namespace).List(
		labels.SelectorFromSet(labels.Set{v1alpha1.PodGroupLabel: util.GetPodGroupLabel(pod)}),
	)
	if err != nil {
		return fmt.Errorf("podLister list pods failed: %w", err)
	}

	// Only allow scheduling the first pod in the group so the others come after.
	// Get statuses to show for debugging; this shows us the number of pods we
	// have in the set and their states.
	statuses := podGroupManager.GetStatuses(pods)
	podGroupManager.log.Info("[PodGroup PreFilter] group: %s pods: %s MinMember: %d Size: %d", groupName, statuses, podGroup.Spec.MinMember, len(pods))
	if len(pods) < int(podGroup.Spec.MinMember) {
		return fmt.Errorf("pre-filter pod %v cannot find enough sibling pods, "+
			"current pods number: %v, minMember of group: %v", pod.Name, len(pods), podGroup.Spec.MinMember)
	}

	// TODO: we can likely take advantage of these resources or other custom
	// attributes we add. For now, ignore them and calculate based on pod needs (above).
	// if podGroup.Spec.MinResources == nil {
	//	fmt.Printf("Fluence Min resources are null, skipping PreFilter")
	//	return nil
	// }

	// This is from coscheduling.
	// TODO(cwdsuzhou): This resource check may not always pre-catch an unschedulable pod group.
	// It only tries to PreFilter resource constraints, so even if a PodGroup passes here,
	// it may not necessarily pass Filter due to other constraints such as affinity/taints.
	_, exists = podGroupManager.permittedpodGroup.Get(groupName)
	if exists {
		podGroupManager.log.Info("[PodGroup PreFilter] Pod Group %s is already admitted", groupName)
		return nil
	}

	// TODO: right now we ask Fluxion for a podspec based on ONE representative pod, but
	// we have the whole group! We can handle different pod needs now :)
	repPod := pods[0]
	nodes, err := podGroupManager.AskFlux(ctx, *repPod, podGroup, groupName)
	if err != nil {
		podGroupManager.log.Info("[PodGroup PreFilter] Fluxion returned an error %s, not schedulable", err.Error())
		return err
	}
	podGroupManager.log.Info("Nodes selected %s (pod group %s)", nodes, groupName)

	// For some reason Fluxion gave us the wrong number of nodes: cancel the
	// job and reject the group for this cycle.
	if len(nodes) != len(pods) {
		podGroupManager.log.Warning("[PodGroup PreFilter] group %s needs %d nodes but Fluxion returned %d.", groupName, len(pods), len(nodes))
		podGroupManager.mutex.Lock()
		podGroupManager.cancelFluxJob(groupName, repPod)
		podGroupManager.mutex.Unlock()
		return fmt.Errorf("fluxion returned %d nodes for group %s, but %d are needed", len(nodes), groupName, len(pods))
	}

	// Create a fluxState (CycleState) entry with each pod's assigned node - this is
	// used to retrieve the specific node assigned to a pod later in the cycle.
	// Note that this probably is not useful beyond the pod we are in the context
	// of, but why not do it.
	for i, node := range nodes {
		pod := pods[i]
		stateData := FluxStateData{NodeName: node}
		state.Write(framework.StateKey(pod.Name), &stateData)
		// Also save to the podToNode lookup.
		podGroupManager.mutex.Lock()
		podGroupManager.podToNode[pod.Name] = node
		podGroupManager.mutex.Unlock()
	}
	podGroupManager.permittedpodGroup.Add(groupName, groupName, *podGroupManager.scheduleTimeout)
	return nil
}
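
// Later in the cycle, a Filter hook could read the stashed assignment back;
// a sketch of such a caller outside this package (illustrative only):
//
//	if value, err := state.Read(framework.StateKey(pod.Name)); err == nil {
//		if stateData, ok := value.(*core.FluxStateData); ok && stateData.NodeName != node.Name {
//			return framework.NewStatus(framework.Unschedulable, "pod is not assigned to this node")
//		}
//	}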

// GetCreationTimestamp returns the creation time of a podGroup or a pod as a metav1.MicroTime.
// The Status.CreationTime is set by the PodGroup reconciler, which has to happen before we have
// a PodGroup. I don't see cases where this wouldn't happen, but just in case, we fall back to
// converting the PodGroup's CreationTimestamp to a MicroTime.
func (podGroupManager *PodGroupManager) GetCreationTimestamp(pod *corev1.Pod, ts time.Time) metav1.MicroTime {
	groupName := util.GetPodGroupLabel(pod)
	if len(groupName) == 0 {
		return metav1.NewMicroTime(ts)
	}
	var podGroup v1alpha1.PodGroup
	if err := podGroupManager.client.Get(context.TODO(), types.NamespacedName{Namespace: pod.Namespace, Name: groupName}, &podGroup); err != nil {
		return metav1.NewMicroTime(ts)
	}
	// First preference goes to microseconds. This should be set, as it is set by the first
	// reconcile, and we wouldn't have a pod group if it didn't pass through that.
	if !podGroup.Status.CreationTime.IsZero() {
		return podGroup.Status.CreationTime
	}
	// Fall back to the CreationTimestamp from Kubernetes, in seconds.
	// In practice this should not happen.
	return metav1.NewMicroTime(podGroup.CreationTimestamp.Time)
}

// CalculateAssignedPods returns the number of pods that have been assigned nodes: assumed or bound.
func (podGroupManager *PodGroupManager) CalculateAssignedPods(podGroupName, namespace string) int {
	nodeInfos, err := podGroupManager.snapshotSharedLister.NodeInfos().List()
	if err != nil {
		podGroupManager.log.Error("Cannot get nodeInfos from frameworkHandle: %s", err)
		return 0
	}
	var count int
	for _, nodeInfo := range nodeInfos {
		for _, podInfo := range nodeInfo.Pods {
			pod := podInfo.Pod
			if util.GetPodGroupLabel(pod) == podGroupName && pod.Namespace == namespace && pod.Spec.NodeName != "" {
				count++
			}
		}
	}
	return count
}

// DeletePermittedPodGroup removes a podGroup from the permitted cache, e.g., one
// that passed PreFilter but reached PostFilter.
func (podGroupManager *PodGroupManager) DeletePermittedPodGroup(groupName string) {
	podGroupManager.permittedpodGroup.Delete(groupName)
}

// GetPodGroup returns the namespaced name and the PodGroup that a Pod belongs to.
func (podGroupManager *PodGroupManager) GetPodGroup(ctx context.Context, pod *corev1.Pod) (string, *v1alpha1.PodGroup) {
	groupName := util.GetPodGroupLabel(pod)
	if len(groupName) == 0 {
		return "", nil
	}
	var podGroup v1alpha1.PodGroup
	if err := podGroupManager.client.Get(ctx, types.NamespacedName{Namespace: pod.Namespace, Name: groupName}, &podGroup); err != nil {
		return fmt.Sprintf("%v/%v", pod.Namespace, groupName), nil
	}
	return fmt.Sprintf("%v/%v", pod.Namespace, groupName), &podGroup
}
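
// For example, for a pod in namespace "default" labeled with group "job-1",
// GetPodGroup returns ("default/job-1", &pg) when the PodGroup object exists,
// and ("default/job-1", nil) when the lookup fails.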

// GetNamespacedName returns the namespaced name, e.g. "default/worker-0"
// for a pod named "worker-0" in namespace "default".
func GetNamespacedName(obj metav1.Object) string {
	return fmt.Sprintf("%v/%v", obj.GetNamespace(), obj.GetName())
}