Skip to content

Commit

Permalink
fix: Resolve issue that would cause inaccurate poll intervals in poll…
Browse files Browse the repository at this point in the history
…ing functions (#343)

* Fix broken poll delay system

* Fix default poll delay

* fix nolint
  • Loading branch information
lgarber-akamai authored Jun 27, 2023
1 parent 6a65820 commit a77eea5
Show file tree
Hide file tree
Showing 2 changed files with 19 additions and 19 deletions.
8 changes: 4 additions & 4 deletions client.go
Original file line number Diff line number Diff line change
Expand Up @@ -52,7 +52,7 @@ type Client struct {
debug bool
retryConditionals []RetryConditional

millisecondsPerPoll time.Duration
pollInterval time.Duration

baseURL string
apiVersion string
Expand Down Expand Up @@ -344,14 +344,14 @@ func (c *Client) SetRetryCount(count int) *Client {
// SetPollDelay sets the duration to wait between events or status polls.
// Affects all WaitFor* functions and retries.
func (c *Client) SetPollDelay(delay time.Duration) *Client {
c.millisecondsPerPoll = delay
c.pollInterval = delay
return c
}

// GetPollDelay gets the duration to wait between events or status polls.
// Affects all WaitFor* functions and retries.
func (c *Client) GetPollDelay() time.Duration {
return c.millisecondsPerPoll
return c.pollInterval
}

// SetHeader sets a custom header to be used in all API requests made with the current
Expand Down Expand Up @@ -405,7 +405,7 @@ func NewClient(hc *http.Client) (client Client) {

client.
SetRetryWaitTime((1000 * APISecondsPerPoll) * time.Millisecond).
SetPollDelay(1000 * APISecondsPerPoll).
SetPollDelay(APISecondsPerPoll * time.Second).
SetRetries().
SetDebug(envDebug)

Expand Down
30 changes: 15 additions & 15 deletions waitfor.go
Original file line number Diff line number Diff line change
Expand Up @@ -25,7 +25,7 @@ func (client Client) WaitForInstanceStatus(ctx context.Context, instanceID int,
ctx, cancel := context.WithTimeout(ctx, time.Duration(timeoutSeconds)*time.Second)
defer cancel()

ticker := time.NewTicker(client.millisecondsPerPoll * time.Millisecond)
ticker := time.NewTicker(client.pollInterval)
defer ticker.Stop()

for {
Expand All @@ -52,7 +52,7 @@ func (client Client) WaitForInstanceDiskStatus(ctx context.Context, instanceID i
ctx, cancel := context.WithTimeout(ctx, time.Duration(timeoutSeconds)*time.Second)
defer cancel()

ticker := time.NewTicker(client.millisecondsPerPoll * time.Millisecond)
ticker := time.NewTicker(client.pollInterval)
defer ticker.Stop()

for {
Expand Down Expand Up @@ -88,7 +88,7 @@ func (client Client) WaitForVolumeStatus(ctx context.Context, volumeID int, stat
ctx, cancel := context.WithTimeout(ctx, time.Duration(timeoutSeconds)*time.Second)
defer cancel()

ticker := time.NewTicker(client.millisecondsPerPoll * time.Millisecond)
ticker := time.NewTicker(client.pollInterval)
defer ticker.Stop()

for {
Expand All @@ -115,7 +115,7 @@ func (client Client) WaitForSnapshotStatus(ctx context.Context, instanceID int,
ctx, cancel := context.WithTimeout(ctx, time.Duration(timeoutSeconds)*time.Second)
defer cancel()

ticker := time.NewTicker(client.millisecondsPerPoll * time.Millisecond)
ticker := time.NewTicker(client.pollInterval)
defer ticker.Stop()

for {
Expand Down Expand Up @@ -144,7 +144,7 @@ func (client Client) WaitForVolumeLinodeID(ctx context.Context, volumeID int, li
ctx, cancel := context.WithTimeout(ctx, time.Duration(timeoutSeconds)*time.Second)
defer cancel()

ticker := time.NewTicker(client.millisecondsPerPoll * time.Millisecond)
ticker := time.NewTicker(client.pollInterval)
defer ticker.Stop()

for {
Expand Down Expand Up @@ -175,7 +175,7 @@ func (client Client) WaitForLKEClusterStatus(ctx context.Context, clusterID int,
ctx, cancel := context.WithTimeout(ctx, time.Duration(timeoutSeconds)*time.Second)
defer cancel()

ticker := time.NewTicker(client.millisecondsPerPoll * time.Millisecond)
ticker := time.NewTicker(client.pollInterval)
defer ticker.Stop()

for {
Expand Down Expand Up @@ -237,7 +237,7 @@ func (client Client) WaitForLKEClusterConditions(
return fmt.Errorf("failed to get Kubeconfig for LKE cluster %d: %w", clusterID, err)
}

ticker := time.NewTicker(client.millisecondsPerPoll * time.Millisecond)
ticker := time.NewTicker(client.pollInterval)
defer ticker.Stop()

conditionOptions := ClusterConditionOptions{LKEClusterKubeconfig: lkeKubeConfig, TransportWrapper: options.TransportWrapper}
Expand Down Expand Up @@ -313,7 +313,7 @@ func (client Client) WaitForEventFinished(
log.Printf("[INFO] Waiting %d seconds for %s events since %v for %s %v", int(duration.Seconds()), action, minStart, titledEntityType, id)
}

ticker := time.NewTicker(client.millisecondsPerPoll * time.Millisecond)
ticker := time.NewTicker(client.pollInterval)

// avoid repeating log messages
nextLog := ""
Expand Down Expand Up @@ -412,7 +412,7 @@ func (client Client) WaitForImageStatus(ctx context.Context, imageID string, sta
ctx, cancel := context.WithTimeout(ctx, time.Duration(timeoutSeconds)*time.Second)
defer cancel()

ticker := time.NewTicker(client.millisecondsPerPoll * time.Millisecond)
ticker := time.NewTicker(client.pollInterval)
defer ticker.Stop()

for {
Expand All @@ -438,7 +438,7 @@ func (client Client) WaitForMySQLDatabaseBackup(ctx context.Context, dbID int, l
ctx, cancel := context.WithTimeout(ctx, time.Duration(timeoutSeconds)*time.Second)
defer cancel()

ticker := time.NewTicker(client.millisecondsPerPoll * time.Millisecond)
ticker := time.NewTicker(client.pollInterval)
defer ticker.Stop()

for {
Expand All @@ -465,7 +465,7 @@ func (client Client) WaitForPostgresDatabaseBackup(ctx context.Context, dbID int
ctx, cancel := context.WithTimeout(ctx, time.Duration(timeoutSeconds)*time.Second)
defer cancel()

ticker := time.NewTicker(client.millisecondsPerPoll * time.Millisecond)
ticker := time.NewTicker(client.pollInterval)
defer ticker.Stop()

for {
Expand Down Expand Up @@ -515,7 +515,7 @@ func (client Client) WaitForDatabaseStatus(
ctx, cancel := context.WithTimeout(ctx, time.Duration(timeoutSeconds)*time.Second)
defer cancel()

ticker := time.NewTicker(client.millisecondsPerPoll * time.Millisecond)
ticker := time.NewTicker(client.pollInterval)
defer ticker.Stop()

for {
Expand Down Expand Up @@ -615,7 +615,7 @@ func (p *EventPoller) PreTask(ctx context.Context) error {
}

func (p *EventPoller) WaitForLatestUnknownEvent(ctx context.Context) (*Event, error) {
ticker := time.NewTicker(p.client.millisecondsPerPoll * time.Millisecond)
ticker := time.NewTicker(p.client.pollInterval)
defer ticker.Stop()

f := Filter{
Expand Down Expand Up @@ -666,7 +666,7 @@ func (p *EventPoller) WaitForFinished(
ctx, cancel := context.WithTimeout(ctx, time.Duration(timeoutSeconds)*time.Second)
defer cancel()

ticker := time.NewTicker(p.client.millisecondsPerPoll * time.Millisecond)
ticker := time.NewTicker(p.client.pollInterval)
defer ticker.Stop()

event, err := p.WaitForLatestUnknownEvent(ctx)
Expand Down Expand Up @@ -715,7 +715,7 @@ func (client Client) WaitForResourceFree(
ctx, cancel := context.WithTimeout(ctx, time.Duration(timeoutSeconds)*time.Second)
defer cancel()

ticker := time.NewTicker(client.millisecondsPerPoll * time.Millisecond)
ticker := time.NewTicker(client.pollInterval)
defer ticker.Stop()

// A helper function to determine whether a resource is busy
Expand Down

0 comments on commit a77eea5

Please sign in to comment.