Commit
bug fix for retries set to 0 + timing tests
Seth Ammons committed Apr 16, 2016
1 parent e35f6b7 commit 3f34fb7
Showing 4 changed files with 227 additions and 8 deletions.
27 changes: 27 additions & 0 deletions benchmarks/README.md
@@ -0,0 +1,27 @@
# Timing Tests

It was noted in [issue #2](https://github.com/sethgrid/pester/issues/2) that Pester may be slower than the standard library (along with a bug that was fixed).

I put together a quick test to see how Pester fares against the standard library. Here are the results:

```
$ go test
Standard Library Get 675178 ns Avg.
Pester, Default 690157 ns Avg.
Pester, Retries 1, Conc 1 671322 ns Avg.
Pester, Retries 2, Conc 2 764386 ns Avg.
Pester, Retries 3, Conc 3 893899 ns Avg.
Pester, Retries 0, Conc 1 730407 ns Avg.
Pester, Retries 0, Conc 2 1077721 ns Avg.
Pester, Retries 0, Conc 3 1889403 ns Avg.
Pester, Retries 0, Conc 1 1758464 ns Avg.
Pester, Retries 2, Conc 1 1249081 ns Avg.
Pester, Retries 3, Conc 1 1824322 ns Avg.
PASS
```

Running the tests locally multiple times shows some variance, but this is a typical result. In raw time, the averages are not far apart: a bit over 1 ms separates the best case (~675,000 ns) from the worst (~1,889,000 ns). As a ratio, however, that is a drift of up to roughly 3x.

The roughly 3x gap between the last test case, 'Retries 3, Conc 1', and both the default Pester client and the Standard Library http.Get call makes little sense, given that the default Pester client itself uses 'Retries 3, Conc 1' as its settings.

I think it is safe to say that there is no material difference in speed between the Standard Library and Pester.
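
For reference, the benchmarks exercise the same knobs you would set in your own code. Here is a minimal usage sketch (the URL is a placeholder; this is illustration, not part of the benchmark itself):

```go
package main

import (
	"fmt"
	"log"
	"net/http"

	"github.com/sethgrid/pester"
)

func main() {
	// Baseline: the standard library.
	if resp, err := http.Get("http://localhost:8080/"); err == nil {
		resp.Body.Close()
	}

	// Pester with explicit settings; the default client uses Retries 3, Conc 1.
	c := pester.New()
	c.MaxRetries = 3
	c.Concurrency = 1

	resp, err := c.Get("http://localhost:8080/")
	if err != nil {
		log.Fatal(err)
	}
	defer resp.Body.Close()
	fmt.Println("status:", resp.StatusCode)
}
```
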
164 changes: 164 additions & 0 deletions benchmarks/access_time_test.go
@@ -0,0 +1,164 @@
package main

/*
Can't use testing.B benchmarks because they eat up file descriptors.
*/

import (
"fmt"
"log"
"net"
"net/http"
"strconv"
"strings"
"testing"
"time"

"github.com/sethgrid/pester"
)

type getter func(string) (*http.Response, error)

func TestWarmup(t *testing.T) {
// The first request/test takes more time.
// Does not matter if we use http.Get or pester.Get
// nor if we use the default client or initialize one.
// I don't know why yet.
c := pester.New()
_ = runner("Warm Up", c.Get)
}

func TestStdLibGet(t *testing.T) {
// base case - get a url with std lib
fmt.Println(runner("Standard Library Get ", http.Get))
}

func TestPesterGetDefaults(t *testing.T) {
fmt.Println(runner("Pester, Default", pester.Get))
}

func TestPesterRetry1Conc1(t *testing.T) {
c := pester.New()
c.MaxRetries = 1
c.Concurrency = 1

fmt.Println(runner("Pester, Retries 1, Conc 1", c.Get))
}

func TestPesterRetry2Conc2(t *testing.T) {
c := pester.New()
c.MaxRetries = 2
c.Concurrency = 2

fmt.Println(runner("Pester, Retries 2, Conc 2", c.Get))
}

func TestPesterRetry3Conc3(t *testing.T) {
c := pester.New()
c.MaxRetries = 3
c.Concurrency = 3

fmt.Println(runner("Pester, Retries 3, Conc 3", c.Get))
}

func TestPesterGetRetry0Conc1(t *testing.T) {
c := pester.New()
c.MaxRetries = 0
c.Concurrency = 1

fmt.Println(runner("Pester, Retries 0, Conc 1", c.Get))
}

func TestPesterGetRetry0Conc2(t *testing.T) {
c := pester.New()
c.MaxRetries = 0
c.Concurrency = 2

fmt.Println(runner("Pester, Retries 0, Conc 2", c.Get))
}

func TestPesterGetRetry0Conc3(t *testing.T) {
c := pester.New()
c.MaxRetries = 0
c.Concurrency = 3

fmt.Println(runner("Pester, Retries 0, Conc 3", c.Get))
}

func TestPesterGetRetry1Conc1(t *testing.T) {
	// Note: despite the function name, this runs MaxRetries = 0 again,
	// producing the repeated "Retries 0, Conc 1" line in the results above.
	c := pester.New()
	c.MaxRetries = 0
	c.Concurrency = 1

	fmt.Println(runner("Pester, Retries 0, Conc 1", c.Get))
}

func TestPesterGetRetries2Conc1(t *testing.T) {
	c := pester.New()
	c.MaxRetries = 2
	c.Concurrency = 1

	fmt.Println(runner("Pester, Retries 2, Conc 1", c.Get))
}

func TestPesterGetRetries3Conc1(t *testing.T) {
	c := pester.New()
	c.MaxRetries = 3
	c.Concurrency = 1

	fmt.Println(runner("Pester, Retries 3, Conc 1", c.Get))
}

// reportTimings averages the collected per-request timings and formats a one-line summary.
func reportTimings(name string, timings []int64) string {
var sum int64
for _, t := range timings {
sum += t
}
average := sum / int64(len(timings))
return fmt.Sprintf(" %-29s %7d ns Avg.", name, average)
}

// runServer starts a throwaway HTTP server on a random port and returns that port.
func runServer() int {
mux := http.NewServeMux()
mux.HandleFunc("/", func(w http.ResponseWriter, r *http.Request) {
w.Write([]byte("OK"))
})
l, err := net.Listen("tcp", ":0")
if err != nil {
log.Fatal("unable to secure listener", err)
}
go func() {
if err := http.Serve(l, mux); err != nil {
log.Fatal("stable server error", err)
}
}()
port, err := strconv.Atoi(strings.Replace(l.Addr().String(), "[::]:", "", 1))
if err != nil {
log.Fatal("unable to determine port", err)
}
return port
}

// runner times seven GET requests, each against a freshly started server, and reports the average.
func runner(name string, Do getter) string {
var timings []int64
for n := 0; n < 7; n++ {
stableServerPort := runServer()

start := time.Now().UnixNano()
r, err := Do(fmt.Sprintf("http://localhost:%d/%d", stableServerPort, time.Now().UnixNano()))
if err != nil {
log.Fatal("Error came back and it should not have", err)
}
if r == nil {
log.Fatal("No response!")
}
if r.Body == nil {
log.Fatal("No response body!")
}
r.Body.Close()
end := time.Now().UnixNano()

timings = append(timings, end-start)
}
return reportTimings(name, timings)
}
14 changes: 10 additions & 4 deletions main.go
@@ -42,14 +42,16 @@ type Client struct {
}

// ErrEntry is used to provide the LogString() data and is populated
-// each time an error happens if KeepLog is set
+// each time an error happens if KeepLog is set.
+// ErrEntry.Retry is deprecated in favor of ErrEntry.Attempt
type ErrEntry struct {
	Time    time.Time
	Method  string
	URL     string
	Verb    string
	Request int
	Retry   int
+	Attempt int
	Err     error
}

@@ -181,8 +183,11 @@ func (c *Client) pester(p params) (*http.Response, error) {
		go func(n int, p params) {
			resp := &http.Response{}
			var err error
-
-			for i := 0; i < c.MaxRetries; i++ {
+			AttemptLimit := c.MaxRetries
+			if AttemptLimit <= 0 {
+				AttemptLimit = 1
+			}
+			for i := 1; i <= AttemptLimit; i++ {
				select {
				case <-finishCh:
					return
@@ -221,7 +226,8 @@ func (c *Client) pester(p params) (*http.Response, error) {
					Verb:    p.verb,
					URL:     p.url,
					Request: n,
-					Retry:   i,
+					Retry:   i + 1, // would remove, but would break backward compatibility
+					Attempt: i,
					Err:     err,
				})

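The heart of the fix is the AttemptLimit clamp above: the old loop `for i := 0; i < c.MaxRetries; i++` never executed when MaxRetries was 0, so no request was ever sent. The sketch below (standalone illustration, not pester's source) shows how the clamp guarantees at least one attempt:

```go
package main

import "fmt"

// attemptsOld mirrors the previous loop bound: MaxRetries = 0 means zero iterations.
func attemptsOld(maxRetries int) int {
	count := 0
	for i := 0; i < maxRetries; i++ {
		count++
	}
	return count
}

// attemptsNew mirrors the fixed loop: the limit is clamped so at least one
// attempt runs, and attempts are counted starting from 1.
func attemptsNew(maxRetries int) int {
	attemptLimit := maxRetries
	if attemptLimit <= 0 {
		attemptLimit = 1
	}
	count := 0
	for i := 1; i <= attemptLimit; i++ {
		count++
	}
	return count
}

func main() {
	for _, retries := range []int{0, 1, 3} {
		fmt.Printf("MaxRetries=%d  old attempts=%d  new attempts=%d\n",
			retries, attemptsOld(retries), attemptsNew(retries))
	}
	// Output:
	// MaxRetries=0  old attempts=0  new attempts=1
	// MaxRetries=1  old attempts=1  new attempts=1
	// MaxRetries=3  old attempts=3  new attempts=3
}
```
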
30 changes: 26 additions & 4 deletions main_test.go
@@ -24,9 +24,31 @@ func TestConcurrentRequests(t *testing.T) {
	t.Log("\n", c.LogString())

	if got, want := len(c.ErrLog), c.Concurrency*c.MaxRetries; got != want {
-		t.Error("got %d attempts, want %d", got, want)
+		t.Errorf("got %d attempts, want %d", got, want)
	}
}

+func TestConcurrentRetry0(t *testing.T) {
+	t.Parallel()
+
+	c := pester.New()
+	c.Concurrency = 4
+	c.MaxRetries = 0
+	c.KeepLog = true
+
+	nonExistantURL := "http://localhost:9000/foo"
+
+	_, err := c.Get(nonExistantURL)
+	if err == nil {
+		t.Fatal("expected to get an error")
+	}
+
+	// in the event of an error, let's see what the logs were
+	t.Log("\n", c.LogString())
+
+	if got, want := len(c.ErrLog), c.Concurrency; got != want {
+		t.Errorf("got %d attempts, want %d", got, want)
+	}
+}

func TestDefaultBackoff(t *testing.T) {
@@ -131,11 +153,11 @@ func TestExponentialBackoff(t *testing.T) {
		case 0:
			startTime = e.Time.Unix()
		case 1:
-			delta += 1
-		case 2:
			delta += 2
-		case 3:
+		case 2:
			delta += 4
+		case 3:
+			delta += 8
		}
		if got, want := e.Time.Unix(), startTime+delta; got != want {
			t.Errorf("got time %d, want %d (%d greater than start time %d)", got, want, delta, startTime)
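
The expected offsets in TestExponentialBackoff change because attempts are now counted from 1 rather than 0, so a backoff of 2^attempt seconds waits 2, 4, and 8 seconds between attempts, putting the later log entries at +2, +6, and +14 seconds from the first. A small sketch of that arithmetic (assuming a 2^attempt-second backoff; this is illustration, not pester's source):

```go
package main

import (
	"fmt"
	"time"
)

// expBackoff assumes the exponential backoff sleeps 2^attempt seconds after a failed attempt.
func expBackoff(attempt int) time.Duration {
	return time.Duration(1<<uint(attempt)) * time.Second
}

func main() {
	var offset time.Duration
	// Attempts are now 1-indexed (see the AttemptLimit loop in main.go above).
	for attempt := 1; attempt <= 3; attempt++ {
		offset += expBackoff(attempt)
		fmt.Printf("after attempt %d: next attempt at +%v\n", attempt, offset)
	}
	// Output:
	// after attempt 1: next attempt at +2s
	// after attempt 2: next attempt at +6s
	// after attempt 3: next attempt at +14s
}
```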
