Commit

Save headscale logs if jobs fail
kradalby committed Aug 20, 2021
1 parent 53168d5 commit 1f422af
Showing 2 changed files with 78 additions and 15 deletions.
2 changes: 2 additions & 0 deletions .gitignore
@@ -19,3 +19,5 @@ config.json
*.key
/db.sqlite
*.sqlite3

test_output/
91 changes: 76 additions & 15 deletions integration_test.go
@@ -4,10 +4,13 @@ package headscale

import (
"bytes"
"context"
"fmt"
"io/ioutil"
"log"
"net/http"
"os"
"path"
"strings"
"testing"
"time"
@@ -22,10 +25,35 @@ import (

type IntegrationTestSuite struct {
suite.Suite
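// stats is populated by HandleStats once the whole suite has finished running.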
stats *suite.SuiteInformation
}

func TestIntegrationTestSuite(t *testing.T) {
suite.Run(t, new(IntegrationTestSuite))
s := new(IntegrationTestSuite)
suite.Run(t, s)

// HandleStats, which allows us to check whether the suite passed and to save
// the logs, is called after TearDown, so we cannot tear down the containers
// before we have potentially saved the logs.
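// Testify calls HandleStats on suites that implement its suite.WithStats
// interface; the HandleStats method further down stores the suite statistics
// so that Passed() can be checked here.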
for _, tailscale := range tailscales {
if err := pool.Purge(&tailscale); err != nil {
log.Printf("Could not purge resource: %s\n", err)
}
}

if !s.stats.Passed() {
err := saveLog(&headscale, "test_output")
if err != nil {
log.Printf("Could not save log: %s\n", err)
}
}
if err := pool.Purge(&headscale); err != nil {
log.Printf("Could not purge resource: %s\n", err)
}

if err := network.Close(); err != nil {
log.Printf("Could not close network: %s\n", err)
}
}

var integrationTmpDir string
@@ -34,7 +62,7 @@ var ih Headscale
var pool dockertest.Pool
var network dockertest.Network
var headscale dockertest.Resource
var tailscaleCount int = 20
var tailscaleCount int = 25
var tailscales map[string]dockertest.Resource

func executeCommand(resource *dockertest.Resource, cmd []string) (string, error) {
@@ -62,6 +90,48 @@ func executeCommand(resource *dockertest.Resource, cmd []string) (string, error)
return stdout.String(), nil
}

func saveLog(resource *dockertest.Resource, basePath string) error {
err := os.MkdirAll(basePath, os.ModePerm)
if err != nil {
return err
}

var stdout bytes.Buffer
var stderr bytes.Buffer

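// Fetch the container's complete stdout and stderr through the Docker API.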
err = pool.Client.Logs(
docker.LogsOptions{
Context: context.TODO(),
Container: resource.Container.ID,
OutputStream: &stdout,
ErrorStream: &stderr,
Tail: "all",
RawTerminal: false,
Stdout: true,
Stderr: true,
Follow: false,
Timestamps: false,
},
)
if err != nil {
return err
}

fmt.Printf("Saving logs for %s to %s\n", resource.Container.Name, basePath)

err = ioutil.WriteFile(path.Join(basePath, resource.Container.Name+".stdout.log"), []byte(stdout.String()), 0644)
if err != nil {
return err
}

err = ioutil.WriteFile(path.Join(basePath, resource.Container.Name+".stderr.log"), []byte(stderr.String()), 0644)
if err != nil {
return err
}

return nil
}

func dockerRestartPolicy(config *docker.HostConfig) {
// set AutoRemove to true so that stopped container goes away by itself
config.AutoRemove = true
@@ -194,23 +264,14 @@ func (s *IntegrationTestSuite) SetupSuite() {

// The nodes need a bit of time to get their updated maps from headscale
// TODO: See if we can have a more deterministic wait here.
time.Sleep(120 * time.Second)
time.Sleep(60 * time.Second)
}

func (s *IntegrationTestSuite) TearDownSuite() {
for _, tailscale := range tailscales {
if err := pool.Purge(&tailscale); err != nil {
log.Printf("Could not purge resource: %s\n", err)
}
}

if err := pool.Purge(&headscale); err != nil {
log.Printf("Could not purge resource: %s\n", err)
}
}

if err := network.Close(); err != nil {
log.Printf("Could not close network: %s\n", err)
}
func (s *IntegrationTestSuite) HandleStats(suiteName string, stats *suite.SuiteInformation) {
s.stats = stats
}

func (s *IntegrationTestSuite) TestListNodes() {