cmd/swarm/db.go: 109 changes (102 additions, 7 deletions)
@@ -17,6 +17,10 @@
package main

import (
"archive/tar"
"bytes"
"encoding/binary"
"encoding/hex"
"fmt"
"io"
"os"
@@ -25,10 +29,22 @@ import (
"github.com/ethereum/go-ethereum/cmd/utils"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/log"
"github.com/ethereum/go-ethereum/rlp"
"github.com/ethereum/go-ethereum/swarm/chunk"
"github.com/ethereum/go-ethereum/swarm/storage/localstore"
"github.com/syndtr/goleveldb/leveldb"
"github.com/syndtr/goleveldb/leveldb/opt"
"gopkg.in/urfave/cli.v1"
)

// Key prefixes of the legacy (pre-sanctuary) LevelDB chunk store: index entries
// are stored under prefix 0x00, chunk data under prefix 0x06.
var legacyKeyIndex = byte(0)
var keyData = byte(6)

// dpaDBIndex mirrors the RLP-encoded index entry the legacy store keeps per chunk.
type dpaDBIndex struct {
Idx uint64
Access uint64
}

var dbCommand = cli.Command{
Name: "db",
CustomHelpTemplate: helpTemplate,
@@ -67,6 +83,9 @@ The import may be quite large, consider piping the input through the Unix
pv(1) tool to get a progress bar:

pv chunks.tar | swarm db import ~/.ethereum/swarm/bzz-KEY/chunks -`,
Flags: []cli.Flag{
SwarmLegacyFlag,
},
},
},
}
@@ -77,12 +96,6 @@ func dbExport(ctx *cli.Context) {
utils.Fatalf("invalid arguments, please specify both <chunkdb> (path to a local chunk database), <file> (path to write the tar archive to, - for stdout) and the base key")
}

store, err := openLDBStore(args[0], common.Hex2Bytes(args[2]))
if err != nil {
utils.Fatalf("error opening local chunk database: %s", err)
}
defer store.Close()

var out io.Writer
if args[1] == "-" {
out = os.Stdout
@@ -95,6 +108,23 @@
out = f
}

isLegacy := localstore.IsLegacyDatabase(args[0])
if isLegacy {
count, err := exportLegacy(args[0], common.Hex2Bytes(args[2]), out)
if err != nil {
utils.Fatalf("error exporting legacy local chunk database: %s", err)
}

log.Info(fmt.Sprintf("successfully exported %d chunks from legacy db", count))
return
}

store, err := openLDBStore(args[0], common.Hex2Bytes(args[2]))
if err != nil {
utils.Fatalf("error opening local chunk database: %s", err)
}
defer store.Close()

count, err := store.Export(out)
if err != nil {
utils.Fatalf("error exporting local chunk database: %s", err)
@@ -109,6 +139,8 @@ func dbImport(ctx *cli.Context) {
utils.Fatalf("invalid arguments, please specify both <chunkdb> (path to a local chunk database), <file> (path to read the tar archive from, - for stdin) and the base key")
}

legacy := ctx.IsSet(SwarmLegacyFlag.Name)

store, err := openLDBStore(args[0], common.Hex2Bytes(args[2]))
if err != nil {
utils.Fatalf("error opening local chunk database: %s", err)
@@ -127,7 +159,7 @@ func dbImport(ctx *cli.Context) {
in = f
}

count, err := store.Import(in)
count, err := store.Import(in, legacy)
if err != nil {
utils.Fatalf("error importing local chunk database: %s", err)
}
@@ -142,3 +174,66 @@ func openLDBStore(path string, basekey []byte) (*localstore.DB, error) {

return localstore.New(path, basekey, nil)
}

// decodeIndex RLP-decodes a legacy index entry into index.
func decodeIndex(data []byte, index *dpaDBIndex) error {
dec := rlp.NewStream(bytes.NewReader(data), 0)
return dec.Decode(index)
}

// getDataKey builds the 10-byte legacy data key of a chunk: the keyData prefix,
// the chunk's proximity order relative to the base key, and its big-endian index.
func getDataKey(idx uint64, po uint8) []byte {
key := make([]byte, 10)
key[0] = keyData
key[1] = po
binary.BigEndian.PutUint64(key[2:], idx)

return key
}

// exportLegacy walks the index entries of a legacy (pre-sanctuary) LevelDB chunk
// store, reads the data of each chunk and writes it to out as a tar archive with
// one entry per chunk, named by the hex-encoded chunk hash.
func exportLegacy(path string, basekey []byte, out io.Writer) (int64, error) {
tw := tar.NewWriter(out)
defer tw.Close()
db, err := leveldb.OpenFile(path, &opt.Options{OpenFilesCacheCapacity: 128})
if err != nil {
return 0, err
}
defer db.Close()

it := db.NewIterator(nil, nil)
defer it.Release()
var count int64
for ok := it.Seek([]byte{legacyKeyIndex}); ok; ok = it.Next() {
key := it.Key()
if (key == nil) || (key[0] != legacyKeyIndex) {
break
}

var index dpaDBIndex

hash := key[1:]
decodeIndex(it.Value(), &index)

po := uint8(chunk.Proximity(basekey, hash))

datakey := getDataKey(index.Idx, po)
data, err := db.Get(datakey, nil)
if err != nil {
log.Crit(fmt.Sprintf("Chunk %x found but could not be accessed: %v, %x", key, err, datakey))
continue
}

hdr := &tar.Header{
Name: hex.EncodeToString(hash),
Mode: 0644,
Size: int64(len(data)),
}
if err := tw.WriteHeader(hdr); err != nil {
return count, err
}
if _, err := tw.Write(data); err != nil {
return count, err
}
count++
}

return count, nil
}
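
The archive written by exportLegacy, like the one produced by the regular Export path, is a plain tar stream with one entry per chunk: the entry name is the hex-encoded chunk hash and the body is the raw chunk data. As a quick way to inspect such an export, a minimal standalone sketch (not part of this change; the archive file name is a placeholder) could look like:

package main

import (
	"archive/tar"
	"fmt"
	"io"
	"log"
	"os"
)

// listExport prints the name (hex-encoded chunk hash) and size of every entry
// in a tar archive produced by `swarm db export`.
func listExport(path string) error {
	f, err := os.Open(path)
	if err != nil {
		return err
	}
	defer f.Close()

	tr := tar.NewReader(f)
	for {
		hdr, err := tr.Next()
		if err == io.EOF {
			return nil
		}
		if err != nil {
			return err
		}
		fmt.Printf("%s\t%d bytes\n", hdr.Name, hdr.Size)
	}
}

func main() {
	if err := listExport("export.tar"); err != nil {
		log.Fatal(err)
	}
}
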
cmd/swarm/export_test.go: 164 changes (164 additions, 0 deletions)
@@ -17,19 +17,34 @@
package main

import (
"archive/tar"
"bytes"
"compress/gzip"
"crypto/md5"
"encoding/base64"
"encoding/hex"
"io"
"io/ioutil"
"net/http"
"os"
"path"
"runtime"
"strings"
"testing"

"github.com/ethereum/go-ethereum/cmd/swarm/testdata"
"github.com/ethereum/go-ethereum/log"
"github.com/ethereum/go-ethereum/swarm"
"github.com/ethereum/go-ethereum/swarm/testutil"
)

// Account, password, datadir prefix and base key of the pre-generated legacy database fixture used below.
const (
DATABASE_FIXTURE_BZZ_ACCOUNT = "0aa159029fa13ffa8fa1c6fff6ebceface99d6a4"
DATABASE_FIXTURE_PASSWORD = "pass"
FIXTURE_DATADIR_PREFIX = "swarm/bzz-0aa159029fa13ffa8fa1c6fff6ebceface99d6a4"
FixtureBaseKey = "a9f22b3d77b4bdf5f3eefce995d6c8e7cecf2636f20956f08a0d1ed95adb52ad"
)

// TestCLISwarmExportImport performs the following test:
// 1. runs swarm node
// 2. uploads a random file
@@ -99,6 +114,112 @@ func TestCLISwarmExportImport(t *testing.T) {
mustEqualFiles(t, bytes.NewReader(content), res.Body)
}

// TestExportLegacyToNew checks that an old database gets imported correctly into the new localstore structure
// The test sequence is as follows:
// 1. unpack database fixture to tmp dir
// 2. try to open with new swarm binary that should complain about old database
// 3. export from old database
// 4. remove the chunks folder
// 5. import the dump
// 6. file should be accessible
func TestExportLegacyToNew(t *testing.T) {
/*
fixture bzz account 0aa159029fa13ffa8fa1c6fff6ebceface99d6a4
*/
const UPLOADED_FILE_MD5_HASH = "a001fdae53ba50cae584b8b02b06f821"
const UPLOADED_HASH = "67a86082ee0ea1bc7dd8d955bb1e14d04f61d55ae6a4b37b3d0296a3a95e454a"
tmpdir, err := ioutil.TempDir("", "swarm-test")
if err != nil {
t.Fatal(err)
}
defer os.RemoveAll(tmpdir)
log.Trace("running legacy datastore migration test", "temp dir", tmpdir)
inflateBase64Gzip(t, testdata.DATADIR_MIGRATION_FIXTURE, tmpdir)

tmpPassword := testutil.TempFileWithContent(t, DATABASE_FIXTURE_PASSWORD)
defer os.Remove(tmpPassword)

flags := []string{
"--datadir", tmpdir,
"--bzzaccount", DATABASE_FIXTURE_BZZ_ACCOUNT,
"--password", tmpPassword,
}

newSwarmOldDb := runSwarm(t, flags...)
_, matches := newSwarmOldDb.ExpectRegexp(".+")
newSwarmOldDb.ExpectExit()

if len(matches) == 0 {
t.Fatalf("stdout not matched")
}

if newSwarmOldDb.ExitStatus() == 0 {
t.Fatal("should error")
}
t.Log("exporting legacy database")
actualDataDir := path.Join(tmpdir, FIXTURE_DATADIR_PREFIX)
exportCmd := runSwarm(t, "--verbosity", "5", "db", "export", actualDataDir+"/chunks", tmpdir+"/export.tar", FixtureBaseKey)
exportCmd.ExpectExit()

stat, err := os.Stat(tmpdir + "/export.tar")
if err != nil {
t.Fatal(err)
}

// sanity check: the exported fixture should be well above a trivial size
if stat.Size() < 90000 {
t.Fatal("export size too small")
}
t.Log("removing chunk datadir")
err = os.RemoveAll(path.Join(actualDataDir, "chunks"))
if err != nil {
t.Fatal(err)
}

// start second cluster
cluster2 := newTestCluster(t, 1)
var info2 swarm.Info
if err := cluster2.Nodes[0].Client.Call(&info2, "bzz_info"); err != nil {
t.Fatal(err)
}

// stop second cluster, so that we close LevelDB
cluster2.Stop()
defer cluster2.Cleanup()

// import the export.tar
importCmd := runSwarm(t, "db", "import", "--legacy", info2.Path+"/chunks", tmpdir+"/export.tar", strings.TrimPrefix(info2.BzzKey, "0x"))
importCmd.ExpectExit()

// spin second cluster back up
cluster2.StartExistingNodes(t, 1, strings.TrimPrefix(info2.BzzAccount, "0x"))
t.Log("trying to http get the file")
// try to fetch imported file
res, err := http.Get(cluster2.Nodes[0].URL + "/bzz:/" + UPLOADED_HASH)
if err != nil {
t.Fatal(err)
}

if res.StatusCode != 200 {
t.Fatalf("expected HTTP status %d, got %s", 200, res.Status)
}
h := md5.New()
if _, err := io.Copy(h, res.Body); err != nil {
t.Fatal(err)
}

sum := h.Sum(nil)

b, err := hex.DecodeString(UPLOADED_FILE_MD5_HASH)
if err != nil {
t.Fatal(err)
}

if !bytes.Equal(sum, b) {
t.Fatal("should be equal")
}
}
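
Step 2 of the sequence above relies on the node refusing to start on a pre-sanctuary data directory. The same check dbExport uses, localstore.IsLegacyDatabase, can also be called directly; a minimal sketch (the chunk directory path is a placeholder) of using it to decide whether the export / --legacy import migration is needed:

package main

import (
	"fmt"
	"os"

	"github.com/ethereum/go-ethereum/swarm/storage/localstore"
)

func main() {
	// e.g. ~/.ethereum/swarm/bzz-KEY/chunks
	chunkDir := os.Args[1]
	if localstore.IsLegacyDatabase(chunkDir) {
		fmt.Println("legacy chunk store: export it, remove the chunks dir, then `swarm db import --legacy`")
		return
	}
	fmt.Println("chunk store already uses the new localstore schema")
}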

func mustEqualFiles(t *testing.T, up io.Reader, down io.Reader) {
h := md5.New()
upLen, err := io.Copy(h, up)
@@ -117,3 +238,46 @@ func mustEqualFiles(t *testing.T, up io.Reader, down io.Reader) {
t.Fatalf("downloaded imported file md5=%x (length %v) is not the same as the generated one mp5=%x (length %v)", downHash, downLen, upHash, upLen)
}
}

// inflateBase64Gzip decodes a base64-encoded, gzip-compressed tar stream and
// unpacks its directories and files into directory.
func inflateBase64Gzip(t *testing.T, base64File, directory string) {
t.Helper()

f := base64.NewDecoder(base64.StdEncoding, strings.NewReader(base64File))
gzf, err := gzip.NewReader(f)
if err != nil {
t.Fatal(err)
}

tarReader := tar.NewReader(gzf)

for {
header, err := tarReader.Next()
if err == io.EOF {
break
}

if err != nil {
t.Fatal(err)
}

name := header.Name

switch header.Typeflag {
case tar.TypeDir:
err := os.Mkdir(path.Join(directory, name), os.ModePerm)
if err != nil {
t.Fatal(err)
}
case tar.TypeReg:
file, err := os.Create(path.Join(directory, name))
if err != nil {
t.Fatal(err)
}
if _, err := io.Copy(file, tarReader); err != nil {
t.Fatal(err)
}
default:
t.Fatal("shouldn't happen")
}
}
}
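
inflateBase64Gzip above unpacks testdata.DATADIR_MIGRATION_FIXTURE, a base64-encoded, gzipped tar of a legacy data directory. For context, a fixture of that shape could be produced with the inverse pipeline; a rough sketch (the directory name is a placeholder, and this is not necessarily how the checked-in fixture was generated):

package main

import (
	"archive/tar"
	"compress/gzip"
	"encoding/base64"
	"io"
	"log"
	"os"
	"path/filepath"
)

// packFixture tars and gzips dir, then base64-encodes the result into w,
// producing the kind of blob inflateBase64Gzip expects.
func packFixture(dir string, w io.Writer) error {
	b64 := base64.NewEncoder(base64.StdEncoding, w)
	defer b64.Close()
	gz := gzip.NewWriter(b64)
	defer gz.Close()
	tw := tar.NewWriter(gz)
	defer tw.Close() // deferred closes run in reverse order: tar, then gzip, then base64

	return filepath.Walk(dir, func(path string, info os.FileInfo, err error) error {
		if err != nil {
			return err
		}
		rel, err := filepath.Rel(dir, path)
		if err != nil || rel == "." {
			return err
		}
		hdr, err := tar.FileInfoHeader(info, "")
		if err != nil {
			return err
		}
		hdr.Name = filepath.ToSlash(rel)
		if err := tw.WriteHeader(hdr); err != nil {
			return err
		}
		if info.IsDir() {
			return nil
		}
		f, err := os.Open(path)
		if err != nil {
			return err
		}
		defer f.Close()
		_, err = io.Copy(tw, f)
		return err
	})
}

func main() {
	// write the encoded fixture to stdout, e.g. `go run . datadir > fixture.b64`
	if err := packFixture(os.Args[1], os.Stdout); err != nil {
		log.Fatal(err)
	}
}
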
cmd/swarm/flags.go: 4 changes (4 additions, 0 deletions)
@@ -182,4 +182,8 @@ var (
Usage: "URL of the Global Store API provider (only for testing)",
EnvVar: SwarmGlobalstoreAPI,
}
SwarmLegacyFlag = cli.BoolFlag{
Name: "legacy",
Usage: "Use this flag when importing a db export from a legacy local store database dump (for schemas older than 'sanctuary')",
}
)