diff --git a/dgraph/cmd/alpha/admin_backup.go b/dgraph/cmd/alpha/admin_backup.go index 5aa223c8f6e..3d1e4c389a4 100644 --- a/dgraph/cmd/alpha/admin_backup.go +++ b/dgraph/cmd/alpha/admin_backup.go @@ -109,6 +109,22 @@ func processHttpBackupRequest(ctx context.Context, r *http.Request) error { req.SinceTs = latestManifest.Since if forceFull { req.SinceTs = 0 + } else { + if worker.Config.BadgerKeyFile != "" { + // If encryption turned on, latest backup should be encrypted. + if latestManifest.Type != "" && !latestManifest.Encrypted { + err = errors.Errorf("latest manifest indicates the last backup was not encrypted " + + "but this instance has encryption turned on. Try \"forceFull\" flag.") + return err + } + } else { + // If encryption turned off, latest backup should be unencrypted. + if latestManifest.Type != "" && latestManifest.Encrypted { + err = errors.Errorf("latest manifest indicates the last backup was encrypted " + + "but this instance has encryption turned off. Try \"forceFull\" flag.") + return err + } + } } // Update the membership state to get the latest mapping of groups to predicates. 
@@ -160,6 +176,9 @@ func processHttpBackupRequest(ctx context.Context, r *http.Request) error { m.BackupId = latestManifest.BackupId m.BackupNum = latestManifest.BackupNum + 1 } + if worker.Config.BadgerKeyFile != "" { + m.Encrypted = true + } bp := &backup.Processor{Request: &req} return bp.CompleteBackup(ctx, &m) diff --git a/dgraph/cmd/alpha/run.go b/dgraph/cmd/alpha/run.go index 20103adcbe7..51a6026df3e 100644 --- a/dgraph/cmd/alpha/run.go +++ b/dgraph/cmd/alpha/run.go @@ -36,6 +36,7 @@ import ( "github.com/dgraph-io/badger/v2/y" "github.com/dgraph-io/dgo/v2/protos/api" "github.com/dgraph-io/dgraph/edgraph" + "github.com/dgraph-io/dgraph/ee/backup" "github.com/dgraph-io/dgraph/ee/enc" "github.com/dgraph-io/dgraph/posting" "github.com/dgraph-io/dgraph/schema" @@ -520,6 +521,7 @@ func run() { AuthToken: Alpha.Conf.GetString("auth_token"), AllottedMemory: Alpha.Conf.GetFloat64("lru_mb"), } + backup.BadgerKeyFile = opts.BadgerKeyFile // OSS, non-nil key file --> crash if !enc.EeBuild && opts.BadgerKeyFile != "" { diff --git a/ee/backup/backup.go b/ee/backup/backup.go index 30b0ae2ebee..6276ba592cf 100644 --- a/ee/backup/backup.go +++ b/ee/backup/backup.go @@ -28,6 +28,7 @@ import ( "github.com/golang/glog" "github.com/pkg/errors" + "github.com/dgraph-io/dgraph/ee/enc" "github.com/dgraph-io/dgraph/posting" "github.com/dgraph-io/dgraph/protos/pb" "github.com/dgraph-io/dgraph/x" @@ -77,8 +78,14 @@ type Manifest struct { // Path is the path to the manifest file. This field is only used during // processing and is not written to disk. Path string `json:"-"` + // Encrypted indicates whether this backup was encrypted or not. + Encrypted bool `json:"encrypted"` } +// BadgerKeyFile - This is a copy of worker.Config.BadgerKeyFile. Need to copy because +// otherwise it results in an import cycle. 
+var BadgerKeyFile string + func (m *Manifest) getPredsInGroup(gid uint32) predicateSet { preds, ok := m.Groups[gid] if !ok { @@ -125,7 +132,13 @@ func (pr *Processor) WriteBackup(ctx context.Context) (*pb.Status, error) { } var maxVersion uint64 - gzWriter := gzip.NewWriter(handler) + + newhandler, err := enc.GetWriter(BadgerKeyFile, handler) + if err != nil { + return &emptyRes, err + } + gzWriter := gzip.NewWriter(newhandler) + stream := pr.DB.NewStreamAt(pr.Request.ReadTs) stream.LogPrefix = "Dgraph.Backup" stream.KeyToList = pr.toBackupList @@ -168,6 +181,7 @@ func (pr *Processor) WriteBackup(ctx context.Context) (*pb.Status, error) { glog.Errorf("While closing gzipped writer: %v", err) return &emptyRes, err } + if err = handler.Close(); err != nil { glog.Errorf("While closing handler: %v", err) return &emptyRes, err @@ -209,7 +223,8 @@ func (pr *Processor) CompleteBackup(ctx context.Context, manifest *Manifest) err // GoString implements the GoStringer interface for Manifest. func (m *Manifest) GoString() string { - return fmt.Sprintf(`Manifest{Since: %d, Groups: %v}`, m.Since, m.Groups) + return fmt.Sprintf(`Manifest{Since: %d, Groups: %v, Encrypted: %v}`, + m.Since, m.Groups, m.Encrypted) } func (pr *Processor) toBackupList(key []byte, itr *badger.Iterator) (*bpb.KVList, error) { diff --git a/ee/backup/restore.go b/ee/backup/restore.go index b756bb62907..2afa997a107 100644 --- a/ee/backup/restore.go +++ b/ee/backup/restore.go @@ -28,13 +28,14 @@ import ( bpb "github.com/dgraph-io/badger/v2/pb" "github.com/pkg/errors" + "github.com/dgraph-io/dgraph/ee/enc" "github.com/dgraph-io/dgraph/posting" "github.com/dgraph-io/dgraph/protos/pb" "github.com/dgraph-io/dgraph/x" ) // RunRestore calls badger.Load and tries to load data into a new DB. -func RunRestore(pdir, location, backupId string) LoadResult { +func RunRestore(pdir, location, backupId, keyfile string) LoadResult { // Create the pdir if it doesn't exist. 
if err := os.MkdirAll(pdir, 0700); err != nil { return LoadResult{0, 0, err} @@ -59,15 +60,18 @@ func RunRestore(pdir, location, backupId string) LoadResult { if !pathExist(dir) { fmt.Println("Creating new db:", dir) } + r, err = enc.GetReader(keyfile, r) + if err != nil { + return 0, err + } gzReader, err := gzip.NewReader(r) if err != nil { - return 0, nil + return 0, err } maxUid, err := loadFromBackup(db, gzReader, preds) if err != nil { return 0, err } - return maxUid, x.WriteGroupIdFile(dir, uint32(groupId)) }) } diff --git a/ee/backup/run.go b/ee/backup/run.go index 871d08d3de2..e5d90e269a0 100644 --- a/ee/backup/run.go +++ b/ee/backup/run.go @@ -32,7 +32,7 @@ var Restore x.SubCommand var LsBackup x.SubCommand var opt struct { - backupId, location, pdir, zero string + backupId, location, pdir, zero, keyfile string } func init() { @@ -104,6 +104,8 @@ $ dgraph restore -p . -l /var/backups/dgraph -z localhost:5080 flag.StringVarP(&opt.zero, "zero", "z", "", "gRPC address for Dgraph zero. ex: localhost:5080") flag.StringVarP(&opt.backupId, "backup_id", "", "", "The ID of the backup series to "+ "restore. 
If empty, it will restore the latest series.") + flag.StringVarP(&opt.keyfile, "keyfile", "k", "", + "Key file to decrypt the backup") _ = Restore.Cmd.MarkFlagRequired("postings") _ = Restore.Cmd.MarkFlagRequired("location") } @@ -184,7 +186,7 @@ func runRestoreCmd() error { } start = time.Now() - result := RunRestore(opt.pdir, opt.location, opt.backupId) + result := RunRestore(opt.pdir, opt.location, opt.backupId, opt.keyfile) if result.Err != nil { return result.Err } @@ -225,9 +227,9 @@ func runLsbackupCmd() error { return errors.Wrapf(err, "while listing manifests") } - fmt.Printf("Name\tSince\tGroups\n") + fmt.Printf("Name\tSince\tGroups\tEncrypted\n") for path, manifest := range manifests { - fmt.Printf("%v\t%v\t%v\n", path, manifest.Since, manifest.Groups) + fmt.Printf("%v\t%v\t%v\t%v\n", path, manifest.Since, manifest.Groups, manifest.Encrypted) } return nil diff --git a/ee/backup/tests/encryption/backup_test.go b/ee/backup/tests/encryption/backup_test.go new file mode 100644 index 00000000000..79364bfaf82 --- /dev/null +++ b/ee/backup/tests/encryption/backup_test.go @@ -0,0 +1,339 @@ +/* + * Copyright 2018 Dgraph Labs, Inc. and Contributors * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package main + +import ( + "context" + // "fmt" + "io/ioutil" + "math" + "net/http" + "net/url" + "os" + "path/filepath" + "strings" + "testing" + "time" + + "github.com/dgraph-io/dgo/v2" + "github.com/dgraph-io/dgo/v2/protos/api" + minio "github.com/minio/minio-go" + "github.com/stretchr/testify/require" + "google.golang.org/grpc" + + "github.com/dgraph-io/dgraph/ee/backup" + "github.com/dgraph-io/dgraph/testutil" + "github.com/dgraph-io/dgraph/x" +) + +var ( + backupDir = "./data/backups" + restoreDir = "./data/restore" + testDirs = []string{backupDir, restoreDir} + + mc *minio.Client + bucketName = "dgraph-backup" + backupDst = "minio://minio1:9001/dgraph-backup?secure=false" + localBackupDst = "minio://localhost:9001/dgraph-backup?secure=false" +) + +func TestBackupMinio(t *testing.T) { + conn, err := grpc.Dial(testutil.SockAddr, grpc.WithInsecure()) + require.NoError(t, err) + dg := dgo.NewDgraphClient(api.NewDgraphClient(conn)) + + mc, err = testutil.NewMinioClient() + require.NoError(t, err) + require.NoError(t, mc.MakeBucket(bucketName, "")) + + ctx := context.Background() + require.NoError(t, dg.Alter(ctx, &api.Operation{DropAll: true})) + + // Add schema and types. + require.NoError(t, dg.Alter(ctx, &api.Operation{Schema: `movie: string . + type Node { + movie + }`})) + + // Add initial data. + original, err := dg.NewTxn().Mutate(ctx, &api.Mutation{ + CommitNow: true, + SetNquads: []byte(` + <_:x1> "BIRDS MAN OR (THE UNEXPECTED VIRTUE OF IGNORANCE)" . + <_:x2> "Spotlight" . + <_:x3> "Moonlight" . + <_:x4> "THE SHAPE OF WATERLOO" . + <_:x5> "BLACK PUNTER" . + `), + }) + require.NoError(t, err) + t.Logf("--- Original uid mapping: %+v\n", original.Uids) + + // Move tablet to group 1 to avoid messes later. + _, err = http.Get("http://" + testutil.SockAddrZeroHttp + "/moveTablet?tablet=movie&group=1") + require.NoError(t, err) + + // After the move, we need to pause a bit to give zero a chance to quorum. 
+ t.Log("Pausing to let zero move tablet...") + moveOk := false + for retry := 5; retry > 0; retry-- { + time.Sleep(3 * time.Second) + state, err := testutil.GetState() + require.NoError(t, err) + if _, ok := state.Groups["1"].Tablets["movie"]; ok { + moveOk = true + break + } + } + require.True(t, moveOk) + + // Setup environmental variables for use during restore. + os.Setenv("MINIO_ACCESS_KEY", "accesskey") + os.Setenv("MINIO_SECRET_KEY", "secretkey") + + // Setup test directories. + dirSetup(t) + + // Send backup request. + _ = runBackup(t, 3, 1) + restored := runRestore(t, "", math.MaxUint64) + + checks := []struct { + blank, expected string + }{ + {blank: "x1", expected: "BIRDS MAN OR (THE UNEXPECTED VIRTUE OF IGNORANCE)"}, + {blank: "x2", expected: "Spotlight"}, + {blank: "x3", expected: "Moonlight"}, + {blank: "x4", expected: "THE SHAPE OF WATERLOO"}, + {blank: "x5", expected: "BLACK PUNTER"}, + } + for _, check := range checks { + require.EqualValues(t, check.expected, restored[original.Uids[check.blank]]) + } + + // // Add more data for the incremental backup. + // incr1, err := dg.NewTxn().Mutate(ctx, &api.Mutation{ + // CommitNow: true, + // SetNquads: []byte(fmt.Sprintf(` + // <%s> "Birdman or (The Unexpected Virtue of Ignorance)" . + // <%s> "The Shape of Waterloo" . + // `, original.Uids["x1"], original.Uids["x4"])), + // }) + // t.Logf("%+v", incr1) + // require.NoError(t, err) + + // // Perform first incremental backup. + // _ = runBackup(t, 6, 2) + // restored = runRestore(t, "", incr1.Txn.CommitTs) + + // checks = []struct { + // blank, expected string + // }{ + // {blank: "x1", expected: "Birdman or (The Unexpected Virtue of Ignorance)"}, + // {blank: "x4", expected: "The Shape of Waterloo"}, + // } + // for _, check := range checks { + // require.EqualValues(t, check.expected, restored[original.Uids[check.blank]]) + // } + + // // Add more data for a second incremental backup. 
+ // incr2, err := dg.NewTxn().Mutate(ctx, &api.Mutation{ + // CommitNow: true, + // SetNquads: []byte(fmt.Sprintf(` + // <%s> "The Shape of Water" . + // <%s> "The Black Panther" . + // `, original.Uids["x4"], original.Uids["x5"])), + // }) + // require.NoError(t, err) + + // // Perform second incremental backup. + // _ = runBackup(t, 9, 3) + // restored = runRestore(t, "", incr2.Txn.CommitTs) + + // checks = []struct { + // blank, expected string + // }{ + // {blank: "x4", expected: "The Shape of Water"}, + // {blank: "x5", expected: "The Black Panther"}, + // } + // for _, check := range checks { + // require.EqualValues(t, check.expected, restored[original.Uids[check.blank]]) + // } + + // // Add more data for a second full backup. + // incr3, err := dg.NewTxn().Mutate(ctx, &api.Mutation{ + // CommitNow: true, + // SetNquads: []byte(fmt.Sprintf(` + // <%s> "El laberinto del fauno" . + // <%s> "Black Panther 2" . + // `, original.Uids["x4"], original.Uids["x5"])), + // }) + // require.NoError(t, err) + + // // Perform second full backup. + // dirs := runBackupInternal(t, true, 12, 4) + // restored = runRestore(t, "", incr3.Txn.CommitTs) + + // // Check all the values were restored to their most recent value. + // checks = []struct { + // blank, expected string + // }{ + // {blank: "x1", expected: "Birdman or (The Unexpected Virtue of Ignorance)"}, + // {blank: "x2", expected: "Spotlight"}, + // {blank: "x3", expected: "Moonlight"}, + // {blank: "x4", expected: "El laberinto del fauno"}, + // {blank: "x5", expected: "Black Panther 2"}, + // } + // for _, check := range checks { + // require.EqualValues(t, check.expected, restored[original.Uids[check.blank]]) + // } + + // // Remove the full backup dirs and verify restore catches the error. + // require.NoError(t, os.RemoveAll(dirs[0])) + // require.NoError(t, os.RemoveAll(dirs[3])) + // runFailingRestore(t, backupDir, "", incr3.Txn.CommitTs) + + // Clean up test directories. 
+ dirCleanup(t) +} + +func runBackup(t *testing.T, numExpectedFiles, numExpectedDirs int) []string { + return runBackupInternal(t, false, numExpectedFiles, numExpectedDirs) +} + +func runBackupInternal(t *testing.T, forceFull bool, numExpectedFiles, + numExpectedDirs int) []string { + forceFullStr := "false" + if forceFull { + forceFullStr = "true" + } + + resp, err := http.PostForm("http://localhost:8180/admin/backup", url.Values{ + "destination": []string{backupDst}, + "force_full": []string{forceFullStr}, + }) + require.NoError(t, err) + defer resp.Body.Close() + buf, err := ioutil.ReadAll(resp.Body) + require.NoError(t, err) + require.Contains(t, string(buf), "Backup completed.") + + // Verify that the right amount of files and directories were created. + copyToLocalFs(t) + + files := x.WalkPathFunc(backupDir, func(path string, isdir bool) bool { + return !isdir && strings.HasSuffix(path, ".backup") + }) + require.Equal(t, numExpectedFiles, len(files)) + + dirs := x.WalkPathFunc(backupDir, func(path string, isdir bool) bool { + return isdir && strings.HasPrefix(path, "data/backups/dgraph.") + }) + require.Equal(t, numExpectedDirs, len(dirs)) + + manifests := x.WalkPathFunc(backupDir, func(path string, isdir bool) bool { + return !isdir && strings.Contains(path, "manifest.json") + }) + require.Equal(t, numExpectedDirs, len(manifests)) + + return dirs +} + +func runRestore(t *testing.T, lastDir string, commitTs uint64) map[string]string { + // Recreate the restore directory to make sure there's no previous data when + // calling restore. 
+ require.NoError(t, os.RemoveAll(restoreDir)) + + t.Logf("--- Restoring from: %q", localBackupDst) + argv := []string{"dgraph", "restore", "-l", localBackupDst, "-p", "data/restore", "-k", "../../../enc/enc-key"} + cwd, err := os.Getwd() + require.NoError(t, err) + err = testutil.ExecWithOpts(argv, testutil.CmdOpts{Dir: cwd}) + require.NoError(t, err) + + for i, pdir := range []string{"p1", "p2", "p3"} { + pdir = filepath.Join("./data/restore", pdir) + groupId, err := x.ReadGroupIdFile(pdir) + require.NoError(t, err) + require.Equal(t, uint32(i+1), groupId) + } + pdir := "./data/restore/p1" + restored, err := testutil.GetPredicateValues(pdir, "movie", commitTs) + require.NoError(t, err) + + restoredPreds, err := testutil.GetPredicateNames(pdir, commitTs) + require.NoError(t, err) + require.ElementsMatch(t, []string{"dgraph.type", "movie"}, restoredPreds) + + restoredTypes, err := testutil.GetTypeNames(pdir, commitTs) + require.NoError(t, err) + require.ElementsMatch(t, []string{"Node"}, restoredTypes) + + require.NoError(t, err) + t.Logf("--- Restored values: %+v\n", restored) + + return restored +} + +// runFailingRestore is like runRestore but expects an error during restore. +func runFailingRestore(t *testing.T, backupLocation, lastDir string, commitTs uint64) { + // Recreate the restore directory to make sure there's no previous data when + // calling restore. + require.NoError(t, os.RemoveAll(restoreDir)) + + result := backup.RunRestore("./data/restore", backupLocation, lastDir, "../../../enc/enc-key") + require.Error(t, result.Err) + require.Contains(t, result.Err.Error(), "expected a BackupNum value of 1") +} + +func dirSetup(t *testing.T) { + // Clean up data from previous runs. 
+ dirCleanup(t) + + for _, dir := range testDirs { + if err := os.MkdirAll(dir, os.ModePerm); err != nil { + t.Fatalf("Error while creating directory: %s", err.Error()) + } + } +} + +func dirCleanup(t *testing.T) { + if err := os.RemoveAll("./data"); err != nil { + t.Fatalf("Error removing directory: %s", err.Error()) + } +} + +func copyToLocalFs(t *testing.T) { + // List all the folders in the bucket. + lsCh1 := make(chan struct{}) + defer close(lsCh1) + objectCh1 := mc.ListObjectsV2(bucketName, "", false, lsCh1) + for object := range objectCh1 { + require.NoError(t, object.Err) + dstDir := backupDir + "/" + object.Key + require.NoError(t, os.MkdirAll(dstDir, os.ModePerm)) + + // Get all the files in that folder and copy them to the local filesystem. + lsCh2 := make(chan struct{}) + objectCh2 := mc.ListObjectsV2(bucketName, "", true, lsCh2) + for object := range objectCh2 { + require.NoError(t, object.Err) + dstFile := backupDir + "/" + object.Key + require.NoError(t, mc.FGetObject(bucketName, object.Key, dstFile, minio.GetObjectOptions{})) + } + close(lsCh2) + } +} diff --git a/ee/backup/tests/encryption/docker-compose.yml b/ee/backup/tests/encryption/docker-compose.yml new file mode 100644 index 00000000000..8ded37ff047 --- /dev/null +++ b/ee/backup/tests/encryption/docker-compose.yml @@ -0,0 +1,98 @@ +version: "3.5" +services: + zero1: + image: dgraph/dgraph:latest + container_name: zero1 + working_dir: /data/zero1 + labels: + cluster: test + ports: + - 5180:5180 + - 6180:6180 + command: /gobin/dgraph zero --cwd=/data/zero1 --my=zero1:5180 -o 100 --bindall --logtostderr -v=0 + volumes: + - type: bind + source: $GOPATH/bin + target: /gobin + read_only: true + + alpha1: + image: dgraph/dgraph:latest + container_name: alpha1 + working_dir: /data/alpha1 + env_file: + - ../../backup.env + labels: + cluster: test + volumes: + - type: bind + source: $GOPATH/bin + target: /gobin + read_only: true + - type: bind + source: ../../../enc/enc-key + target: /dgraph-enc/enc-key + read_only: true + ports: + - 
8180:8180 + - 9180:9180 + command: /gobin/dgraph alpha --encryption_key_file "/dgraph-enc/enc-key" --cwd=/data/alpha1 --my=alpha1:7180 --lru_mb=1024 --zero=zero1:5180 -o 100 -v=0 --whitelist 10.0.0.0/8,172.16.0.0/12,192.168.0.0/16 + + alpha2: + image: dgraph/dgraph:latest + container_name: alpha2 + working_dir: /data/alpha2 + env_file: + - ../../backup.env + labels: + cluster: test + depends_on: + - alpha1 + volumes: + - type: bind + source: $GOPATH/bin + target: /gobin + read_only: true + - type: bind + source: ../../../enc/enc-key + target: /dgraph-enc/enc-key + read_only: true + ports: + - 8182:8182 + - 9182:9182 + command: /gobin/dgraph alpha --encryption_key_file "/dgraph-enc/enc-key" --cwd=/data/alpha2 --my=alpha2:7182 --lru_mb=1024 --zero=zero1:5180 -o 102 -v=0 --whitelist 10.0.0.0/8,172.16.0.0/12,192.168.0.0/16 + + alpha3: + image: dgraph/dgraph:latest + container_name: alpha3 + working_dir: /data/alpha3 + env_file: + - ../../backup.env + labels: + cluster: test + depends_on: + - alpha2 + volumes: + - type: bind + source: $GOPATH/bin + target: /gobin + read_only: true + - type: bind + source: ../../../enc/enc-key + target: /dgraph-enc/enc-key + read_only: true + ports: + - 8183:8183 + - 9183:9183 + command: /gobin/dgraph alpha --encryption_key_file "/dgraph-enc/enc-key" --cwd=/data/alpha3 --my=alpha3:7183 --lru_mb=1024 --zero=zero1:5180 -o 103 -v=0 --whitelist 10.0.0.0/8,172.16.0.0/12,192.168.0.0/16 + + minio1: + image: minio/minio:latest + container_name: minio1 + env_file: + - ../../backup.env + ports: + - 9001:9001 + labels: + cluster: test + command: minio server /data/minio --address :9001 diff --git a/ee/backup/tests/filesystem/backup_test.go b/ee/backup/tests/filesystem/backup_test.go index 2d500356e0c..322d0235a7c 100644 --- a/ee/backup/tests/filesystem/backup_test.go +++ b/ee/backup/tests/filesystem/backup_test.go @@ -252,7 +252,7 @@ func runRestore(t *testing.T, backupLocation, lastDir string, commitTs uint64) m require.NoError(t, 
os.RemoveAll(restoreDir)) t.Logf("--- Restoring from: %q", backupLocation) - result := backup.RunRestore("./data/restore", backupLocation, lastDir) + result := backup.RunRestore("./data/restore", backupLocation, lastDir, "") require.NoError(t, result.Err) for i, pdir := range []string{"p1", "p2", "p3"} { @@ -284,7 +284,7 @@ func runFailingRestore(t *testing.T, backupLocation, lastDir string, commitTs ui // calling restore. require.NoError(t, os.RemoveAll(restoreDir)) - result := backup.RunRestore("./data/restore", backupLocation, lastDir) + result := backup.RunRestore("./data/restore", backupLocation, lastDir, "") require.Error(t, result.Err) require.Contains(t, result.Err.Error(), "expected a BackupNum value of 1") } diff --git a/ee/backup/tests/minio-large/backup_test.go b/ee/backup/tests/minio-large/backup_test.go index 5e09fc32596..359f50fdde6 100644 --- a/ee/backup/tests/minio-large/backup_test.go +++ b/ee/backup/tests/minio-large/backup_test.go @@ -159,7 +159,7 @@ func runRestore(t *testing.T, backupLocation, lastDir string, commitTs uint64) m require.NoError(t, os.MkdirAll(restoreDir, os.ModePerm)) t.Logf("--- Restoring from: %q", backupLocation) - result := backup.RunRestore("./data/restore", backupLocation, lastDir) + result := backup.RunRestore("./data/restore", backupLocation, lastDir, "") require.NoError(t, result.Err) restored1, err := testutil.GetPredicateValues("./data/restore/p1", "name1", commitTs) diff --git a/ee/backup/tests/minio/backup_test.go b/ee/backup/tests/minio/backup_test.go index 462785fd3ea..2e08fe307cc 100644 --- a/ee/backup/tests/minio/backup_test.go +++ b/ee/backup/tests/minio/backup_test.go @@ -294,7 +294,7 @@ func runFailingRestore(t *testing.T, backupLocation, lastDir string, commitTs ui // calling restore. 
require.NoError(t, os.RemoveAll(restoreDir)) - result := backup.RunRestore("./data/restore", backupLocation, lastDir) + result := backup.RunRestore("./data/restore", backupLocation, lastDir, "") require.Error(t, result.Err) require.Contains(t, result.Err.Error(), "expected a BackupNum value of 1") } diff --git a/ee/enc/util_ee.go b/ee/enc/util_ee.go index df9d720a81a..9a37f57c5e6 100644 --- a/ee/enc/util_ee.go +++ b/ee/enc/util_ee.go @@ -13,7 +13,12 @@ package enc import ( + "crypto/aes" + "crypto/cipher" + "github.com/dgraph-io/badger/v2/y" "github.com/dgraph-io/dgraph/x" + "github.com/pkg/errors" + "io" "io/ioutil" ) @@ -35,3 +40,48 @@ func ReadEncryptionKeyFile(filepath string) []byte { return k } + +// GetWriter wraps a crypto StreamWriter on input Writer given a key file +func GetWriter(filepath string, w io.Writer) (io.Writer, error) { + // No encryption, return the input writer as is. + if filepath == "" { + return w, nil + } + // Encryption, wrap crypto StreamWriter on the input Writer. + c, err := aes.NewCipher(ReadEncryptionKeyFile(filepath)) + if err != nil { + return nil, err + } + iv, err := y.GenerateIV() + if err != nil { + return nil, err + } + if iv != nil { + if _, err = w.Write(iv); err != nil { + return nil, err + } + } + return cipher.StreamWriter{S: cipher.NewCTR(c, iv), W: w}, nil +} + +// GetReader returns a crypto StreamReader on the input Reader given a key file. +func GetReader(filepath string, r io.Reader) (io.Reader, error) { + // No encryption, return input reader as is. + if filepath == "" { + return r, nil + } + + // Encryption, wrap crypto StreamReader on input Reader. + c, err := aes.NewCipher(ReadEncryptionKeyFile(filepath)) + if err != nil { + return nil, err + } + iv := make([]byte, 16) + cnt, err := io.ReadFull(r, iv) + if cnt != 16 || err != nil { + err = errors.Errorf("unable to get IV from encrypted backup. 
Read %v bytes, err %v ", + cnt, err) + return nil, err + } + return cipher.StreamReader{S: cipher.NewCTR(c, iv), R: r}, nil +}