-
Notifications
You must be signed in to change notification settings - Fork 1.5k
/
Copy pathrestore.go
243 lines (216 loc) · 7.51 KB
/
restore.go
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
// +build !oss
/*
* Copyright 2019 Dgraph Labs, Inc. and Contributors
*
* Licensed under the Dgraph Community License (the "License"); you
* may not use this file except in compliance with the License. You
* may obtain a copy of the License at
*
* https://github.com/dgraph-io/dgraph/blob/master/licenses/DCL.txt
*/
package worker
import (
"bufio"
"compress/gzip"
"encoding/binary"
"encoding/hex"
"fmt"
"io"
"math"
"os"
"path/filepath"
"github.com/dgraph-io/badger/v2"
"github.com/dgraph-io/badger/v2/options"
bpb "github.com/dgraph-io/badger/v2/pb"
"github.com/pkg/errors"
"github.com/dgraph-io/dgraph/ee/enc"
"github.com/dgraph-io/dgraph/posting"
"github.com/dgraph-io/dgraph/protos/pb"
"github.com/dgraph-io/dgraph/x"
)
// RunRestore calls badger.Load and tries to load data into a new DB.
// It scans location for backup files matching backupId and restores each
// group's backup into its own "p<groupId>" directory under pdir. The
// returned LoadResult carries the max assigned UID seen and any error.
func RunRestore(pdir, location, backupId, keyfile string) LoadResult {
	// Create the pdir if it doesn't exist.
	if err := os.MkdirAll(pdir, 0700); err != nil {
		return LoadResult{0, 0, err}
	}

	// Scan location for backup files and load them. Each file represents a node group,
	// and we create a new p dir for each.
	return LoadBackup(location, backupId,
		func(r io.Reader, groupId int, preds predicateSet) (uint64, error) {
			dir := filepath.Join(pdir, fmt.Sprintf("p%d", groupId))

			// Wrap the raw backup stream with decryption (no-op when keyfile
			// is empty), then with gzip decompression.
			r, err := enc.GetReader(keyfile, r)
			if err != nil {
				return 0, err
			}
			gzReader, err := gzip.NewReader(r)
			if err != nil {
				return 0, errors.Wrap(err,
					"Unable to read the backup. Ensure encryption key is correct.")
			}

			// Report whether a fresh DB is about to be created. This check must
			// happen BEFORE badger.OpenManaged below: opening badger creates
			// the directory as a side effect, which would make pathExist(dir)
			// unconditionally true and silence this message.
			if !pathExist(dir) {
				fmt.Println("Creating new db:", dir)
			}

			// The badger DB should be opened only after creating the backup
			// file reader. The following sequence of events can occur if
			// badger is opened before creating a reader -
			// 1. Backup is created without encryption (alpha running without encryption)
			// 2. Restore is called on an unencrypted backup with encryption set.
			//    dgraph restore --encryption_key=xxx
			//    If badger was opened before enc.GetReader() then the new
			//    badger DB would have encryption set. Eventually
			//    enc.GetReader() would crash because of invalid key but
			//    encryption would be set in badger.
			// 3. Re-attempt restore with encryption key not set.
			//    dgraph restore
			//    This command would fail with "encryption key
			//    mismatch" because the badger DB initialized by the previous
			//    restore command has encryption set and the current call
			//    doesn't have the encryption key set.
			db, err := badger.OpenManaged(badger.DefaultOptions(dir).
				WithSyncWrites(false).
				WithTableLoadingMode(options.MemoryMap).
				WithValueThreshold(1 << 10).
				WithNumVersionsToKeep(math.MaxInt32).
				WithEncryptionKey(enc.ReadEncryptionKeyFile(keyfile)))
			if err != nil {
				return 0, err
			}
			defer db.Close()

			maxUid, err := loadFromBackup(db, gzReader, 0, preds)
			if err != nil {
				return 0, err
			}
			// Record which group this p directory belongs to.
			return maxUid, x.WriteGroupIdFile(dir, uint32(groupId))
		})
}
// loadFromBackup reads the backup, converts the keys and values to the required format,
// and loads them to the given badger DB. The set of predicates is used to avoid restoring
// values from predicates no longer assigned to this group.
// If restoreTs is greater than zero, the key-value pairs will be written with that timestamp.
// Otherwise, the original value is used.
// TODO(DGRAPH-1234): Check whether restoreTs can be removed.
//
// The backup stream is a sequence of frames, each a little-endian uint64
// length followed by that many bytes of a marshalled bpb.KVList. The
// function returns the maximum UID observed among restored keys.
func loadFromBackup(db *badger.DB, r io.Reader, restoreTs uint64, preds predicateSet) (
	uint64, error) {
	br := bufio.NewReaderSize(r, 16<<10)
	unmarshalBuf := make([]byte, 1<<10)

	// Delete schemas and types. Each backup file should have a complete copy of the schema.
	if err := db.DropPrefix([]byte{x.ByteSchema}); err != nil {
		return 0, err
	}
	if err := db.DropPrefix([]byte{x.ByteType}); err != nil {
		return 0, err
	}

	loader := db.NewKVLoader(16)
	var maxUid uint64
	for {
		// Read the next frame's length prefix; a clean EOF here means the
		// backup stream is exhausted.
		var sz uint64
		err := binary.Read(br, binary.LittleEndian, &sz)
		if err == io.EOF {
			break
		} else if err != nil {
			return 0, err
		}

		// Grow the scratch buffer only when needed; it is reused across frames.
		if cap(unmarshalBuf) < int(sz) {
			unmarshalBuf = make([]byte, sz)
		}

		if _, err = io.ReadFull(br, unmarshalBuf[:sz]); err != nil {
			return 0, err
		}

		list := &bpb.KVList{}
		if err := list.Unmarshal(unmarshalBuf[:sz]); err != nil {
			return 0, err
		}

		for _, kv := range list.Kv {
			// Every backed-up KV must carry exactly one user-meta byte, which
			// encodes the posting-list kind handled by the switch below.
			if len(kv.GetUserMeta()) != 1 {
				return 0, errors.Errorf(
					"Unexpected meta: %v for key: %s", kv.UserMeta, hex.Dump(kv.Key))
			}

			restoreKey, err := fromBackupKey(kv.Key)
			if err != nil {
				return 0, err
			}

			// Filter keys using the preds set. Do not do this filtering for type keys
			// as they are meant to be in every group and their Attr value does not
			// match a predicate name.
			parsedKey, err := x.Parse(restoreKey)
			if err != nil {
				return 0, errors.Wrapf(err, "could not parse key %s", hex.Dump(restoreKey))
			}
			if _, ok := preds[parsedKey.Attr]; !parsedKey.IsType() && !ok {
				continue
			}

			// Update the max id that has been seen while restoring this backup.
			if parsedKey.Uid > maxUid {
				maxUid = parsedKey.Uid
			}

			// Override the version if requested. Should not be done for type and schema predicates,
			// which always have their version set to 1.
			if restoreTs > 0 && !parsedKey.IsSchema() && !parsedKey.IsType() {
				kv.Version = restoreTs
			}

			switch kv.GetUserMeta()[0] {
			case posting.BitEmptyPosting, posting.BitCompletePosting, posting.BitDeltaPosting:
				backupPl := &pb.BackupPostingList{}
				if err := backupPl.Unmarshal(kv.Value); err != nil {
					return 0, errors.Wrapf(err, "while reading backup posting list")
				}
				pl := posting.FromBackupPostingList(backupPl)
				// A list is split only when it is at least half a MB and has
				// more than one pack block.
				shouldSplit := pl.Size() >= (1<<20)/2 && len(pl.Pack.Blocks) > 1

				if !shouldSplit || parsedKey.HasStartUid || len(pl.GetSplits()) > 0 {
					// This covers two cases.
					// 1. The list is not big enough to be split.
					// 2. This key is storing part of a multi-part list. Write each individual
					// part without rolling the key first. This part is here for backwards
					// compatibility. New backups are not affected because there was a change
					// to roll up lists into a single one.
					restoreVal, err := pl.Marshal()
					if err != nil {
						return 0, errors.Wrapf(err, "while converting backup posting list")
					}
					kv.Key = restoreKey
					kv.Value = restoreVal
					if err := loader.Set(kv); err != nil {
						return 0, err
					}
				} else {
					// This is a complete list. It should be rolled up to avoid writing
					// a list that is too big to be read back from disk.
					l := posting.NewList(restoreKey, pl, kv.Version)
					kvs, err := l.Rollup()
					if err != nil {
						// TODO: wrap errors in this file for easier debugging.
						return 0, err
					}
					for _, kv := range kvs {
						if err := loader.Set(kv); err != nil {
							return 0, err
						}
					}
				}
			case posting.BitSchemaPosting:
				// Schema and type keys are not stored in an intermediate format so their
				// value can be written as is.
				kv.Key = restoreKey
				if err := loader.Set(kv); err != nil {
					return 0, err
				}
			default:
				return 0, errors.Errorf(
					"Unexpected meta %d for key %s", kv.UserMeta[0], hex.Dump(kv.Key))
			}
		}
	}

	// Flush any buffered writes before reporting success.
	if err := loader.Finish(); err != nil {
		return 0, err
	}

	return maxUid, nil
}
// fromBackupKey decodes a marshalled pb.BackupKey and converts it back into
// the internal badger key format used by the live DB.
func fromBackupKey(key []byte) ([]byte, error) {
	var backupKey pb.BackupKey
	if err := backupKey.Unmarshal(key); err != nil {
		return nil, errors.Wrapf(err, "while reading backup key %s", hex.Dump(key))
	}
	return x.FromBackupKey(&backupKey), nil
}