
Commit

Chainwork column added to Auxiliary database. Chainwork and network hashrate charts implemented.
buck54321 committed Nov 17, 2018
1 parent 4ce1350 commit f8b56fa
Showing 15 changed files with 299 additions and 19 deletions.
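
At a glance, the change set stores each block's cumulative chainwork (the hex string from the block header) in a new blocks.chainwork column and derives network hashrate as the change in chainwork divided by the change in time over a rolling 120-block window (see retrieveChainWork in db/dcrpg/queries.go below). The following standalone Go sketch illustrates only that arithmetic; the function name and the sample values are illustrative, not part of the commit.

package main

import (
	"fmt"
	"math/big"
	"time"
)

// hashratePS returns the average network hashrate in hashes per second
// between two blocks, given their cumulative chainwork as hex strings and
// their timestamps: rate = (work2 - work1) / (t2 - t1).
func hashratePS(workHex1, workHex2 string, t1, t2 time.Time) (*big.Int, error) {
	w1, ok := new(big.Int).SetString(workHex1, 16)
	if !ok {
		return nil, fmt.Errorf("bad chainwork %q", workHex1)
	}
	w2, ok := new(big.Int).SetString(workHex2, 16)
	if !ok {
		return nil, fmt.Errorf("bad chainwork %q", workHex2)
	}
	secs := int64(t2.Sub(t1).Seconds())
	if secs <= 0 {
		return nil, fmt.Errorf("non-positive time span")
	}
	diff := new(big.Int).Sub(w2, w1)
	return diff.Div(diff, big.NewInt(secs)), nil
}

func main() {
	t1 := time.Date(2018, 11, 17, 12, 0, 0, 0, time.UTC)
	t2 := t1.Add(5 * time.Minute) // Decred's target block interval
	// Hypothetical chainwork values, hex-encoded without a 0x prefix, as
	// stored in the new blocks.chainwork column.
	rate, err := hashratePS("1b00000000000000000", "1b00000000002625a00", t1, t2)
	if err != nil {
		panic(err)
	}
	fmt.Println(rate, "H/s") // 0x2625a00 hashes / 300 s ≈ 133,333 H/s
}
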
8 changes: 7 additions & 1 deletion cmd/rebuilddb2/rebuilddb2.go
@@ -318,6 +318,12 @@ func mainCore() error {
return fmt.Errorf("GetBlock failed (%s): %v", blockHash, err)
}

// Grab the chainwork.
chainWork, err := rpcutils.GetChainWork(client, blockHash)
if err != nil {
return fmt.Errorf("GetChainWork failed (%s): %v", blockHash, err)
}

// stake db always has genesis, so do not connect it
var winners []string
if ib > 0 {
@@ -339,7 +345,7 @@ func mainCore() error {
isValid, isMainchain, updateExistingRecords := true, true, true
numVins, numVouts, _, err = db.StoreBlock(block.MsgBlock(), winners,
isValid, isMainchain, updateExistingRecords,
cfg.AddrSpendInfoOnline, !cfg.TicketSpendInfoBatch)
cfg.AddrSpendInfoOnline, !cfg.TicketSpendInfoBatch, chainWork)
if err != nil {
return fmt.Errorf("StoreBlock failed: %v", err)
}
3 changes: 2 additions & 1 deletion db/dbtypes/conversion.go
@@ -10,7 +10,7 @@ import (
)

// MsgBlockToDBBlock creates a dbtypes.Block from a wire.MsgBlock
func MsgBlockToDBBlock(msgBlock *wire.MsgBlock, chainParams *chaincfg.Params) *Block {
func MsgBlockToDBBlock(msgBlock *wire.MsgBlock, chainParams *chaincfg.Params, chainWork string) *Block {
// Create the dbtypes.Block structure
blockHeader := msgBlock.Header

@@ -55,6 +55,7 @@ func MsgBlockToDBBlock(msgBlock *wire.MsgBlock, chainParams *chaincfg.Params) *B
ExtraData: blockHeader.ExtraData[:],
StakeVersion: blockHeader.StakeVersion,
PreviousHash: blockHeader.PrevBlock.String(),
ChainWork: chainWork,
}
}

3 changes: 3 additions & 0 deletions db/dbtypes/types.go
@@ -506,6 +506,8 @@ type ChartsData struct {
Received []float64 `json:"received,omitempty"`
Sent []float64 `json:"sent,omitempty"`
Net []float64 `json:"net,omitempty"`
ChainWork []uint64 `json:"chainwork,omitempty"`
NetHash []uint64 `json:"nethash,omitempty"`
}

// ScriptPubKeyData is part of the result of decodescript(ScriptPubKeyHex)
@@ -641,6 +643,7 @@ type Block struct {
ExtraData []byte `json:"extradata"`
StakeVersion uint32 `json:"stakeversion"`
PreviousHash string `json:"previousblockhash"`
ChainWork string `json:"chainwork"`
}

type BlockDataBasic struct {
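
The two new ChartsData fields are plain uint64 slices, while chainwork itself is kept as a hex string on Block. The scaling that bridges the two lives in retrieveChainWork further down: work is reduced to exahash (10^18) and hashrate to terahash per second (10^12) so the values fit in a uint64 for JSON. A small sketch of that conversion, using a made-up chainwork value:

package main

import (
	"fmt"
	"math/big"
)

func main() {
	// Hypothetical chainwork hex string (no 0x prefix) equal to 2*10^18 hashes.
	// Raw cumulative chainwork can overflow a uint64, so it is scaled down to
	// exahash before landing in the ChartsData.ChainWork slice above
	// (hashrate is scaled to terahash/s for NetHash in the same spirit).
	work, ok := new(big.Int).SetString("1bc16d674ec80000", 16)
	if !ok {
		panic("bad chainwork hex")
	}
	exa := new(big.Int).Div(work, big.NewInt(int64(1e18)))
	fmt.Println(exa.Uint64(), "EH") // prints: 2 EH
}
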
11 changes: 9 additions & 2 deletions db/dcrpg/chainmonitor.go
@@ -221,12 +221,19 @@ func (p *ChainMonitor) switchToSideChain() (int32, *chainhash.Hash, error) {
}
winners := tpi.Winners

// Get the chainWork
blockHash := msgBlock.BlockHash()
chainWork, err := p.db.GetChainWork(&blockHash)
if err != nil {
return 0, nil, fmt.Errorf("GetChainWork failed (%s): %v", blockHash.String(), err)
}

// New blocks stored this way are considered part of mainchain. They are
// also considered valid unless invalidated by the next block
// (invalidation of previous handled inside StoreBlock).
isValid, isMainChain, updateExisting := true, true, true
_, _, _, err := p.db.StoreBlock(msgBlock, winners, isValid, isMainChain,
updateExisting, true, true)
_, _, _, err = p.db.StoreBlock(msgBlock, winners, isValid, isMainChain,
updateExisting, true, true, chainWork)
if err != nil {
return int32(p.db.Height()), p.db.Hash(),
fmt.Errorf("error connecting block %v", p.sideChain[i])
18 changes: 11 additions & 7 deletions db/dcrpg/internal/blockstmts.go
@@ -7,7 +7,7 @@ import (
)

const (
CreateBlockTable = `CREATE TABLE IF NOT EXISTS blocks (
CreateBlockTable = `CREATE TABLE IF NOT EXISTS blocks (
id SERIAL PRIMARY KEY,
hash TEXT NOT NULL, -- UNIQUE
height INT4,
@@ -37,7 +37,8 @@ const (
difficulty FLOAT8,
extra_data BYTEA,
stake_version INT4,
previous_hash TEXT
previous_hash TEXT,
chainwork TEXT
);`

// Block inserts. is_valid refers to blocks that have been validated by
@@ -50,13 +51,13 @@ const (
hash, height, size, is_valid, is_mainchain, version, merkle_root, stake_root,
numtx, num_rtx, tx, txDbIDs, num_stx, stx, stxDbIDs,
time, nonce, vote_bits, final_state, voters,
fresh_stake, revocations, pool_size, bits, sbits,
difficulty, extra_data, stake_version, previous_hash)
fresh_stake, revocations, pool_size, bits, sbits,
difficulty, extra_data, stake_version, previous_hash, chainwork)
VALUES ($1, $2, $3, $4, $5, $6, $7, $8,
$9, $10, %s, %s, $11, %s, %s,
$12, $13, $14, $15, $16,
$12, $13, $14, $15, $16,
$17, $18, $19, $20, $21,
$22, $23, $24, $25) `
$22, $23, $24, $25, $26) `

// InsertBlockRow inserts a new block row without checking for unique index
// conflicts. This should only be used before the unique indexes are created
@@ -65,7 +66,7 @@ const (

// UpsertBlockRow is an upsert (insert or update on conflict), returning
// the inserted/updated block row id.
UpsertBlockRow = insertBlockRow + `ON CONFLICT (hash) DO UPDATE
UpsertBlockRow = insertBlockRow + `ON CONFLICT (hash) DO UPDATE
SET is_valid = $4, is_mainchain = $5 RETURNING id;`

// InsertBlockRowOnConflictDoNothing allows an INSERT with a DO NOTHING on
@@ -200,6 +201,9 @@ const (
UpdateBlockNext = `UPDATE block_chain SET next_hash = $2 WHERE block_db_id = $1;`
UpdateBlockNextByHash = `UPDATE block_chain SET next_hash = $2 WHERE this_hash = $1;`

// Grab the timestamp and chainwork.
SelectChainWork = `SELECT time, chainwork FROM blocks WHERE is_mainchain = true ORDER BY time;`

// TODO: index block_chain where needed
)

19 changes: 16 additions & 3 deletions db/dcrpg/pgblockchain.go
@@ -1596,7 +1596,7 @@ func (pgb *ChainDB) Store(blockData *blockdata.BlockData, msgBlock *wire.MsgBloc

_, _, _, err := pgb.StoreBlock(msgBlock, blockData.WinningTickets,
isValid, isMainChain, updateExistingRecords,
updateAddressesSpendingInfo, updateTicketsSpendingInfo)
updateAddressesSpendingInfo, updateTicketsSpendingInfo, blockData.Header.ChainWork)
return err
}

@@ -1720,6 +1720,11 @@ func (pgb *ChainDB) GetPgChartsData() (map[string]*dbtypes.ChartsData, error) {
return nil, fmt.Errorf("retrieveTicketByOutputCount by All TP window: %v", err)
}

chainWork, hashrates, err := retrieveChainWork(pgb.db)
if err != nil {
return nil, fmt.Errorf("retrieveChainWork: %v", err)
}

data := map[string]*dbtypes.ChartsData{
"avg-block-size": {Time: size.Time, Size: size.Size},
"blockchain-size": {Time: size.Time, ChainSize: size.ChainSize},
@@ -1732,6 +1737,8 @@ func (pgb *ChainDB) GetPgChartsData() (map[string]*dbtypes.ChartsData, error) {
"ticket-spend-type": ticketsSpendType,
"ticket-by-outputs-blocks": ticketsByOutputsAllBlocks,
"ticket-by-outputs-windows": ticketsByOutputsTPWindow,
"chainwork": chainWork,
"hashrate": hashrates,
}

return data, nil
@@ -1945,9 +1952,9 @@ func (pgb *ChainDB) TipToSideChain(mainRoot string) (string, int64, error) {
// The number of vins and vouts stored are returned.
func (pgb *ChainDB) StoreBlock(msgBlock *wire.MsgBlock, winningTickets []string,
isValid, isMainchain, updateExistingRecords, updateAddressesSpendingInfo,
updateTicketsSpendingInfo bool) (numVins int64, numVouts int64, numAddresses int64, err error) {
updateTicketsSpendingInfo bool, chainWork string) (numVins int64, numVouts int64, numAddresses int64, err error) {
// Convert the wire.MsgBlock to a dbtypes.Block
dbBlock := dbtypes.MsgBlockToDBBlock(msgBlock, pgb.chainParams)
dbBlock := dbtypes.MsgBlockToDBBlock(msgBlock, pgb.chainParams, chainWork)

// Get the previous winners (stake DB pool info cache has this info). If the
// previous block is side chain, stakedb will not have the
@@ -2720,3 +2727,9 @@ func ticketpoolStatusSlice(ss dbtypes.TicketPoolStatus, N int) []dbtypes.TicketP
}
return S
}

// GetChainWork fetches the dcrjson.BlockHeaderVerbose and returns only the ChainWork
// attribute as a hex-encoded string, without 0x prefix.
func (db *ChainDBRPC) GetChainWork(hash *chainhash.Hash) (string, error) {
return rpcutils.GetChainWork(db.Client, hash)
}
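
The rpcutils.GetChainWork helper called by this wrapper is added elsewhere in the commit and is not shown on this page. Going only by the comment above (it reads the ChainWork attribute off the verbose block header), a rough sketch might look like the following; the signature, client type, and package layout are assumptions, not the committed code.

package rpcutilsketch

import (
	"github.com/decred/dcrd/chaincfg/chainhash"
	"github.com/decred/dcrd/rpcclient"
)

// GetChainWork is a sketch of the helper referenced above: fetch the verbose
// block header for the given hash and return its hex-encoded chainwork.
// The actual client type/interface in dcrdata's rpcutils may differ.
func GetChainWork(client *rpcclient.Client, hash *chainhash.Hash) (string, error) {
	header, err := client.GetBlockHeaderVerbose(hash)
	if err != nil {
		return "", err
	}
	return header.ChainWork, nil
}
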
86 changes: 85 additions & 1 deletion db/dcrpg/queries.go
@@ -10,7 +10,9 @@ import (
"database/sql"
"encoding/hex"
"fmt"
"math/big"
"strings"
"time"

"github.com/decred/dcrd/blockchain/stake"
"github.com/decred/dcrd/chaincfg"
@@ -2522,6 +2524,88 @@ func retrieveTicketByOutputCount(ctx context.Context, db *sql.DB, dataType outpu
return items, nil
}

// retrieveChainWork assembles both block-by-block chainwork data
// and a rolling average for network hashrate data.
func retrieveChainWork(db *sql.DB) (*dbtypes.ChartsData, *dbtypes.ChartsData, error) {
// Grab all chainwork points in rows of (time, chainwork).
rows, err := db.Query(internal.SelectChainWork)
if err != nil {
return nil, nil, err
}
defer closeRows(rows)

// Assemble chainwork and hashrate simultaneously.
// Chainwork is stored as a 32-byte hex string, so in order to
// do math, math/big types are used.
workdata := new(dbtypes.ChartsData)
hashrates := new(dbtypes.ChartsData)
var blocktime dbtypes.TimeDef
var workhex string

// In order to store these large values as uint64, they are represented
// as exahash (10^18) for work, and terahash/s (10^12) for hashrate.
bigExa := big.NewInt(int64(1e18))
bigTera := big.NewInt(int64(1e12))

// chainWorkPt is stored for a rolling average.
type chainWorkPt struct {
work *big.Int
time time.Time
}
// How many blocks to average across for hashrate.
// 120 is the default number of blocks used by the RPC method `getnetworkhashps`.
var averagingLength int = 120
// points is used as circular storage.
points := make([]chainWorkPt, averagingLength)
var thisPt, lastPt chainWorkPt
var idx, workingIdx, lastIdx int
for rows.Next() {
// Get the chainwork.
err = rows.Scan(&blocktime.T, &workhex)
if err != nil {
return nil, nil, err
}
bigwork := new(big.Int)
exawork := new(big.Int)
bigwork, ok := bigwork.SetString(workhex, 16)
if !ok {
log.Errorf("Failed to make big.Int from chainwork %s", workhex)
break
}
exawork.Set(bigwork)
exawork.Div(bigwork, bigExa)
if !exawork.IsUint64() {
log.Errorf("Failed to make uint64 from chainwork %s", workhex)
break
}
workdata.ChainWork = append(workdata.ChainWork, exawork.Uint64())
workdata.Time = append(workdata.Time, blocktime)

workingIdx = idx % averagingLength
points[workingIdx] = chainWorkPt{bigwork, blocktime.T}
if idx >= averagingLength {
// lastIdx is actually the point averagingLength blocks ago.
lastIdx = (workingIdx + 1) % averagingLength
lastPt = points[lastIdx]
thisPt = points[workingIdx]
diff := new(big.Int)
diff.Set(thisPt.work)
diff.Sub(diff, lastPt.work)
rate := diff.Div(diff, big.NewInt(int64(thisPt.time.Sub(lastPt.time).Seconds())))
rate.Div(rate, bigTera)
if !rate.IsUint64() {
log.Errorf("Failed to make uint64 from rate")
break
}
tDef := dbtypes.TimeDef{thisPt.time}
hashrates.Time = append(hashrates.Time, tDef)
hashrates.NetHash = append(hashrates.NetHash, rate.Uint64())
}
idx += 1
}
return workdata, hashrates, nil
}

// --- blocks and block_chain tables ---

func InsertBlock(db *sql.DB, dbBlock *dbtypes.Block, isValid, isMainchain, checked bool) (uint64, error) {
@@ -2535,7 +2619,7 @@ func InsertBlock(db *sql.DB, dbBlock *dbtypes.Block, isValid, isMainchain, check
dbBlock.FinalState, dbBlock.Voters, dbBlock.FreshStake,
dbBlock.Revocations, dbBlock.PoolSize, dbBlock.Bits,
dbBlock.SBits, dbBlock.Difficulty, dbBlock.ExtraData,
dbBlock.StakeVersion, dbBlock.PreviousHash).Scan(&id)
dbBlock.StakeVersion, dbBlock.PreviousHash, dbBlock.ChainWork).Scan(&id)
return id, err
}

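
A design note on retrieveChainWork above: points is used as a fixed-size ring buffer rather than an ever-growing slice, so memory stays constant over a full chain scan. Each block overwrites slot idx % averagingLength, and once the buffer has filled, the slot just after the one being written holds the oldest point still in the window; the hashrate is the chainwork delta across that span divided by the elapsed seconds, then scaled to terahash per second. A toy sketch of only the indexing, with hypothetical integer values:

package main

import "fmt"

// Toy illustration of the ring-buffer indexing in retrieveChainWork: each
// iteration overwrites slot idx % n, and the slot immediately after the one
// just written holds the oldest value still in the ring (written n-1
// iterations earlier), which is the far end of the averaging window.
func main() {
	const n = 4 // stand-in for averagingLength (120 in the commit)
	ring := make([]int, n)
	for idx := 0; idx < 8; idx++ {
		workingIdx := idx % n
		ring[workingIdx] = idx // newest point
		if idx >= n {
			lastIdx := (workingIdx + 1) % n
			fmt.Printf("newest=%d oldest-in-ring=%d\n", idx, ring[lastIdx])
		}
	}
}
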
8 changes: 7 additions & 1 deletion db/dcrpg/sync.go
@@ -289,13 +289,19 @@ func (db *ChainDB) SyncChainDB(ctx context.Context, client rpcutils.MasterBlockG
}
winners := tpi.Winners

// Get the chainwork
chainWork, err := client.GetChainWork(blockHash)
if err != nil {
return ib - 1, fmt.Errorf("GetChainWork failed (%s): %v", blockHash, err)
}

// Store data from this block in the database
isValid, isMainchain := true, true
// updateExisting is ignored if dupCheck=false, but true since this is
// processing main chain blocks.
updateExisting := true
numVins, numVouts, numAddresses, err := db.StoreBlock(block.MsgBlock(), winners, isValid,
isMainchain, updateExisting, !updateAllAddresses, !updateAllVotes)
isMainchain, updateExisting, !updateAllAddresses, !updateAllVotes, chainWork)
if err != nil {
return ib - 1, fmt.Errorf("StoreBlock failed: %v", err)
}
3 changes: 2 additions & 1 deletion db/dcrpg/tables.go
@@ -48,7 +48,7 @@ type dropDuplicatesInfo struct {
// re-indexing and a duplicate scan/purge.
const (
tableMajor = 3
tableMinor = 6
tableMinor = 7
tablePatch = 0
)

@@ -210,6 +210,7 @@ func CreateTables(db *sql.DB) error {
log.Tracef("Table \"%s\" exist.", tableName)
}
}

return err
}

(Diffs for the remaining 6 changed files are not shown here.)
