diff --git a/cmd/rebuilddb2/rebuilddb2.go b/cmd/rebuilddb2/rebuilddb2.go
index c2ed093d0a..364d059cc9 100644
--- a/cmd/rebuilddb2/rebuilddb2.go
+++ b/cmd/rebuilddb2/rebuilddb2.go
@@ -318,6 +318,12 @@ func mainCore() error {
return fmt.Errorf("GetBlock failed (%s): %v", blockHash, err)
}
+ // Grab the chainwork.
+ chainWork, err := rpcutils.GetChainWork(client, blockHash)
+ if err != nil {
+ return fmt.Errorf("GetChainWork failed (%s): %v", blockHash, err)
+ }
+
// stake db always has genesis, so do not connect it
var winners []string
if ib > 0 {
@@ -339,7 +345,7 @@ func mainCore() error {
isValid, isMainchain, updateExistingRecords := true, true, true
numVins, numVouts, _, err = db.StoreBlock(block.MsgBlock(), winners,
isValid, isMainchain, updateExistingRecords,
- cfg.AddrSpendInfoOnline, !cfg.TicketSpendInfoBatch)
+ cfg.AddrSpendInfoOnline, !cfg.TicketSpendInfoBatch, chainWork)
if err != nil {
return fmt.Errorf("StoreBlock failed: %v", err)
}
diff --git a/db/dbtypes/conversion.go b/db/dbtypes/conversion.go
index 2485c3e2fe..b187bde9a9 100644
--- a/db/dbtypes/conversion.go
+++ b/db/dbtypes/conversion.go
@@ -10,7 +10,7 @@ import (
)
// MsgBlockToDBBlock creates a dbtypes.Block from a wire.MsgBlock
-func MsgBlockToDBBlock(msgBlock *wire.MsgBlock, chainParams *chaincfg.Params) *Block {
+func MsgBlockToDBBlock(msgBlock *wire.MsgBlock, chainParams *chaincfg.Params, chainWork string) *Block {
// Create the dbtypes.Block structure
blockHeader := msgBlock.Header
@@ -55,6 +55,7 @@ func MsgBlockToDBBlock(msgBlock *wire.MsgBlock, chainParams *chaincfg.Params) *B
ExtraData: blockHeader.ExtraData[:],
StakeVersion: blockHeader.StakeVersion,
PreviousHash: blockHeader.PrevBlock.String(),
+ ChainWork: chainWork,
}
}
diff --git a/db/dbtypes/types.go b/db/dbtypes/types.go
index aa1dc46009..8064871448 100644
--- a/db/dbtypes/types.go
+++ b/db/dbtypes/types.go
@@ -506,6 +506,8 @@ type ChartsData struct {
Received []float64 `json:"received,omitempty"`
Sent []float64 `json:"sent,omitempty"`
Net []float64 `json:"net,omitempty"`
+ ChainWork []uint64 `json:"chainwork,omitempty"`
+ NetHash []uint64 `json:"nethash,omitempty"`
}
// ScriptPubKeyData is part of the result of decodescript(ScriptPubKeyHex)
@@ -641,6 +643,7 @@ type Block struct {
ExtraData []byte `json:"extradata"`
StakeVersion uint32 `json:"stakeversion"`
PreviousHash string `json:"previousblockhash"`
+ ChainWork string `json:"chainwork"`
}
type BlockDataBasic struct {
diff --git a/db/dcrpg/chainmonitor.go b/db/dcrpg/chainmonitor.go
index 612503b234..f8ada87089 100644
--- a/db/dcrpg/chainmonitor.go
+++ b/db/dcrpg/chainmonitor.go
@@ -221,12 +221,19 @@ func (p *ChainMonitor) switchToSideChain() (int32, *chainhash.Hash, error) {
}
winners := tpi.Winners
+ // Get the chainWork
+ blockHash := msgBlock.BlockHash()
+ chainWork, err := p.db.GetChainWork(&blockHash)
+ if err != nil {
+ return 0, nil, fmt.Errorf("GetChainWork failed (%s): %v", blockHash.String(), err)
+ }
+
// New blocks stored this way are considered part of mainchain. They are
// also considered valid unless invalidated by the next block
// (invalidation of previous handled inside StoreBlock).
isValid, isMainChain, updateExisting := true, true, true
- _, _, _, err := p.db.StoreBlock(msgBlock, winners, isValid, isMainChain,
- updateExisting, true, true)
+ _, _, _, err = p.db.StoreBlock(msgBlock, winners, isValid, isMainChain,
+ updateExisting, true, true, chainWork)
if err != nil {
return int32(p.db.Height()), p.db.Hash(),
fmt.Errorf("error connecting block %v", p.sideChain[i])
diff --git a/db/dcrpg/internal/blockstmts.go b/db/dcrpg/internal/blockstmts.go
index ce2df857ec..bb545f6484 100644
--- a/db/dcrpg/internal/blockstmts.go
+++ b/db/dcrpg/internal/blockstmts.go
@@ -7,7 +7,7 @@ import (
)
const (
- CreateBlockTable = `CREATE TABLE IF NOT EXISTS blocks (
+ CreateBlockTable = `CREATE TABLE IF NOT EXISTS blocks (
id SERIAL PRIMARY KEY,
hash TEXT NOT NULL, -- UNIQUE
height INT4,
@@ -37,7 +37,8 @@ const (
difficulty FLOAT8,
extra_data BYTEA,
stake_version INT4,
- previous_hash TEXT
+ previous_hash TEXT,
+ chainwork TEXT
);`
// Block inserts. is_valid refers to blocks that have been validated by
@@ -50,13 +51,13 @@ const (
hash, height, size, is_valid, is_mainchain, version, merkle_root, stake_root,
numtx, num_rtx, tx, txDbIDs, num_stx, stx, stxDbIDs,
time, nonce, vote_bits, final_state, voters,
- fresh_stake, revocations, pool_size, bits, sbits,
- difficulty, extra_data, stake_version, previous_hash)
+ fresh_stake, revocations, pool_size, bits, sbits,
+ difficulty, extra_data, stake_version, previous_hash, chainwork)
VALUES ($1, $2, $3, $4, $5, $6, $7, $8,
$9, $10, %s, %s, $11, %s, %s,
- $12, $13, $14, $15, $16,
+ $12, $13, $14, $15, $16,
$17, $18, $19, $20, $21,
- $22, $23, $24, $25) `
+ $22, $23, $24, $25, $26) `
// InsertBlockRow inserts a new block row without checking for unique index
// conflicts. This should only be used before the unique indexes are created
@@ -65,7 +66,7 @@ const (
// UpsertBlockRow is an upsert (insert or update on conflict), returning
// the inserted/updated block row id.
- UpsertBlockRow = insertBlockRow + `ON CONFLICT (hash) DO UPDATE
+ UpsertBlockRow = insertBlockRow + `ON CONFLICT (hash) DO UPDATE
SET is_valid = $4, is_mainchain = $5 RETURNING id;`
// InsertBlockRowOnConflictDoNothing allows an INSERT with a DO NOTHING on
@@ -200,6 +201,9 @@ const (
UpdateBlockNext = `UPDATE block_chain SET next_hash = $2 WHERE block_db_id = $1;`
UpdateBlockNextByHash = `UPDATE block_chain SET next_hash = $2 WHERE this_hash = $1;`
+ // Grab the timestamp and chainwork.
+ SelectChainWork = `SELECT time, chainwork FROM blocks WHERE is_mainchain = true ORDER BY time;`
+
// TODO: index block_chain where needed
)
diff --git a/db/dcrpg/pgblockchain.go b/db/dcrpg/pgblockchain.go
index b8343972f9..e16c2b3b32 100644
--- a/db/dcrpg/pgblockchain.go
+++ b/db/dcrpg/pgblockchain.go
@@ -1596,7 +1596,7 @@ func (pgb *ChainDB) Store(blockData *blockdata.BlockData, msgBlock *wire.MsgBloc
_, _, _, err := pgb.StoreBlock(msgBlock, blockData.WinningTickets,
isValid, isMainChain, updateExistingRecords,
- updateAddressesSpendingInfo, updateTicketsSpendingInfo)
+ updateAddressesSpendingInfo, updateTicketsSpendingInfo, blockData.Header.ChainWork)
return err
}
@@ -1720,6 +1720,11 @@ func (pgb *ChainDB) GetPgChartsData() (map[string]*dbtypes.ChartsData, error) {
return nil, fmt.Errorf("retrieveTicketByOutputCount by All TP window: %v", err)
}
+ chainWork, hashrates, err := retrieveChainWork(pgb.db)
+ if err != nil {
+ return nil, fmt.Errorf("retrieveChainWork: %v", err)
+ }
+
data := map[string]*dbtypes.ChartsData{
"avg-block-size": {Time: size.Time, Size: size.Size},
"blockchain-size": {Time: size.Time, ChainSize: size.ChainSize},
@@ -1732,6 +1737,8 @@ func (pgb *ChainDB) GetPgChartsData() (map[string]*dbtypes.ChartsData, error) {
"ticket-spend-type": ticketsSpendType,
"ticket-by-outputs-blocks": ticketsByOutputsAllBlocks,
"ticket-by-outputs-windows": ticketsByOutputsTPWindow,
+ "chainwork": chainWork,
+ "hashrate": hashrates,
}
return data, nil
@@ -1945,9 +1952,9 @@ func (pgb *ChainDB) TipToSideChain(mainRoot string) (string, int64, error) {
// The number of vins and vouts stored are returned.
func (pgb *ChainDB) StoreBlock(msgBlock *wire.MsgBlock, winningTickets []string,
isValid, isMainchain, updateExistingRecords, updateAddressesSpendingInfo,
- updateTicketsSpendingInfo bool) (numVins int64, numVouts int64, numAddresses int64, err error) {
+ updateTicketsSpendingInfo bool, chainWork string) (numVins int64, numVouts int64, numAddresses int64, err error) {
// Convert the wire.MsgBlock to a dbtypes.Block
- dbBlock := dbtypes.MsgBlockToDBBlock(msgBlock, pgb.chainParams)
+ dbBlock := dbtypes.MsgBlockToDBBlock(msgBlock, pgb.chainParams, chainWork)
// Get the previous winners (stake DB pool info cache has this info). If the
// previous block is side chain, stakedb will not have the
@@ -2720,3 +2727,9 @@ func ticketpoolStatusSlice(ss dbtypes.TicketPoolStatus, N int) []dbtypes.TicketP
}
return S
}
+
+// GetChainWork fetches the dcrjson.BlockHeaderVerbose and returns only the ChainWork
+// attribute as a hex-encoded string, without 0x prefix.
+func (db *ChainDBRPC) GetChainWork(hash *chainhash.Hash) (string, error) {
+ return rpcutils.GetChainWork(db.Client, hash)
+}
diff --git a/db/dcrpg/queries.go b/db/dcrpg/queries.go
index 23aabff31b..f1afd5a50a 100644
--- a/db/dcrpg/queries.go
+++ b/db/dcrpg/queries.go
@@ -10,7 +10,9 @@ import (
"database/sql"
"encoding/hex"
"fmt"
+ "math/big"
"strings"
+ "time"
"github.com/decred/dcrd/blockchain/stake"
"github.com/decred/dcrd/chaincfg"
@@ -2522,6 +2524,88 @@ func retrieveTicketByOutputCount(ctx context.Context, db *sql.DB, dataType outpu
return items, nil
}
+// retrieveChainWork assembles both block-by-block chainwork data
+// and a rolling average for network hashrate data.
+func retrieveChainWork(db *sql.DB) (*dbtypes.ChartsData, *dbtypes.ChartsData, error) {
+	// Grab all chainwork points in rows of (time, chainwork).
+	rows, err := db.Query(internal.SelectChainWork)
+	if err != nil {
+		return nil, nil, err
+	}
+	defer closeRows(rows)
+
+	// Assemble chainwork and hashrate simultaneously.
+	// Chainwork is stored as a 32-byte hex string, so in order to
+	// do math, math/big types are used.
+	workdata := new(dbtypes.ChartsData)
+	hashrates := new(dbtypes.ChartsData)
+	var blocktime dbtypes.TimeDef
+	var workhex string
+
+	// In order to store these large values as uint64, they are represented
+	// as exahash (10^18) for work, and terahash/s (10^12) for hashrate.
+	bigExa := big.NewInt(int64(1e18))
+	bigTera := big.NewInt(int64(1e12))
+
+	// chainWorkPt is stored for a rolling average.
+	type chainWorkPt struct {
+		work *big.Int
+		time time.Time
+	}
+	// How many blocks to average across for hashrate.
+	// 120 is the default returned by the RPC method `getnetworkhashps`.
+	const averagingLength = 120
+	// points is used as circular storage.
+	points := make([]chainWorkPt, averagingLength)
+	var thisPt, lastPt chainWorkPt
+	var idx, workingIdx, lastIdx int
+	for rows.Next() {
+		// Get the chainwork.
+		err = rows.Scan(&blocktime.T, &workhex)
+		if err != nil {
+			return nil, nil, err
+		}
+		bigwork, ok := new(big.Int).SetString(workhex, 16)
+		if !ok {
+			log.Errorf("Failed to make big.Int from chainwork %s", workhex)
+			break
+		}
+		exawork := new(big.Int).Div(bigwork, bigExa)
+		if !exawork.IsUint64() {
+			log.Errorf("Failed to make uint64 from chainwork %s", workhex)
+			break
+		}
+		workdata.ChainWork = append(workdata.ChainWork, exawork.Uint64())
+		workdata.Time = append(workdata.Time, blocktime)
+
+		// Store this point in the circular buffer for the rolling average.
+		workingIdx = idx % averagingLength
+		points[workingIdx] = chainWorkPt{work: bigwork, time: blocktime.T}
+		if idx >= averagingLength {
+			// lastIdx is actually the point averagingLength blocks ago.
+			lastIdx = (workingIdx + 1) % averagingLength
+			lastPt = points[lastIdx]
+			thisPt = points[workingIdx]
+			// Guard against a zero divisor (big.Int.Div panics) when the
+			// two block timestamps coincide or go backwards.
+			elapsed := int64(thisPt.time.Sub(lastPt.time).Seconds())
+			if elapsed > 0 {
+				diff := new(big.Int).Sub(thisPt.work, lastPt.work)
+				rate := diff.Div(diff, big.NewInt(elapsed))
+				rate.Div(rate, bigTera)
+				if !rate.IsUint64() {
+					log.Errorf("Failed to make uint64 from rate")
+					break
+				}
+				tDef := dbtypes.TimeDef{T: thisPt.time}
+				hashrates.Time = append(hashrates.Time, tDef)
+				hashrates.NetHash = append(hashrates.NetHash, rate.Uint64())
+			}
+		}
+		idx++
+	}
+	return workdata, hashrates, rows.Err()
+}
+
// --- blocks and block_chain tables ---
func InsertBlock(db *sql.DB, dbBlock *dbtypes.Block, isValid, isMainchain, checked bool) (uint64, error) {
@@ -2535,7 +2619,7 @@ func InsertBlock(db *sql.DB, dbBlock *dbtypes.Block, isValid, isMainchain, check
dbBlock.FinalState, dbBlock.Voters, dbBlock.FreshStake,
dbBlock.Revocations, dbBlock.PoolSize, dbBlock.Bits,
dbBlock.SBits, dbBlock.Difficulty, dbBlock.ExtraData,
- dbBlock.StakeVersion, dbBlock.PreviousHash).Scan(&id)
+ dbBlock.StakeVersion, dbBlock.PreviousHash, dbBlock.ChainWork).Scan(&id)
return id, err
}
diff --git a/db/dcrpg/sync.go b/db/dcrpg/sync.go
index 9eb73e638a..d7a85306d6 100644
--- a/db/dcrpg/sync.go
+++ b/db/dcrpg/sync.go
@@ -289,13 +289,19 @@ func (db *ChainDB) SyncChainDB(ctx context.Context, client rpcutils.MasterBlockG
}
winners := tpi.Winners
+ // Get the chainwork
+ chainWork, err := client.GetChainWork(blockHash)
+ if err != nil {
+ return ib - 1, fmt.Errorf("GetChainWork failed (%s): %v", blockHash, err)
+ }
+
// Store data from this block in the database
isValid, isMainchain := true, true
// updateExisting is ignored if dupCheck=false, but true since this is
// processing main chain blocks.
updateExisting := true
numVins, numVouts, numAddresses, err := db.StoreBlock(block.MsgBlock(), winners, isValid,
- isMainchain, updateExisting, !updateAllAddresses, !updateAllVotes)
+ isMainchain, updateExisting, !updateAllAddresses, !updateAllVotes, chainWork)
if err != nil {
return ib - 1, fmt.Errorf("StoreBlock failed: %v", err)
}
diff --git a/db/dcrpg/tables.go b/db/dcrpg/tables.go
index d505173d40..30795cc8d6 100644
--- a/db/dcrpg/tables.go
+++ b/db/dcrpg/tables.go
@@ -48,7 +48,7 @@ type dropDuplicatesInfo struct {
// re-indexing and a duplicate scan/purge.
const (
tableMajor = 3
- tableMinor = 6
+ tableMinor = 7
tablePatch = 0
)
@@ -210,6 +210,7 @@ func CreateTables(db *sql.DB) error {
log.Tracef("Table \"%s\" exist.", tableName)
}
}
+
return err
}
diff --git a/db/dcrpg/upgrades.go b/db/dcrpg/upgrades.go
index 2af45e7fa6..cc0f644bf9 100644
--- a/db/dcrpg/upgrades.go
+++ b/db/dcrpg/upgrades.go
@@ -11,6 +11,7 @@ import (
"time"
"github.com/decred/dcrd/blockchain/stake"
+ "github.com/decred/dcrd/chaincfg/chainhash"
"github.com/decred/dcrd/rpcclient"
"github.com/decred/dcrd/wire"
"github.com/decred/dcrdata/v3/db/dbtypes"
@@ -46,6 +47,7 @@ const (
agendasBlockTimeDataTypeUpdate
transactionsBlockTimeDataTypeUpdate
vinsBlockTimeDataTypeUpdate
+ blocksChainWorkUpdate
)
type TableUpgradeType struct {
@@ -296,6 +298,22 @@ func (pgb *ChainDB) CheckForAuxDBUpgrade(dcrdClient *rpcclient.Client) (bool, er
}
pgb.createBlockTimeIndexes()
+ // Go on to next upgrade
+ fallthrough
+
+ // Upgrade from 3.6.0 --> 3.7.0
+ case version.major == 3 && version.minor == 6 && version.patch == 0:
+ toVersion = TableVersion{3, 7, 0}
+
+ theseUpgrades := []TableUpgradeType{
+ {"blocks", blocksChainWorkUpdate},
+ }
+
+ smartClient := rpcutils.NewBlockGate(dcrdClient, 10)
+ isSuccess, er := pgb.initiatePgUpgrade(smartClient, theseUpgrades)
+ if !isSuccess {
+ return isSuccess, er
+ }
// Go on to next upgrade
// fallthrough
@@ -413,6 +431,9 @@ func (pgb *ChainDB) handleUpgrades(client *rpcutils.BlockGate,
case vinsBlockTimeDataTypeUpdate:
tableReady = true
tableName, upgradeTypeStr = "vins", "block time data type update"
+ case blocksChainWorkUpdate:
+ tableReady, err = addChainWorkColumn(pgb.db)
+ tableName, upgradeTypeStr = "blocks", "new chainwork column"
default:
return false, fmt.Errorf(`upgrade "%v" is unknown`, tableUpgrade)
}
@@ -530,6 +551,10 @@ func (pgb *ChainDB) handleUpgrades(client *rpcutils.BlockGate,
log.Infof("Patching matching_tx_hash in the addresses table...")
rowsUpdated, err = updateAddressesMatchingTxHashPatch(pgb.db)
+ case blocksChainWorkUpdate:
+ log.Infof("Syncing chainwork. This might take a while...")
+ rowsUpdated, err = verifyChainWork(client, pgb.db)
+
case addressesBlockTimeDataTypeUpdate, blocksBlockTimeDataTypeUpdate,
agendasBlockTimeDataTypeUpdate, transactionsBlockTimeDataTypeUpdate,
vinsBlockTimeDataTypeUpdate:
@@ -1182,6 +1207,13 @@ func addAddressesColumnsForMainchain(db *sql.DB) (bool, error) {
return addNewColumnsIfNotFound(db, "addresses", newColumns)
}
+func addChainWorkColumn(db *sql.DB) (bool, error) {
+ newColumns := []newColumn{
+ {"chainwork", "TEXT", ""},
+ }
+ return addNewColumnsIfNotFound(db, "blocks", newColumns)
+}
+
// versionAllTables comments the tables with the upgraded table version.
func versionAllTables(db *sql.DB, version TableVersion) error {
for tableName := range createTableStatements {
@@ -1195,3 +1227,74 @@ func versionAllTables(db *sql.DB, version TableVersion) error {
}
return nil
}
+
+// verifyChainWork fetches and inserts missing chainwork values.
+// This addresses a table update done at DB version 3.7.0.
+func verifyChainWork(blockgate *rpcutils.BlockGate, db *sql.DB) (int64, error) {
+	// Count rows with missing chainWork.
+	var count int64
+	countRow := db.QueryRow(`SELECT COUNT(hash) FROM blocks WHERE chainwork IS NULL;`)
+	err := countRow.Scan(&count)
+	if err != nil {
+		log.Errorf("Failed to count null chainwork columns: %v", err)
+		return 0, err
+	}
+	if count == 0 {
+		return 0, nil
+	}
+
+	// Prepare the insertion statement. Parameters: 1. chainwork; 2. blockhash.
+	stmt, err := db.Prepare(`UPDATE blocks SET chainwork=$1 WHERE hash=$2;`)
+	if err != nil {
+		log.Errorf("Failed to prepare chainwork insertion statement: %v", err)
+		return 0, err
+	}
+	defer stmt.Close()
+
+	// Grab the blockhashes from rows that don't have chainwork.
+	rows, err := db.Query(`SELECT hash FROM blocks WHERE chainwork IS NULL;`)
+	if err != nil {
+		log.Errorf("Failed to query database for missing chainwork: %v", err)
+		return 0, err
+	}
+	defer rows.Close()
+
+	var hashStr string
+	var updated int64
+	client := blockgate.Client()
+	tReport := time.Now().Unix()
+	for rows.Next() {
+		err = rows.Scan(&hashStr)
+		if err != nil {
+			log.Errorf("Failed to Scan null chainwork results. Aborting chainwork sync: %v", err)
+			return updated, err
+		}
+
+		blockHash, err := chainhash.NewHashFromStr(hashStr)
+		if err != nil {
+			log.Errorf("Failed to parse hash from string %s. Aborting chainwork sync: %v", hashStr, err)
+			return updated, err
+		}
+
+		chainWork, err := rpcutils.GetChainWork(client, blockHash)
+		if err != nil {
+			log.Errorf("GetChainWork failed (%s). Aborting chainwork sync: %v", hashStr, err)
+			return updated, err
+		}
+
+		_, err = stmt.Exec(chainWork, hashStr)
+		if err != nil {
+			log.Errorf("Failed to insert chainwork (%s) for block %s. Aborting chainwork sync: %v", chainWork, hashStr, err)
+			return updated, err
+		}
+		updated++
+		// Report sync progress at most every two minutes.
+		if updated%100 == 0 && time.Now().Unix()-tReport > 120 {
+			tReport = time.Now().Unix()
+			log.Infof("Chainwork sync is %.1f%% complete.", float64(updated)/float64(count)*100)
+		}
+	}
+
+	log.Info("Chainwork sync complete.")
+	return updated, rows.Err()
+}
diff --git a/main.go b/main.go
index 74d1688aba..0f5702984b 100644
--- a/main.go
+++ b/main.go
@@ -685,6 +685,13 @@ func _main(ctx context.Context) error {
continue
}
+ // Get the chainwork
+ chainWork, err := rpcutils.GetChainWork(auxDB.Client, blockHash)
+ if err != nil {
+ log.Errorf("GetChainWork failed (%s): %v", blockHash, err)
+ continue
+ }
+
// SQLite / base DB
// TODO: Make hash the primary key instead of height, otherwise
// the main chain block will be overwritten.
@@ -719,7 +726,7 @@ func _main(ctx context.Context) error {
// Store data in the aux (dcrpg) DB.
_, _, _, err = auxDB.StoreBlock(msgBlock, blockData.WinningTickets,
- isValid, isMainchain, updateExistingRecords, true, true)
+ isValid, isMainchain, updateExistingRecords, true, true, chainWork)
if err != nil {
// If data collection succeeded, but storage fails, bail out
// to diagnose the DB trouble.
diff --git a/public/js/controllers/charts.js b/public/js/controllers/charts.js
index 2d0547ac08..87b5436a5e 100644
--- a/public/js/controllers/charts.js
+++ b/public/js/controllers/charts.js
@@ -105,6 +105,14 @@
return _.map(gData.height, (n,i) => { return [n, gData.solo[i], gData.pooled[i]] })
}
+ function chainWorkFunc(gData){
+ return _.map(gData.time,(n,i) => { return [new Date(n), gData.chainwork[i]] })
+ }
+
+ function hashrateFunc(gData){
+ return _.map(gData.time,(n,i) => { return [new Date(n), gData.nethash[i]] })
+ }
+
function mapDygraphOptions(data,labelsVal, isDrawPoint, yLabel, xLabel, titleName, labelsMG, labelsMG2){
return _.merge({
'file': data,
@@ -276,7 +284,8 @@
stackedGraph: true,
plotter: barchartPlotter,
})
- break;
+ break;
+
case 'ticket-by-outputs-blocks': // Tickets by output count graph for all blocks
d = ticketByOutputCountFunc(data)
_.assign(gOptions, mapDygraphOptions(
@@ -294,6 +303,18 @@
plotter: barchartPlotter
})
break;
+
+ case 'chainwork': // Total chainwork over time
+ d = chainWorkFunc(data)
+ _.assign(gOptions, mapDygraphOptions(d, ['Date', 'Cumulative Chainwork (exahash)'],
+ false, 'Cumulative Chainwork (exahash)','Date', undefined, true, false))
+ break;
+
+ case 'hashrate': // Network hashrate over time
+ d = hashrateFunc(data)
+ _.assign(gOptions, mapDygraphOptions(d, ['Date', 'Network Hashrate (terahash/s)'],
+ false, 'Network Hashrate (terahash/s)','Date', undefined, true, false))
+ break;
}
this.chartsView.updateOptions(gOptions, false);
this.chartsView.resetZoom();
diff --git a/rpcutils/blockgate.go b/rpcutils/blockgate.go
index 9069f5bd0b..e1e2b68fee 100644
--- a/rpcutils/blockgate.go
+++ b/rpcutils/blockgate.go
@@ -21,6 +21,7 @@ type BlockGetter interface {
Block(chainhash.Hash) (*dcrutil.Block, error)
WaitForHeight(int64) chan chainhash.Hash
WaitForHash(chainhash.Hash) chan int64
+ GetChainWork(*chainhash.Hash) (string, error)
}
// MasterBlockGetter builds on BlockGetter, adding functions that fetch blocks
@@ -300,3 +301,14 @@ func (g *BlockGate) WaitForHash(hash chainhash.Hash) chan int64 {
}
return waitChain
}
+
+// GetChainWork fetches the dcrjson.BlockHeaderVerbose for the given block
+// and returns only the ChainWork attribute as a string.
+func (g *BlockGate) GetChainWork(hash *chainhash.Hash) (string, error) {
+	return GetChainWork(g.client, hash)
+}
+
+// Client returns the BlockGate's underlying RPC client.
+func (g *BlockGate) Client() *rpcclient.Client {
+	return g.client
+}
diff --git a/rpcutils/rpcclient.go b/rpcutils/rpcclient.go
index 7c8a533c0c..ab346ad39f 100644
--- a/rpcutils/rpcclient.go
+++ b/rpcutils/rpcclient.go
@@ -355,3 +355,13 @@ func SearchRawTransaction(client *rpcclient.Client, count int, address string) (
}
return txs, nil
}
+
+// GetChainWork fetches the dcrjson.BlockHeaderVerbose
+// and returns only the ChainWork field as a string.
+func GetChainWork(client *rpcclient.Client, hash *chainhash.Hash) (string, error) {
+	header, err := client.GetBlockHeaderVerbose(hash)
+	if err != nil {
+		return "", err
+	}
+	return header.ChainWork, nil
+}
diff --git a/views/charts.tmpl b/views/charts.tmpl
index bc889900a8..b6da7034ab 100644
--- a/views/charts.tmpl
+++ b/views/charts.tmpl
@@ -35,6 +35,8 @@
+
+