diff --git a/cmd/tomo/chaincmd.go b/cmd/tomo/chaincmd.go
index dc0a274ba..aac0c8d9d 100644
--- a/cmd/tomo/chaincmd.go
+++ b/cmd/tomo/chaincmd.go
@@ -19,23 +19,24 @@ package main
import (
"encoding/json"
"fmt"
- "github.com/tomochain/tomochain/core/rawdb"
"os"
"runtime"
"strconv"
"sync/atomic"
"time"
+ "gopkg.in/urfave/cli.v1"
+
"github.com/tomochain/tomochain/cmd/utils"
"github.com/tomochain/tomochain/common"
"github.com/tomochain/tomochain/console"
"github.com/tomochain/tomochain/core"
+ "github.com/tomochain/tomochain/core/rawdb"
"github.com/tomochain/tomochain/core/state"
"github.com/tomochain/tomochain/core/types"
"github.com/tomochain/tomochain/eth/downloader"
"github.com/tomochain/tomochain/event"
"github.com/tomochain/tomochain/log"
- "gopkg.in/urfave/cli.v1"
)
var (
@@ -68,6 +69,8 @@ It expects the genesis file as argument.`,
utils.GCModeFlag,
utils.CacheDatabaseFlag,
utils.CacheGCFlag,
+ utils.MetricsEnabledFlag,
+ utils.MetricsEnabledExpensiveFlag,
},
Category: "BLOCKCHAIN COMMANDS",
Description: `
diff --git a/cmd/tomo/main.go b/cmd/tomo/main.go
index a86ead4ba..4656bea41 100644
--- a/cmd/tomo/main.go
+++ b/cmd/tomo/main.go
@@ -24,6 +24,8 @@ import (
"strings"
"time"
+ "gopkg.in/urfave/cli.v1"
+
"github.com/tomochain/tomochain/accounts"
"github.com/tomochain/tomochain/accounts/keystore"
"github.com/tomochain/tomochain/cmd/utils"
@@ -36,7 +38,6 @@ import (
"github.com/tomochain/tomochain/log"
"github.com/tomochain/tomochain/metrics"
"github.com/tomochain/tomochain/node"
- "gopkg.in/urfave/cli.v1"
)
const (
@@ -118,6 +119,7 @@ var (
utils.RPCVirtualHostsFlag,
utils.EthStatsURLFlag,
utils.MetricsEnabledFlag,
+ utils.MetricsEnabledExpensiveFlag,
//utils.FakePoWFlag,
//utils.NoCompactionFlag,
//utils.GpoBlocksFlag,
diff --git a/cmd/utils/flags.go b/cmd/utils/flags.go
index 59a0cdeaf..0a17812e0 100644
--- a/cmd/utils/flags.go
+++ b/cmd/utils/flags.go
@@ -43,7 +43,6 @@ import (
"github.com/tomochain/tomochain/eth/gasprice"
"github.com/tomochain/tomochain/ethdb"
"github.com/tomochain/tomochain/log"
- "github.com/tomochain/tomochain/metrics"
"github.com/tomochain/tomochain/node"
"github.com/tomochain/tomochain/p2p"
"github.com/tomochain/tomochain/p2p/discover"
@@ -355,10 +354,15 @@ var (
Name: "ethstats",
Usage: "Reporting URL of a ethstats service (nodename:secret@host:port)",
}
+ // Metrics flags
MetricsEnabledFlag = cli.BoolFlag{
- Name: metrics.MetricsEnabledFlag,
+ Name: "metrics",
Usage: "Enable metrics collection and reporting",
}
+ MetricsEnabledExpensiveFlag = cli.BoolFlag{
+ Name: "metrics.expensive",
+ Usage: "Enable expensive metrics collection and reporting",
+ }
FakePoWFlag = cli.BoolFlag{
Name: "fakepow",
Usage: "Disables proof-of-work verification",
diff --git a/consensus/ethash/ethash.go b/consensus/ethash/ethash.go
index 139131e5b..8b91560c6 100644
--- a/consensus/ethash/ethash.go
+++ b/consensus/ethash/ethash.go
@@ -562,7 +562,7 @@ func (ethash *Ethash) SetThreads(threads int) {
// Hashrate implements PoW, returning the measured rate of the search invocations
// per second over the last minute.
func (ethash *Ethash) Hashrate() float64 {
- return ethash.hashrate.Rate1()
+ return ethash.hashrate.Snapshot().Rate1()
}
// APIs implements consensus.Engine, returning the user facing RPC APIs. Currently
diff --git a/core/blockchain.go b/core/blockchain.go
index bc0d8f876..4a46b6050 100644
--- a/core/blockchain.go
+++ b/core/blockchain.go
@@ -55,9 +55,37 @@ import (
)
var (
- blockInsertTimer = metrics.NewRegisteredTimer("chain/inserts", nil)
- CheckpointCh = make(chan int)
- ErrNoGenesis = errors.New("Genesis not found in chain")
+ headBlockGauge = metrics.NewRegisteredGauge("chain/head/block", nil)
+ headHeaderGauge = metrics.NewRegisteredGauge("chain/head/header", nil)
+ headFastBlockGauge = metrics.NewRegisteredGauge("chain/head/receipt", nil)
+
+ chainInfoGauge = metrics.NewRegisteredGaugeInfo("chain/info", nil)
+
+ accountReadTimer = metrics.NewRegisteredTimer("chain/account/reads", nil)
+ accountHashTimer = metrics.NewRegisteredTimer("chain/account/hashes", nil)
+ accountUpdateTimer = metrics.NewRegisteredTimer("chain/account/updates", nil)
+ accountCommitTimer = metrics.NewRegisteredTimer("chain/account/commits", nil)
+
+ storageReadTimer = metrics.NewRegisteredTimer("chain/storage/reads", nil)
+ storageHashTimer = metrics.NewRegisteredTimer("chain/storage/hashes", nil)
+ storageUpdateTimer = metrics.NewRegisteredTimer("chain/storage/updates", nil)
+ storageCommitTimer = metrics.NewRegisteredTimer("chain/storage/commits", nil)
+
+ triedbCommitTimer = metrics.NewRegisteredTimer("chain/triedb/commits", nil)
+
+ blockInsertTimer = metrics.NewRegisteredTimer("chain/inserts", nil)
+ blockValidationTimer = metrics.NewRegisteredTimer("chain/validation", nil)
+ blockExecutionTimer = metrics.NewRegisteredTimer("chain/execution", nil)
+ blockWriteTimer = metrics.NewRegisteredTimer("chain/write", nil)
+
+ blockReorgMeter = metrics.NewRegisteredMeter("chain/reorg/executes", nil)
+ blockReorgAddMeter = metrics.NewRegisteredMeter("chain/reorg/add", nil)
+ blockReorgDropMeter = metrics.NewRegisteredMeter("chain/reorg/drop", nil)
+)
+
+var (
+ CheckpointCh = make(chan int)
+ ErrNoGenesis = errors.New("Genesis not found in chain")
)
const (
@@ -231,6 +259,10 @@ func NewBlockChain(db ethdb.Database, cacheConfig *CacheConfig, chainConfig *par
if bc.genesisBlock == nil {
return nil, ErrNoGenesis
}
+
+ // Update chain info data metrics
+ chainInfoGauge.Update(metrics.GaugeInfoValue{"chain_id": bc.chainConfig.ChainId.String()})
+
if err := bc.loadLastState(); err != nil {
return nil, err
}
@@ -289,6 +321,8 @@ func (bc *BlockChain) loadLastState() error {
log.Warn("Head block missing, resetting chain", "hash", head)
return bc.Reset()
}
+ // Everything seems to be fine, set as the head block
+ headBlockGauge.Update(int64(currentBlock.NumberU64()))
repair := false
if common.Rewound != uint64(0) {
repair = true
@@ -353,9 +387,11 @@ func (bc *BlockChain) loadLastState() error {
// Restore the last known head fast block
bc.currentFastBlock.Store(currentBlock)
+ headFastBlockGauge.Update(int64(currentBlock.NumberU64()))
if head := rawdb.GetHeadFastBlockHash(bc.db); head != (common.Hash{}) {
if block := bc.GetBlockByHash(head); block != nil {
bc.currentFastBlock.Store(block)
+ headFastBlockGauge.Update(int64(block.NumberU64()))
}
}
@@ -423,9 +459,11 @@ func (bc *BlockChain) SetHead(head uint64) error {
if err := rawdb.WriteHeadBlockHash(bc.db, currentBlock.Hash()); err != nil {
log.Crit("Failed to reset head full block", "err", err)
}
+ headBlockGauge.Update(int64(currentBlock.NumberU64()))
if err := rawdb.WriteHeadFastBlockHash(bc.db, currentFastBlock.Hash()); err != nil {
log.Crit("Failed to reset head fast block", "err", err)
}
+ headFastBlockGauge.Update(int64(currentFastBlock.NumberU64()))
return bc.loadLastState()
}
@@ -568,10 +606,11 @@ func (bc *BlockChain) ResetWithGenesisBlock(genesis *types.Block) error {
bc.genesisBlock = genesis
bc.insert(bc.genesisBlock)
bc.currentBlock.Store(bc.genesisBlock)
+ headBlockGauge.Update(int64(bc.genesisBlock.NumberU64()))
bc.hc.SetGenesis(bc.genesisBlock.Header())
bc.hc.SetCurrentHeader(bc.genesisBlock.Header())
bc.currentFastBlock.Store(bc.genesisBlock)
-
+ headFastBlockGauge.Update(int64(bc.genesisBlock.NumberU64()))
return nil
}
@@ -668,6 +707,7 @@ func (bc *BlockChain) insert(block *types.Block) {
log.Crit("Failed to insert head block hash", "err", err)
}
bc.currentBlock.Store(block)
+ headBlockGauge.Update(int64(block.NumberU64()))
// save cache BlockSigners
if bc.chainConfig.Posv != nil && !bc.chainConfig.IsTIPSigning(block.Number()) {
@@ -685,6 +725,7 @@ func (bc *BlockChain) insert(block *types.Block) {
log.Crit("Failed to insert head fast block hash", "err", err)
}
bc.currentFastBlock.Store(block)
+ headFastBlockGauge.Update(int64(block.NumberU64()))
}
}
@@ -1122,6 +1163,7 @@ func (bc *BlockChain) InsertReceiptChain(blockChain types.Blocks, receiptChain [
log.Crit("Failed to update head fast block hash", "err", err)
}
bc.currentFastBlock.Store(head)
+ headFastBlockGauge.Update(int64(head.NumberU64()))
}
}
bc.mu.Unlock()
@@ -1358,6 +1400,7 @@ func (bc *BlockChain) WriteBlockWithState(block *types.Block, receipts []*types.
if err := batch.Write(); err != nil {
return NonStatTy, err
}
+ // Note: headBlockGauge is updated in insert() only when the block becomes the canonical head
// Set new head.
if status == CanonStatTy {
@@ -1444,8 +1487,6 @@ func (bc *BlockChain) insertChain(chain types.Blocks) (int, []interface{}, []*ty
return i, events, coalescedLogs, ErrBlacklistedHash
}
// Wait for the block's verification to complete
- bstart := time.Now()
-
err := <-results
if err == nil {
err = bc.Validator().ValidateBody(block)
@@ -1515,6 +1556,7 @@ func (bc *BlockChain) insertChain(chain types.Blocks) (int, []interface{}, []*ty
}
// Create a new statedb using the parent block and report an
// error if it fails.
+ start := time.Now()
var parent *types.Block
if i == 0 {
parent = bc.GetBlock(block.ParentHash(), block.NumberU64()-1)
@@ -1626,24 +1668,55 @@ func (bc *BlockChain) insertChain(chain types.Blocks) (int, []interface{}, []*ty
}
}
feeCapacity := state.GetTRC21FeeCapacityFromStateWithCache(parent.Root(), statedb)
+
// Process block using the parent state as reference point.
+ pstart := time.Now()
receipts, logs, usedGas, err := bc.processor.Process(block, statedb, tradingState, bc.vmConfig, feeCapacity)
if err != nil {
bc.reportBlock(block, receipts, err)
return i, events, coalescedLogs, err
}
+ ptime := time.Since(pstart)
+
// Validate the state using the default validator
+ vstart := time.Now()
err = bc.Validator().ValidateState(block, parent, statedb, receipts, usedGas)
if err != nil {
bc.reportBlock(block, receipts, err)
return i, events, coalescedLogs, err
}
- proctime := time.Since(bstart)
+ vtime := time.Since(vstart)
+ proctime := time.Since(start) // processing + validation
+
+ // Update the metrics touched during block processing and validation
+ accountReadTimer.Update(statedb.AccountReads) // Account reads are complete(in processing)
+ storageReadTimer.Update(statedb.StorageReads) // Storage reads are complete(in processing)
+ accountUpdateTimer.Update(statedb.AccountUpdates) // Account updates are complete(in validation)
+ storageUpdateTimer.Update(statedb.StorageUpdates) // Storage updates are complete(in validation)
+ accountHashTimer.Update(statedb.AccountHashes) // Account hashes are complete(in validation)
+ storageHashTimer.Update(statedb.StorageHashes) // Storage hashes are complete(in validation)
+ triehash := statedb.AccountHashes + statedb.StorageHashes // The time spent on tries hashing
+ trieUpdate := statedb.AccountUpdates + statedb.StorageUpdates // The time spent on tries update
+ trieRead := statedb.AccountReads // The time spent on account read
+ trieRead += statedb.StorageReads // The time spent on storage read
+ blockExecutionTimer.Update(ptime - trieRead) // The time spent on EVM processing
+ blockValidationTimer.Update(vtime - (triehash + trieUpdate)) // The time spent on block validation
+
// Write the block to the chain and get the status.
+ wstart := time.Now()
status, err := bc.WriteBlockWithState(block, receipts, statedb, tradingState, lendingState)
if err != nil {
return i, events, coalescedLogs, err
}
+ // Update the metrics touched during block commit
+ accountCommitTimer.Update(statedb.AccountCommits) // Account commits are complete, we can mark them
+ storageCommitTimer.Update(statedb.StorageCommits) // Storage commits are complete, we can mark them
+ triedbCommitTimer.Update(statedb.TrieDBCommits) // Trie database commits are complete, we can mark them
+
+ blockWriteTimer.Update(time.Since(wstart) - statedb.AccountCommits - statedb.StorageCommits - statedb.TrieDBCommits)
+ duration := time.Since(start)
+ blockInsertTimer.Update(duration)
+
if bc.chainConfig.Posv != nil {
c := bc.engine.(*posv.Posv)
coinbase := c.Signer()
@@ -1663,10 +1736,9 @@ func (bc *BlockChain) insertChain(chain types.Blocks) (int, []interface{}, []*ty
switch status {
case CanonStatTy:
log.Debug("Inserted new block from downloader", "number", block.Number(), "hash", block.Hash(), "uncles", len(block.Uncles()),
- "txs", len(block.Transactions()), "gas", block.GasUsed(), "elapsed", common.PrettyDuration(time.Since(bstart)))
+ "txs", len(block.Transactions()), "gas", block.GasUsed(), "elapsed", common.PrettyDuration(duration))
coalescedLogs = append(coalescedLogs, logs...)
- blockInsertTimer.UpdateSince(bstart)
events = append(events, ChainEvent{block, block.Hash(), logs})
lastCanon = block
@@ -1679,9 +1751,7 @@ func (bc *BlockChain) insertChain(chain types.Blocks) (int, []interface{}, []*ty
}
case SideStatTy:
log.Debug("Inserted forked block from downloader", "number", block.Number(), "hash", block.Hash(), "diff", block.Difficulty(), "elapsed",
- common.PrettyDuration(time.Since(bstart)), "txs", len(block.Transactions()), "gas", block.GasUsed(), "uncles", len(block.Uncles()))
-
- blockInsertTimer.UpdateSince(bstart)
+ common.PrettyDuration(duration), "txs", len(block.Transactions()), "gas", block.GasUsed(), "uncles", len(block.Uncles()))
events = append(events, ChainSideEvent{block})
bc.UpdateBlocksHashCache(block)
}
@@ -1771,7 +1841,6 @@ func (bc *BlockChain) getResultBlock(block *types.Block, verifiedM2 bool) (*Resu
return nil, ErrBlacklistedHash
}
// Wait for the block's verification to complete
- bstart := time.Now()
err := bc.Validator().ValidateBody(block)
switch {
case err == ErrKnownBlock:
@@ -1918,23 +1987,44 @@ func (bc *BlockChain) getResultBlock(block *types.Block, verifiedM2 bool) (*Resu
}
feeCapacity := state.GetTRC21FeeCapacityFromStateWithCache(parent.Root(), statedb)
// Process block using the parent state as reference point.
+ var (
+ start = time.Now()
+ pstart = time.Now()
+ )
receipts, logs, usedGas, err := bc.processor.ProcessBlockNoValidator(calculatedBlock, statedb, tradingState, bc.vmConfig, feeCapacity)
- process := time.Since(bstart)
if err != nil {
if err != ErrStopPreparingBlock {
bc.reportBlock(block, receipts, err)
}
return nil, err
}
+ ptime := time.Since(pstart)
// Validate the state using the default validator
+ vstart := time.Now()
err = bc.Validator().ValidateState(block, parent, statedb, receipts, usedGas)
if err != nil {
bc.reportBlock(block, receipts, err)
return nil, err
}
- proctime := time.Since(bstart)
+ vtime := time.Since(vstart)
+ proctime := time.Since(start) // processing + validation
+
+ // Update the metrics touched during block processing and validation
+ accountReadTimer.Update(statedb.AccountReads) // Account reads are complete(in processing)
+ storageReadTimer.Update(statedb.StorageReads) // Storage reads are complete(in processing)
+ accountUpdateTimer.Update(statedb.AccountUpdates) // Account updates are complete(in validation)
+ storageUpdateTimer.Update(statedb.StorageUpdates) // Storage updates are complete(in validation)
+ accountHashTimer.Update(statedb.AccountHashes) // Account hashes are complete(in validation)
+ storageHashTimer.Update(statedb.StorageHashes) // Storage hashes are complete(in validation)
+ triehash := statedb.AccountHashes + statedb.StorageHashes // The time spent on tries hashing
+ trieUpdate := statedb.AccountUpdates + statedb.StorageUpdates // The time spent on tries update
+ trieRead := statedb.AccountReads // The time spent on account read
+ trieRead += statedb.StorageReads // The time spent on storage read
+ blockExecutionTimer.Update(ptime - trieRead) // The time spent on EVM processing
+ blockValidationTimer.Update(vtime - (triehash + trieUpdate)) // The time spent on block validation
+
log.Debug("Calculate new block", "number", block.Number(), "hash", block.Hash(), "uncles", len(block.Uncles()),
- "txs", len(block.Transactions()), "gas", block.GasUsed(), "elapsed", common.PrettyDuration(time.Since(bstart)), "process", process)
+ "txs", len(block.Transactions()), "gas", block.GasUsed(), "elapsed", common.PrettyDuration(time.Since(start)))
return &ResultProcessBlock{receipts: receipts, logs: logs, state: statedb, tradingState: tradingState, lendingState: lendingState, proctime: proctime, usedGas: usedGas}, nil
}
@@ -1985,8 +2075,18 @@ func (bc *BlockChain) insertBlock(block *types.Block) ([]interface{}, []*types.L
if bc.HasBlockAndFullState(block.Hash(), block.NumberU64()) {
return events, coalescedLogs, nil
}
+
+ wstart := time.Now()
status, err := bc.WriteBlockWithState(block, result.receipts, result.state, result.tradingState, result.lendingState)
+ // Update the metrics touched during block commit
+ accountCommitTimer.Update(result.state.AccountCommits) // Account commits are complete, we can mark them
+ storageCommitTimer.Update(result.state.StorageCommits) // Storage commits are complete, we can mark them
+ triedbCommitTimer.Update(result.state.TrieDBCommits) // Trie database commits are complete, we can mark them
+
+ blockWriteTimer.Update(time.Since(wstart) - result.state.AccountCommits - result.state.StorageCommits - result.state.TrieDBCommits)
+ blockInsertTimer.Update(result.proctime)
+
if err != nil {
return events, coalescedLogs, err
}
@@ -2023,8 +2123,6 @@ func (bc *BlockChain) insertBlock(block *types.Block) ([]interface{}, []*types.L
case SideStatTy:
log.Debug("Inserted forked block from fetcher", "number", block.Number(), "hash", block.Hash(), "diff", block.Difficulty(), "elapsed",
common.PrettyDuration(time.Since(block.ReceivedAt)), "txs", len(block.Transactions()), "gas", block.GasUsed(), "uncles", len(block.Uncles()))
-
- blockInsertTimer.Update(result.proctime)
events = append(events, ChainSideEvent{block})
bc.UpdateBlocksHashCache(block)
@@ -2180,6 +2278,9 @@ func (bc *BlockChain) reorg(oldBlock, newBlock *types.Block) error {
}
logFn("Chain split detected", "number", commonBlock.Number(), "hash", commonBlock.Hash(),
"drop", len(oldChain), "dropfrom", oldChain[0].Hash(), "add", len(newChain), "addfrom", newChain[0].Hash())
+ blockReorgAddMeter.Mark(int64(len(newChain)))
+ blockReorgDropMeter.Mark(int64(len(oldChain)))
+ blockReorgMeter.Mark(1)
} else {
log.Error("Impossible reorg, please file an issue", "oldnum", oldBlock.Number(), "oldhash", oldBlock.Hash(), "newnum", newBlock.Number(), "newhash", newBlock.Hash())
}
diff --git a/core/headerchain.go b/core/headerchain.go
index 4fe236824..8c3fe0773 100644
--- a/core/headerchain.go
+++ b/core/headerchain.go
@@ -106,7 +106,7 @@ func NewHeaderChain(chainDb ethdb.Database, config *params.ChainConfig, engine c
}
}
hc.currentHeaderHash = hc.CurrentHeader().Hash()
-
+ headHeaderGauge.Update(hc.CurrentHeader().Number.Int64())
return hc, nil
}
@@ -398,6 +398,7 @@ func (hc *HeaderChain) SetCurrentHeader(head *types.Header) {
}
hc.currentHeader.Store(head)
hc.currentHeaderHash = head.Hash()
+ headHeaderGauge.Update(head.Number.Int64())
}
// DeleteCallback is a callback function that is called by SetHead before
@@ -422,6 +423,7 @@ func (hc *HeaderChain) SetHead(head uint64, delFn DeleteCallback) {
rawdb.DeleteHeader(hc.chainDb, hash, num)
rawdb.DeleteTd(hc.chainDb, hash, num)
hc.currentHeader.Store(hc.GetHeader(hdr.ParentHash, hdr.Number.Uint64()-1))
+ headHeaderGauge.Update(hdr.Number.Int64() - 1)
}
// Roll back the canonical chain numbering
for i := height; i > head; i-- {
diff --git a/core/state/metrics.go b/core/state/metrics.go
new file mode 100644
index 000000000..7d7b7e14d
--- /dev/null
+++ b/core/state/metrics.go
@@ -0,0 +1,26 @@
+// Copyright 2021 The go-ethereum Authors
+// This file is part of the go-ethereum library.
+//
+// The go-ethereum library is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Lesser General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+//
+// The go-ethereum library is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Lesser General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public License
+// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
+
+package state
+
+import "github.com/tomochain/tomochain/metrics"
+
+var (
+ accountUpdatedMeter = metrics.NewRegisteredMeter("state/update/account", nil)
+ storageUpdatedMeter = metrics.NewRegisteredMeter("state/update/storage", nil)
+ accountDeletedMeter = metrics.NewRegisteredMeter("state/delete/account", nil)
+ storageDeletedMeter = metrics.NewRegisteredMeter("state/delete/storage", nil)
+)
diff --git a/core/state/state_object.go b/core/state/state_object.go
index b03231e23..593c9a144 100644
--- a/core/state/state_object.go
+++ b/core/state/state_object.go
@@ -21,9 +21,11 @@ import (
"fmt"
"io"
"math/big"
+ "time"
"github.com/tomochain/tomochain/common"
"github.com/tomochain/tomochain/crypto"
+ "github.com/tomochain/tomochain/metrics"
"github.com/tomochain/tomochain/rlp"
)
@@ -171,7 +173,12 @@ func (c *stateObject) getTrie(db Database) Trie {
func (self *stateObject) GetCommittedState(db Database, key common.Hash) common.Hash {
value := common.Hash{}
// Load from DB in case it is missing.
- enc, err := self.getTrie(db).TryGet(key[:])
+ tr := self.getTrie(db)
+ start := time.Now()
+ enc, err := tr.TryGet(key[:])
+ if metrics.EnabledExpensive {
+ self.db.StorageReads += time.Since(start)
+ }
if err != nil {
self.setError(err)
return common.Hash{}
@@ -232,16 +239,22 @@ func (self *stateObject) setState(key, value common.Hash) {
// updateTrie writes cached storage modifications into the object's storage trie.
func (self *stateObject) updateTrie(db Database) Trie {
+ // Track the amount of time wasted on updating the storage trie
+ if metrics.EnabledExpensive {
+ defer func(start time.Time) { self.db.StorageUpdates += time.Since(start) }(time.Now())
+ }
tr := self.getTrie(db)
for key, value := range self.dirtyStorage {
delete(self.dirtyStorage, key)
if (value == common.Hash{}) {
self.setError(tr.TryDelete(key[:]))
+ self.db.StorageDeleted += 1
continue
}
// Encoding []byte cannot fail, ok to ignore the error.
v, _ := rlp.EncodeToBytes(bytes.TrimLeft(value[:], "\x00"))
self.setError(tr.TryUpdate(key[:], v))
+ self.db.StorageUpdated += 1
}
return tr
}
@@ -249,6 +262,10 @@ func (self *stateObject) updateTrie(db Database) Trie {
// UpdateRoot sets the trie root to the current root hash of
func (self *stateObject) updateRoot(db Database) {
self.updateTrie(db)
+ // Track the amount of time wasted on hashing the storage trie
+ if metrics.EnabledExpensive {
+ defer func(start time.Time) { self.db.StorageHashes += time.Since(start) }(time.Now())
+ }
self.data.Root = self.trie.Hash()
}
@@ -259,6 +276,10 @@ func (self *stateObject) CommitTrie(db Database) error {
if self.dbErr != nil {
return self.dbErr
}
+ // Track the amount of time wasted on committing the storage trie
+ if metrics.EnabledExpensive {
+ defer func(start time.Time) { self.db.StorageCommits += time.Since(start) }(time.Now())
+ }
root, err := self.trie.Commit(nil)
if err == nil {
self.data.Root = root
diff --git a/core/state/statedb.go b/core/state/statedb.go
index 7a3357b3e..2efed7393 100644
--- a/core/state/statedb.go
+++ b/core/state/statedb.go
@@ -22,11 +22,13 @@ import (
"math/big"
"sort"
"sync"
+ "time"
"github.com/tomochain/tomochain/common"
"github.com/tomochain/tomochain/core/types"
"github.com/tomochain/tomochain/crypto"
"github.com/tomochain/tomochain/log"
+ "github.com/tomochain/tomochain/metrics"
"github.com/tomochain/tomochain/rlp"
"github.com/tomochain/tomochain/trie"
)
@@ -80,6 +82,22 @@ type StateDB struct {
validRevisions []revision
nextRevisionId int
+ // Measurements gathered during execution for debugging purposes
+ AccountReads time.Duration
+ AccountHashes time.Duration
+ AccountUpdates time.Duration
+ AccountCommits time.Duration
+ StorageReads time.Duration
+ StorageHashes time.Duration
+ StorageUpdates time.Duration
+ StorageCommits time.Duration
+ TrieDBCommits time.Duration
+
+ AccountUpdated int
+ StorageUpdated int
+ AccountDeleted int
+ StorageDeleted int
+
lock sync.Mutex
}
@@ -359,6 +377,10 @@ func (self *StateDB) Suicide(addr common.Address) bool {
// updateStateObject writes the given object to the trie.
func (self *StateDB) updateStateObject(stateObject *stateObject) {
+ // Track the amount of time wasted on updating the account from the trie
+ if metrics.EnabledExpensive {
+ defer func(start time.Time) { self.AccountUpdates += time.Since(start) }(time.Now())
+ }
addr := stateObject.Address()
data, err := rlp.EncodeToBytes(stateObject)
if err != nil {
@@ -369,6 +391,10 @@ func (self *StateDB) updateStateObject(stateObject *stateObject) {
// deleteStateObject removes the given object from the state trie.
func (self *StateDB) deleteStateObject(stateObject *stateObject) {
+ // Track the amount of time wasted on deleting the account from the trie
+ if metrics.EnabledExpensive {
+ defer func(start time.Time) { self.AccountUpdates += time.Since(start) }(time.Now())
+ }
stateObject.deleted = true
addr := stateObject.Address()
self.setError(self.trie.TryDelete(addr[:]))
@@ -393,7 +419,11 @@ func (self *StateDB) getStateObject(addr common.Address) (stateObject *stateObje
}
// Load the object from the database.
+ start := time.Now()
enc, err := self.trie.TryGet(addr[:])
+ if metrics.EnabledExpensive {
+ self.AccountReads += time.Since(start)
+ }
if len(enc) == 0 {
self.setError(err)
return nil
@@ -449,8 +479,8 @@ func (self *StateDB) createObject(addr common.Address) (newobj, prev *stateObjec
// CreateAccount is called during the EVM CREATE operation. The situation might arise that
// a contract does the following:
//
-// 1. sends funds to sha(account ++ (nonce + 1))
-// 2. tx_create(sha(account ++ nonce)) (note that this gets the address of 1)
+// 1. sends funds to sha(account ++ (nonce + 1))
+// 2. tx_create(sha(account ++ nonce)) (note that this gets the address of 1)
//
// Carrying over the balance ensures that Ether doesn't disappear.
func (self *StateDB) CreateAccount(addr common.Address) {
@@ -555,9 +585,11 @@ func (s *StateDB) Finalise(deleteEmptyObjects bool) {
stateObject := s.stateObjects[addr]
if stateObject.suicided || (deleteEmptyObjects && stateObject.empty()) {
s.deleteStateObject(stateObject)
+ s.AccountDeleted++
} else {
stateObject.updateRoot(s.db)
s.updateStateObject(stateObject)
+ s.AccountUpdated++
}
}
// Invalidate journal because reverting across transactions is not allowed.
@@ -569,6 +601,10 @@ func (s *StateDB) Finalise(deleteEmptyObjects bool) {
// goes into transaction receipts.
func (s *StateDB) IntermediateRoot(deleteEmptyObjects bool) common.Hash {
s.Finalise(deleteEmptyObjects)
+ // Track the amount of time wasted on hashing the account trie
+ if metrics.EnabledExpensive {
+ defer func(start time.Time) { s.AccountHashes += time.Since(start) }(time.Now())
+ }
return s.trie.Hash()
}
@@ -611,7 +647,11 @@ func (s *StateDB) clearJournalAndRefund() {
func (s *StateDB) Commit(deleteEmptyObjects bool) (root common.Hash, err error) {
defer s.clearJournalAndRefund()
- // Commit objects to the trie.
+ // Write the account trie changes, measuring the amount of wasted time
+ var start time.Time
+ if metrics.EnabledExpensive {
+ start = time.Now()
+ }
for addr, stateObject := range s.stateObjects {
_, isDirty := s.stateObjectsDirty[addr]
switch {
@@ -619,6 +659,7 @@ func (s *StateDB) Commit(deleteEmptyObjects bool) (root common.Hash, err error)
// If the object has been removed, don't bother syncing it
// and just mark it for deletion in the trie.
s.deleteStateObject(stateObject)
+ s.AccountDeleted++
case isDirty:
// Write any contract code associated with the state object
if stateObject.code != nil && stateObject.dirtyCode {
@@ -631,10 +672,22 @@ func (s *StateDB) Commit(deleteEmptyObjects bool) (root common.Hash, err error)
}
// Update the object in the main account trie.
s.updateStateObject(stateObject)
+ s.AccountUpdated++
}
delete(s.stateObjectsDirty, addr)
}
+ if metrics.EnabledExpensive {
+ s.AccountCommits += time.Since(start)
+
+ accountUpdatedMeter.Mark(int64(s.AccountUpdated))
+ storageUpdatedMeter.Mark(int64(s.StorageUpdated))
+ accountDeletedMeter.Mark(int64(s.AccountDeleted))
+ storageDeletedMeter.Mark(int64(s.StorageDeleted))
+ s.AccountUpdated, s.AccountDeleted = 0, 0
+ s.StorageUpdated, s.StorageDeleted = 0, 0
+ }
// Write trie changes.
+ start = time.Now()
root, err = s.trie.Commit(func(leaf []byte, parent common.Hash) error {
var account Account
if err := rlp.DecodeBytes(leaf, &account); err != nil {
@@ -649,6 +702,9 @@ func (s *StateDB) Commit(deleteEmptyObjects bool) (root common.Hash, err error)
}
return nil
})
+ if metrics.EnabledExpensive {
+ s.TrieDBCommits += time.Since(start)
+ }
return root, err
}
diff --git a/go.mod b/go.mod
index 9087373a4..5c4e4b902 100644
--- a/go.mod
+++ b/go.mod
@@ -18,14 +18,15 @@ require (
github.com/globalsign/mgo v0.0.0-20181015135952-eeefdecb41b8
github.com/go-stack/stack v1.8.0
github.com/golang/protobuf v1.3.2
- github.com/golang/snappy v0.0.1
+ github.com/golang/snappy v0.0.4
github.com/hashicorp/golang-lru v0.5.3
github.com/huin/goupnp v1.0.0
- github.com/influxdata/influxdb v1.7.9
+ github.com/influxdata/influxdb-client-go/v2 v2.13.0
+ github.com/influxdata/influxdb1-client v0.0.0-20190809212627-fc22c7df067e
github.com/jackpal/go-nat-pmp v1.0.2-0.20160603034137-1fa385a6f458
github.com/julienschmidt/httprouter v1.3.0
github.com/karalabe/hid v1.0.0
- github.com/mattn/go-colorable v0.1.0
+ github.com/mattn/go-colorable v0.1.13
github.com/naoina/toml v0.1.2-0.20170918210437-9fafd6967416
github.com/olekukonko/tablewriter v0.0.2-0.20190409134802-7e037d187b0c
github.com/pborman/uuid v1.2.0
@@ -34,14 +35,16 @@ require (
github.com/prometheus/prometheus v1.7.2-0.20170814170113-3101606756c5
github.com/rjeczalik/notify v0.9.2
github.com/rs/cors v1.6.0
+ github.com/shirou/gopsutil v3.21.11+incompatible
github.com/steakknife/bloomfilter v0.0.0-20180922174646-6819c0d2a570
- github.com/stretchr/testify v1.4.0
+ github.com/stretchr/testify v1.8.4
github.com/syndtr/goleveldb v1.0.1-0.20190923125748-758128399b1d
- golang.org/x/crypto v0.0.0-20210921155107-089bfa567519
- golang.org/x/net v0.0.0-20220722155237-a158d28d115b
- golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4
- golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f
- golang.org/x/tools v0.1.12
+ golang.org/x/crypto v0.18.0
+ golang.org/x/exp v0.0.0-20240112132812-db7319d0e0e3
+ golang.org/x/net v0.20.0
+ golang.org/x/sync v0.6.0
+ golang.org/x/sys v0.16.0
+ golang.org/x/tools v0.17.0
gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c
gopkg.in/karalabe/cookiejar.v2 v2.0.0-20150724131613-8dcd6a7f4951
gopkg.in/natefinch/npipe.v2 v2.0.0-20160621034901-c1b8fa8bdcce
@@ -50,27 +53,33 @@ require (
)
require (
+ github.com/apapsch/go-jsonmerge/v2 v2.0.0 // indirect
github.com/cespare/xxhash/v2 v2.1.1 // indirect
github.com/dlclark/regexp2 v1.7.0 // indirect
+ github.com/go-ole/go-ole v1.2.6 // indirect
github.com/go-sourcemap/sourcemap v2.1.3+incompatible // indirect
- github.com/google/go-cmp v0.3.1 // indirect
github.com/google/pprof v0.0.0-20230207041349-798e818bf904 // indirect
- github.com/google/uuid v1.0.0 // indirect
+ github.com/google/uuid v1.3.1 // indirect
+ github.com/influxdata/line-protocol v0.0.0-20200327222509-2487e7298839 // indirect
github.com/kr/pretty v0.3.0 // indirect
github.com/kr/text v0.2.0 // indirect
github.com/maruel/panicparse v0.0.0-20160720141634-ad661195ed0e // indirect
github.com/maruel/ut v1.0.2 // indirect
- github.com/mattn/go-isatty v0.0.5-0.20180830101745-3fb116b82035 // indirect
+ github.com/mattn/go-isatty v0.0.19 // indirect
github.com/mattn/go-runewidth v0.0.4 // indirect
github.com/mitchellh/go-wordwrap v0.0.0-20150314170334-ad45545899c7 // indirect
github.com/naoina/go-stringutil v0.1.0 // indirect
github.com/nsf/termbox-go v0.0.0-20170211012700-3540b76b9c77 // indirect
+ github.com/oapi-codegen/runtime v1.0.0 // indirect
github.com/pmezard/go-difflib v1.0.0 // indirect
github.com/rogpeppe/go-internal v1.6.1 // indirect
github.com/steakknife/hamming v0.0.0-20180906055917-c99c65617cd3 // indirect
- golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4 // indirect
- golang.org/x/term v0.0.0-20210927222741-03fcf44c2211 // indirect
- golang.org/x/text v0.3.8 // indirect
- gopkg.in/yaml.v2 v2.4.0 // indirect
+ github.com/tklauser/go-sysconf v0.3.13 // indirect
+ github.com/tklauser/numcpus v0.7.0 // indirect
+ github.com/yusufpapurcu/wmi v1.2.3 // indirect
+ golang.org/x/mod v0.14.0 // indirect
+ golang.org/x/term v0.16.0 // indirect
+ golang.org/x/text v0.14.0 // indirect
+ gopkg.in/yaml.v3 v3.0.1 // indirect
gotest.tools v2.2.0+incompatible // indirect
)
diff --git a/go.sum b/go.sum
index 8c815c75c..d10e868ca 100644
--- a/go.sum
+++ b/go.sum
@@ -3,6 +3,7 @@ bazil.org/fuse v0.0.0-20180421153158-65cc252bf669/go.mod h1:Xbm+BRKSBEpa4q4hTSxo
cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw=
github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU=
github.com/DataDog/zstd v1.3.6-0.20190409195224-796139022798/go.mod h1:1jcaCB/ufaK+sKp1NBhlGmpz41jOoPQ35bpF36t7BBo=
+github.com/RaveNoX/go-jsoncommentstrip v1.0.0/go.mod h1:78ihd09MekBnJnxpICcwzCMzGrKSKYe4AqU6PDYYpjk=
github.com/Shopify/sarama v1.23.1/go.mod h1:XLH1GYJnLVE0XCr6KdJGVJRTwY30moWNJ4sERjXX6fs=
github.com/Shopify/toxiproxy v2.1.4+incompatible/go.mod h1:OXgGpZ6Cli1/URJOF1DMxUHB2q5Ap20/P/eIdh4G0pI=
github.com/VictoriaMetrics/fastcache v1.5.7 h1:4y6y0G8PRzszQUYIQHHssv/jgPHAb5qQuuDNdCbyAgw=
@@ -11,6 +12,8 @@ github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuy
github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0=
github.com/allegro/bigcache v1.2.1-0.20190218064605-e24eb225f156 h1:eMwmnE/GDgah4HI848JfFxHt+iPb26b4zyfspmqY0/8=
github.com/allegro/bigcache v1.2.1-0.20190218064605-e24eb225f156/go.mod h1:Cb/ax3seSYIx7SuZdm2G2xzfwmv3TPSk2ucNfQESPXM=
+github.com/apapsch/go-jsonmerge/v2 v2.0.0 h1:axGnT1gRIfimI7gJifB699GoE/oq+F2MU7Dml6nw9rQ=
+github.com/apapsch/go-jsonmerge/v2 v2.0.0/go.mod h1:lvDnEdqiQrp0O42VQGgmlKpxL1AP2+08jFMw88y4klk=
github.com/aristanetworks/fsnotify v1.4.2/go.mod h1:D/rtu7LpjYM8tRJphJ0hUBYpjai8SfX+aSNsWDTq/Ks=
github.com/aristanetworks/glog v0.0.0-20180419172825-c15b03b3054f/go.mod h1:KASm+qXFKs/xjSoWn30NrWBBvdTTQq+UjkhjEJHfSFA=
github.com/aristanetworks/goarista v0.0.0-20191023202215-f096da5361bb h1:gXDS2cX8AS8KbnP32J6XMSjzC1FhHEdHfUUCy018VrA=
@@ -19,6 +22,7 @@ github.com/aristanetworks/splunk-hec-go v0.3.3/go.mod h1:1VHO9r17b0K7WmOlLb9nTk/
github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q=
github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8=
github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw=
+github.com/bmatcuk/doublestar v1.1.1/go.mod h1:UD6OnuiIn0yFxxA2le/rnRU1G4RaI4UvFv1sNto9p6w=
github.com/btcsuite/btcd v0.0.0-20171128150713-2e60448ffcc6 h1:Eey/GGQ/E5Xp1P2Lyx1qj007hLZfbi0+CoVeJruGCtI=
github.com/btcsuite/btcd v0.0.0-20171128150713-2e60448ffcc6/go.mod h1:Dmm/EzmjnCiweXmzRIAiUWCInVmPgjkzgv5k4tVyXiQ=
github.com/cespare/cp v1.1.1 h1:nCb6ZLdB7NRaqsm91JtQTAme2SKJzXVsdPIPkyJr1MU=
@@ -61,6 +65,8 @@ github.com/globalsign/mgo v0.0.0-20181015135952-eeefdecb41b8/go.mod h1:xkRDCp4j0
github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as=
github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9GBnD5lWE=
github.com/go-logfmt/logfmt v0.4.0/go.mod h1:3RMwSq7FuexP4Kalkev3ejPJsZTpXXBr9+V4qmtdjCk=
+github.com/go-ole/go-ole v1.2.6 h1:/Fpf6oFPoeFik9ty7siob0G6Ke8QvQEuVcuChpwXzpY=
+github.com/go-ole/go-ole v1.2.6/go.mod h1:pprOEPIfldk/42T2oK7lQ4v4JSDwmV0As9GaiUsvbm0=
github.com/go-sourcemap/sourcemap v2.1.3+incompatible h1:W1iEw64niKVGogNgBN3ePyLFfuisuzeidWPMPWmECqU=
github.com/go-sourcemap/sourcemap v2.1.3+incompatible/go.mod h1:F8jJfvm2KbVjc5NqelyYJmf/v5J0dwNLS2mL4sNA1Jg=
github.com/go-stack/stack v1.8.0 h1:5SgMzNM5HxrEjV0ww2lTmX6E2Izsfxas4+YHWRs3Lsk=
@@ -72,17 +78,18 @@ github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5y
github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
github.com/golang/protobuf v1.3.2 h1:6nsPYzhq5kReh6QImI3k5qWzO4PEbvbIW2cwSfR/6xs=
github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
-github.com/golang/snappy v0.0.1 h1:Qgr9rKW7uDUkrbSmQeiDsGa8SjGyCOGtuasMWwvp2P4=
github.com/golang/snappy v0.0.1/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q=
+github.com/golang/snappy v0.0.4 h1:yAGX7huGHXlcLOEtBnF4w7FQwA26wojNCwOYAEhLjQM=
+github.com/golang/snappy v0.0.4/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q=
github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M=
github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
-github.com/google/go-cmp v0.3.1 h1:Xye71clBPdm5HgqGwUkwhbynsUJZhDbS20FvLhQ2izg=
-github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
+github.com/google/go-cmp v0.5.8 h1:e6P7q2lk1O+qJJb4BtCQXlK8vWEO8V1ZeuEdJNOqZyg=
github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
github.com/google/pprof v0.0.0-20230207041349-798e818bf904 h1:4/hN5RUoecvl+RmJRE2YxKWtnnQls6rQjjW5oV7qg2U=
github.com/google/pprof v0.0.0-20230207041349-798e818bf904/go.mod h1:uglQLonpP8qtYCYyzA+8c/9qtqgA3qsXGYqCPKARAFg=
-github.com/google/uuid v1.0.0 h1:b4Gk+7WdP/d3HZH8EJsZpvV7EtDOgaZLtnaNGIu1adA=
github.com/google/uuid v1.0.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
+github.com/google/uuid v1.3.1 h1:KjJaJ9iWZ3jOFZIf1Lqf4laDRCasjl0BCmnEGxkdLb4=
+github.com/google/uuid v1.3.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
github.com/hashicorp/go-uuid v1.0.1/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro=
github.com/hashicorp/golang-lru v0.5.3 h1:YPkqC67at8FYaadspW/6uE0COsBxS2656RLEr8Bppgk=
github.com/hashicorp/golang-lru v0.5.3/go.mod h1:iADmTwqILo4mZ8BN3D2Q6+9jd8WM5uGBxy+E8yxSoD4=
@@ -92,14 +99,18 @@ github.com/huin/goupnp v1.0.0 h1:wg75sLpL6DZqwHQN6E1Cfk6mtfzS45z8OV+ic+DtHRo=
github.com/huin/goupnp v1.0.0/go.mod h1:n9v9KO1tAxYH82qOn+UTIFQDmx5n1Zxd/ClZDMX7Bnc=
github.com/huin/goutil v0.0.0-20170803182201-1ca381bf3150/go.mod h1:PpLOETDnJ0o3iZrZfqZzyLl6l7F3c6L1oWn7OICBi6o=
github.com/ianlancetaylor/demangle v0.0.0-20220319035150-800ac71e25c2/go.mod h1:aYm2/VgdVmcIU8iMfdMvDMsRAQjcfZSKFby6HOFvi/w=
-github.com/influxdata/influxdb v1.7.9 h1:uSeBTNO4rBkbp1Be5FKRsAmglM9nlx25TzVQRQt1An4=
-github.com/influxdata/influxdb v1.7.9/go.mod h1:qZna6X/4elxqT3yI9iZYdZrWWdeFOOprn86kgg4+IzY=
+github.com/influxdata/influxdb-client-go/v2 v2.13.0 h1:ioBbLmR5NMbAjP4UVA5r9b5xGjpABD7j65pI8kFphDM=
+github.com/influxdata/influxdb-client-go/v2 v2.13.0/go.mod h1:k+spCbt9hcvqvUiz0sr5D8LolXHqAAOfPw9v/RIRHl4=
+github.com/influxdata/influxdb1-client v0.0.0-20190809212627-fc22c7df067e h1:txQltCyjXAqVVSZDArPEhUTg35hKwVIuXwtQo7eAMNQ=
github.com/influxdata/influxdb1-client v0.0.0-20190809212627-fc22c7df067e/go.mod h1:qj24IKcXYK6Iy9ceXlo3Tc+vtHo9lIhSX5JddghvEPo=
+github.com/influxdata/line-protocol v0.0.0-20200327222509-2487e7298839 h1:W9WBk7wlPfJLvMCdtV4zPulc4uCPrlywQOmbFOhgQNU=
+github.com/influxdata/line-protocol v0.0.0-20200327222509-2487e7298839/go.mod h1:xaLFMmpvUxqXtVkUJfg9QmT88cDaCJ3ZKgdZ78oO8Qo=
github.com/jackpal/go-nat-pmp v1.0.2-0.20160603034137-1fa385a6f458 h1:6OvNmYgJyexcZ3pYbTI9jWx5tHo1Dee/tWbLMfPe2TA=
github.com/jackpal/go-nat-pmp v1.0.2-0.20160603034137-1fa385a6f458/go.mod h1:QPH045xvCAeXUZOxsnwmrtiCoxIr9eob+4orBN1SBKc=
github.com/jcmturner/gofork v0.0.0-20190328161633-dc7c13fece03/go.mod h1:MK8+TM0La+2rjBD4jE12Kj1pCCxK7d2LK/UM3ncEo0o=
github.com/json-iterator/go v1.1.6/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU=
github.com/json-iterator/go v1.1.7/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4=
+github.com/juju/gnuflag v0.0.0-20171113085948-2ce1bb71843d/go.mod h1:2PavIy+JPciBPrBUjwbNvtwB6RQlve+hkpll6QSNmOE=
github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7VTCxuUUipMqKk8s4w=
github.com/julienschmidt/httprouter v1.3.0 h1:U0609e9tgbseu3rBINet9P48AI/D3oJs4dN7jwJOQ1U=
github.com/julienschmidt/httprouter v1.3.0/go.mod h1:JR6WtHb+2LUe8TCKY3cZOxFyyO8IZAc4RVcycCCAKdM=
@@ -123,10 +134,11 @@ github.com/maruel/panicparse v0.0.0-20160720141634-ad661195ed0e h1:e2z/lz9pvtRrE
github.com/maruel/panicparse v0.0.0-20160720141634-ad661195ed0e/go.mod h1:nty42YY5QByNC5MM7q/nj938VbgPU7avs45z6NClpxI=
github.com/maruel/ut v1.0.2 h1:mQTlQk3jubTbdTcza+hwoZQWhzcvE4L6K6RTtAFlA1k=
github.com/maruel/ut v1.0.2/go.mod h1:RV8PwPD9dd2KFlnlCc/DB2JVvkXmyaalfc5xvmSrRSs=
-github.com/mattn/go-colorable v0.1.0 h1:v2XXALHHh6zHfYTJ+cSkwtyffnaOyR1MXaA91mTrb8o=
-github.com/mattn/go-colorable v0.1.0/go.mod h1:9vuHe8Xs5qXnSaW/c/ABM9alt+Vo+STaOChaDxuIBZU=
-github.com/mattn/go-isatty v0.0.5-0.20180830101745-3fb116b82035 h1:USWjF42jDCSEeikX/G1g40ZWnsPXN5WkZ4jMHZWyBK4=
-github.com/mattn/go-isatty v0.0.5-0.20180830101745-3fb116b82035/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4=
+github.com/mattn/go-colorable v0.1.13 h1:fFA4WZxdEF4tXPZVKMLwD8oUnCTTo08duU7wxecdEvA=
+github.com/mattn/go-colorable v0.1.13/go.mod h1:7S9/ev0klgBDR4GtXTXX8a3vIGJpMovkB8vQcUbaXHg=
+github.com/mattn/go-isatty v0.0.16/go.mod h1:kYGgaQfpe5nmfYZH+SKPsOc2e4SrIfOl2e/yFXSvRLM=
+github.com/mattn/go-isatty v0.0.19 h1:JITubQf0MOLdlGRuRq+jtsDlekdYPia9ZFsB8h/APPA=
+github.com/mattn/go-isatty v0.0.19/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y=
github.com/mattn/go-runewidth v0.0.3/go.mod h1:LwmH8dsx7+W8Uxz3IHJYH5QSwggIsqBzpuz5H//U1FU=
github.com/mattn/go-runewidth v0.0.4 h1:2BvfKmzob6Bmd4YsL0zygOqfdFnK7GR4QL06Do4/p7Y=
github.com/mattn/go-runewidth v0.0.4/go.mod h1:LwmH8dsx7+W8Uxz3IHJYH5QSwggIsqBzpuz5H//U1FU=
@@ -144,6 +156,8 @@ github.com/naoina/toml v0.1.2-0.20170918210437-9fafd6967416 h1:shk/vn9oCoOTmwcou
github.com/naoina/toml v0.1.2-0.20170918210437-9fafd6967416/go.mod h1:NBIhNtsFMo3G2szEBne+bO4gS192HuIYRqfvOWb4i1E=
github.com/nsf/termbox-go v0.0.0-20170211012700-3540b76b9c77 h1:gKl78uP/I7JZ56OFtRf7nc4m1icV38hwV0In5pEGzeA=
github.com/nsf/termbox-go v0.0.0-20170211012700-3540b76b9c77/go.mod h1:IuKpRQcYE1Tfu+oAQqaLisqDeXgjyyltCfsaoYN18NQ=
+github.com/oapi-codegen/runtime v1.0.0 h1:P4rqFX5fMFWqRzY9M/3YF9+aPSPPB06IzP2P7oOxrWo=
+github.com/oapi-codegen/runtime v1.0.0/go.mod h1:LmCUMQuPB4M/nLXilQXhHw+BLZdDb18B34OO356yJ/A=
github.com/olekukonko/tablewriter v0.0.2-0.20190409134802-7e037d187b0c h1:1RHs3tNxjXGHeul8z2t6H2N2TlAqpKe5yryJztRx4Jk=
github.com/olekukonko/tablewriter v0.0.2-0.20190409134802-7e037d187b0c/go.mod h1:vsDQFd/mU46D+Z4whnwzcISnGGzXWMclvtLoiIKAKIo=
github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
@@ -186,7 +200,10 @@ github.com/rogpeppe/go-internal v1.6.1/go.mod h1:xXDCJY+GAPziupqXw64V24skbSoqbTE
github.com/rs/cors v1.6.0 h1:G9tHG9lebljV9mfp9SNPDL36nCDxmo3zTlAf1YgvzmI=
github.com/rs/cors v1.6.0/go.mod h1:gFx+x8UowdsKA9AchylcLynDq+nNFfI8FkUZdN/jGCU=
github.com/satori/go.uuid v1.2.0/go.mod h1:dA0hQrYB0VpLJoorglMZABFdXlWrHn1NEOzdhQKdks0=
+github.com/shirou/gopsutil v3.21.11+incompatible h1:+1+c1VGhc88SSonWP6foOcLhvnKlUeu/erjjvaPEYiI=
+github.com/shirou/gopsutil v3.21.11+incompatible/go.mod h1:5b4v6he4MtMOwMlS0TUMTu2PcXUg8+E1lC7eC3UO/RA=
github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo=
+github.com/spkg/bom v0.0.0-20160624110644-59b7046e48ad/go.mod h1:qLr4V1qq6nMqFKkMo8ZTx3f+BZEkzsRUY10Xsm2mwU0=
github.com/steakknife/bloomfilter v0.0.0-20180922174646-6819c0d2a570 h1:gIlAHnH1vJb5vwEjIp5kBj/eu99p/bl0Ay2goiPe5xE=
github.com/steakknife/bloomfilter v0.0.0-20180922174646-6819c0d2a570/go.mod h1:8OR4w3TdeIHIh1g6EMY5p0gVNOovcWC+1vpc7naMuAw=
github.com/steakknife/hamming v0.0.0-20180906055917-c99c65617cd3 h1:njlZPzLwU639dk2kqnCPPv+wNjq7Xb6EfUxe/oX0/NM=
@@ -195,26 +212,36 @@ github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+
github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs=
github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI=
-github.com/stretchr/testify v1.4.0 h1:2E4SXV/wtOkTonXsotYi4li6zVWxYlZuYNCXe9XRJyk=
-github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4=
+github.com/stretchr/testify v1.8.4 h1:CcVxjf3Q8PM0mHUKJCdn+eZZtm5yQwehR5yeSVQQcUk=
+github.com/stretchr/testify v1.8.4/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo=
github.com/syndtr/goleveldb v1.0.1-0.20190923125748-758128399b1d h1:gZZadD8H+fF+n9CmNhYL1Y0dJB+kLOmKd7FbPJLeGHs=
github.com/syndtr/goleveldb v1.0.1-0.20190923125748-758128399b1d/go.mod h1:9OrXJhf154huy1nPWmuSrkgjPUtUNhA+Zmy+6AESzuA=
github.com/templexxx/cpufeat v0.0.0-20180724012125-cef66df7f161/go.mod h1:wM7WEvslTq+iOEAMDLSzhVuOt5BRZ05WirO+b09GHQU=
github.com/templexxx/xor v0.0.0-20181023030647-4e92f724b73b/go.mod h1:5XA7W9S6mni3h5uvOC75dA3m9CCCaS83lltmc0ukdi4=
github.com/tjfoc/gmsm v1.0.1/go.mod h1:XxO4hdhhrzAd+G4CjDqaOkd0hUzmtPR/d3EiBBMn/wc=
+github.com/tklauser/go-sysconf v0.3.13 h1:GBUpcahXSpR2xN01jhkNAbTLRk2Yzgggk8IM08lq3r4=
+github.com/tklauser/go-sysconf v0.3.13/go.mod h1:zwleP4Q4OehZHGn4CYZDipCgg9usW5IJePewFCGVEa0=
+github.com/tklauser/numcpus v0.7.0 h1:yjuerZP127QG9m5Zh/mSO4wqurYil27tHrqwRoRjpr4=
+github.com/tklauser/numcpus v0.7.0/go.mod h1:bb6dMVcj8A42tSE7i32fsIUCbQNllK5iDguyOZRUzAY=
github.com/xdg/scram v0.0.0-20180814205039-7eeb5667e42c/go.mod h1:lB8K/P019DLNhemzwFU4jHLhdvlE6uDZjXFejJXr49I=
github.com/xdg/stringprep v1.0.0/go.mod h1:Jhud4/sHMO4oL310DaZAKk9ZaJ08SJfe+sJh0HrGL1Y=
github.com/xtaci/kcp-go v5.4.5+incompatible/go.mod h1:bN6vIwHQbfHaHtFpEssmWsN45a+AZwO7eyRCmEIbtvE=
github.com/xtaci/lossyconn v0.0.0-20190602105132-8df528c0c9ae/go.mod h1:gXtu8J62kEgmN++bm9BVICuT/e8yiLI2KFobd/TRFsE=
github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY=
+github.com/yusufpapurcu/wmi v1.2.3 h1:E1ctvB7uKFMOJw3fdOW32DwGE9I7t++CRUEMKvFoFiw=
+github.com/yusufpapurcu/wmi v1.2.3/go.mod h1:SBZ9tNy3G9/m5Oi98Zks0QjeHVDvuK0qfxQmPyzfmi0=
golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
golang.org/x/crypto v0.0.0-20190404164418-38d8ce5564a5/go.mod h1:WFFai1msRO1wXaEeE5yQxYXgSfI8pQAWXbQop6sCtWE=
-golang.org/x/crypto v0.0.0-20210921155107-089bfa567519 h1:7I4JAnoQBe7ZtJcBaYHi5UtiO8tQHbUSXxL+pnGRANg=
golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc=
+golang.org/x/crypto v0.18.0 h1:PGVlW0xEltQnzFZ55hkuX5+KLyrMYhHld1YHO4AKcdc=
+golang.org/x/crypto v0.18.0/go.mod h1:R0j02AL6hcrfOiy9T4ZYp/rcWeMxM3L6QYxlOuEG1mg=
+golang.org/x/exp v0.0.0-20240112132812-db7319d0e0e3 h1:hNQpMuAJe5CtcUqCXaWga3FHu+kQvCqcsoVaQgSV60o=
+golang.org/x/exp v0.0.0-20240112132812-db7319d0e0e3/go.mod h1:idGWGoKP1toJGkd5/ig9ZLuPcZBC3ewk7SzmH0uou08=
golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
-golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4 h1:6zppjxzCulZykYSLyVDYbneBfbaBIQPYMevg0bEwv2s=
golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4=
+golang.org/x/mod v0.14.0 h1:dGoOF9QVLYng8IHTm7BAyWqCqSheQ5pYWGhzW00YJr0=
+golang.org/x/mod v0.14.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c=
golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20181011144130-49bb7cea24b1/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20181114220301-adae6a3d119a/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
@@ -224,15 +251,17 @@ golang.org/x/net v0.0.0-20190613194153-d28f0bde5980/go.mod h1:z5CRVTTTmAJ677TzLL
golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20190912160710-24e19bdeb0f2/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg=
-golang.org/x/net v0.0.0-20220722155237-a158d28d115b h1:PxfKdU9lEEDYjdIzOtC4qFWgkU2rGHdKlKowJSMN9h0=
golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c=
+golang.org/x/net v0.20.0 h1:aCL9BSgETF1k+blQaYUBx9hJ9LOGP3gAVemcZlf1Kpo=
+golang.org/x/net v0.20.0/go.mod h1:z8BVo6PvndSri0LbOE3hAn0apkU+1YvI6E70E9jsnvY=
golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
-golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4 h1:uVc8UZUe6tr40fFVnUP5Oj+veunVezqYl9z7DYw9xzw=
golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
+golang.org/x/sync v0.6.0 h1:5BMeUDZ7vkXGfEr1x9B4bRcTH4lpkTkpdh0T/J+qjbQ=
+golang.org/x/sync v0.6.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk=
golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20180926160741-c2ed4eda69e7/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
@@ -241,29 +270,36 @@ golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5h
golang.org/x/sys v0.0.0-20190403152447-81d4e9dc473e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190801041406-cbf593c0f2f3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190912141932-bc967efca4b8/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20190916202348-b4ddaad3f8a3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220310020820-b874c991c1a5/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
-golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f h1:v4INt8xihDGvnrfjMDVXGxw9wrfxYyCjk0KbXjhR55s=
golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20220811171246-fbc7d0a398ab/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.16.0 h1:xWw16ngr6ZMtmxDyKyIgsE93KNKz5HKmMa3b8ALHidU=
+golang.org/x/sys v0.16.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
-golang.org/x/term v0.0.0-20210927222741-03fcf44c2211 h1:JGgROgKl9N8DuW20oFS5gxc+lE67/N3FcwmBPMe7ArY=
golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=
+golang.org/x/term v0.16.0 h1:m+B6fahuftsE9qjo0VWp2FW0mB3MTJvR0BaMQrq0pmE=
+golang.org/x/term v0.16.0/go.mod h1:yn7UURbUtPyrVJPGPq404EukNFxcm/foM+bV/bfcDsY=
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ=
-golang.org/x/text v0.3.8 h1:nAL+RVCQ9uMn3vJZbV+MRnydTJFPf8qqY42YiA6MrqY=
golang.org/x/text v0.3.8/go.mod h1:E6s5w1FMmriuDzIBO73fBruAKo1PCIq6d2Q6DHfQ8WQ=
+golang.org/x/text v0.14.0 h1:ScX5w1eTa3QqT8oi6+ziP7dTV1S2+ALU0bI+0zXKWiQ=
+golang.org/x/text v0.14.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU=
golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q=
golang.org/x/tools v0.0.0-20190912185636-87d9f09c5d89/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
-golang.org/x/tools v0.1.12 h1:VveCTK38A2rkS8ZqFY25HIDFscX5X9OoEhJd3quQmXU=
golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc=
+golang.org/x/tools v0.17.0 h1:FvmRgNOcs3kOa+T20R1uhfP9F6HgG2mfxDv1vrx1Htc=
+golang.org/x/tools v0.17.0/go.mod h1:xsh6VxdV005rRVaS6SSAf9oiAqljS7UZUacMZ8Bnsps=
golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM=
google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc=
@@ -286,8 +322,6 @@ gopkg.in/karalabe/cookiejar.v2 v2.0.0-20150724131613-8dcd6a7f4951 h1:DMTcQRFbEH6
gopkg.in/karalabe/cookiejar.v2 v2.0.0-20150724131613-8dcd6a7f4951/go.mod h1:owOxCRGGeAx1uugABik6K9oeNu1cgxP/R9ItzLDxNWA=
gopkg.in/natefinch/npipe.v2 v2.0.0-20160621034901-c1b8fa8bdcce h1:+JknDZhAj8YMt7GC73Ei8pv4MzjDUNPHgQWJdtMAaDU=
gopkg.in/natefinch/npipe.v2 v2.0.0-20160621034901-c1b8fa8bdcce/go.mod h1:5AcXVHNjg+BDxry382+8OKon8SEWiKktQR07RKPsv1c=
-gopkg.in/olebedev/go-duktape.v3 v3.0.0-20190213234257-ec84240a7772 h1:hhsSf/5z74Ck/DJYc+R8zpq8KGm7uJvpdLRQED/IedA=
-gopkg.in/olebedev/go-duktape.v3 v3.0.0-20190213234257-ec84240a7772/go.mod h1:uAJfkITjFhyEEuUfm7bsmCZRbW5WRq8s9EY8HZ6hCns=
gopkg.in/olebedev/go-duktape.v3 v3.0.0-20200316214253-d7b0ff38cac9 h1:ITeyKbRetrVzqR3U1eY+ywgp7IBspGd1U/bkwd1gWu4=
gopkg.in/olebedev/go-duktape.v3 v3.0.0-20200316214253-d7b0ff38cac9/go.mod h1:uAJfkITjFhyEEuUfm7bsmCZRbW5WRq8s9EY8HZ6hCns=
gopkg.in/redis.v4 v4.2.4/go.mod h1:8KREHdypkCEojGKQcjMqAODMICIVwZAONWq8RowTITA=
@@ -299,6 +333,8 @@ gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY=
gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ=
+gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA=
+gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
gotest.tools v2.2.0+incompatible h1:VsBPFP1AI068pPrMxtb/S8Zkgf9xEmTLJjfM+P5UIEo=
gotest.tools v2.2.0+incompatible/go.mod h1:DsYFclhRJ6vuDpmuTbkuFWG+y2sxOXAzmJt81HFBacw=
honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
diff --git a/internal/debug/flags.go b/internal/debug/flags.go
index 7e6539fb2..7ac781a0a 100644
--- a/internal/debug/flags.go
+++ b/internal/debug/flags.go
@@ -91,9 +91,9 @@ var Flags = []cli.Flag{
//vmoduleFlag,
//backtraceAtFlag,
//debugFlag,
- //pprofFlag,
- //pprofAddrFlag,
- //pprofPortFlag,
+ pprofFlag,
+ pprofAddrFlag,
+ pprofPortFlag,
//memprofilerateFlag,
//blockprofilerateFlag,
//cpuprofileFlag,
diff --git a/metrics/README.md b/metrics/README.md
index bc2a45a83..e2d794500 100644
--- a/metrics/README.md
+++ b/metrics/README.md
@@ -5,7 +5,7 @@ go-metrics
Go port of Coda Hale's Metrics library: <https://github.com/dropwizard/metrics>.
-Documentation: <http://godoc.org/github.com/rcrowley/go-metrics>.
+Documentation: <https://godoc.org/github.com/rcrowley/go-metrics>.
Usage
-----
@@ -128,7 +128,7 @@ go stathat.Stathat(metrics.DefaultRegistry, 10e9, "example@example.com")
Maintain all metrics along with expvars at `/debug/metrics`:
-This uses the same mechanism as [the official expvar](http://golang.org/pkg/expvar/)
+This uses the same mechanism as [the official expvar](https://golang.org/pkg/expvar/)
but exposed under `/debug/metrics`, which shows a json representation of all your usual expvars
as well as all your go-metrics.
diff --git a/metrics/config.go b/metrics/config.go
new file mode 100644
index 000000000..2eb09fb48
--- /dev/null
+++ b/metrics/config.go
@@ -0,0 +1,56 @@
+// Copyright 2021 The go-ethereum Authors
+// This file is part of go-ethereum.
+//
+// The go-ethereum library is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Lesser General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+//
+// The go-ethereum library is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Lesser General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public License
+// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
+
+package metrics
+
+// Config contains the configuration for the metric collection.
+type Config struct {
+ Enabled bool `toml:",omitempty"`
+ EnabledExpensive bool `toml:",omitempty"`
+ HTTP string `toml:",omitempty"`
+ Port int `toml:",omitempty"`
+ EnableInfluxDB bool `toml:",omitempty"`
+ InfluxDBEndpoint string `toml:",omitempty"`
+ InfluxDBDatabase string `toml:",omitempty"`
+ InfluxDBUsername string `toml:",omitempty"`
+ InfluxDBPassword string `toml:",omitempty"`
+ InfluxDBTags string `toml:",omitempty"`
+
+ EnableInfluxDBV2 bool `toml:",omitempty"`
+ InfluxDBToken string `toml:",omitempty"`
+ InfluxDBBucket string `toml:",omitempty"`
+ InfluxDBOrganization string `toml:",omitempty"`
+}
+
+// DefaultConfig is the default config for metrics used in go-ethereum.
+var DefaultConfig = Config{
+ Enabled: false,
+ EnabledExpensive: false,
+ HTTP: "127.0.0.1",
+ Port: 6060,
+ EnableInfluxDB: false,
+ InfluxDBEndpoint: "http://localhost:8086",
+ InfluxDBDatabase: "geth",
+ InfluxDBUsername: "test",
+ InfluxDBPassword: "test",
+ InfluxDBTags: "host=localhost",
+
+ // influxdbv2-specific flags
+ EnableInfluxDBV2: false,
+ InfluxDBToken: "test",
+ InfluxDBBucket: "geth",
+ InfluxDBOrganization: "geth",
+}
diff --git a/metrics/counter.go b/metrics/counter.go
index c7f2b4bd3..cb81599c2 100644
--- a/metrics/counter.go
+++ b/metrics/counter.go
@@ -1,14 +1,19 @@
package metrics
-import "sync/atomic"
+import (
+ "sync/atomic"
+)
+
+type CounterSnapshot interface {
+ Count() int64
+}
// Counters hold an int64 value that can be incremented and decremented.
type Counter interface {
Clear()
- Count() int64
Dec(int64)
Inc(int64)
- Snapshot() Counter
+ Snapshot() CounterSnapshot
}
// GetOrRegisterCounter returns an existing Counter or constructs and registers
@@ -20,12 +25,29 @@ func GetOrRegisterCounter(name string, r Registry) Counter {
return r.GetOrRegister(name, NewCounter).(Counter)
}
+// GetOrRegisterCounterForced returns an existing Counter or constructs and registers a
+// new Counter no matter the global switch is enabled or not.
+// Be sure to unregister the counter from the registry once it is of no use to
+// allow for garbage collection.
+func GetOrRegisterCounterForced(name string, r Registry) Counter {
+ if nil == r {
+ r = DefaultRegistry
+ }
+ return r.GetOrRegister(name, NewCounterForced).(Counter)
+}
+
// NewCounter constructs a new StandardCounter.
func NewCounter() Counter {
if !Enabled {
return NilCounter{}
}
- return &StandardCounter{0}
+ return new(StandardCounter)
+}
+
+// NewCounterForced constructs a new StandardCounter and returns it no matter if
+// the global switch is enabled or not.
+func NewCounterForced() Counter {
+ return new(StandardCounter)
}
// NewRegisteredCounter constructs and registers a new StandardCounter.
@@ -38,75 +60,53 @@ func NewRegisteredCounter(name string, r Registry) Counter {
return c
}
-// CounterSnapshot is a read-only copy of another Counter.
-type CounterSnapshot int64
-
-// Clear panics.
-func (CounterSnapshot) Clear() {
- panic("Clear called on a CounterSnapshot")
-}
-
-// Count returns the count at the time the snapshot was taken.
-func (c CounterSnapshot) Count() int64 { return int64(c) }
-
-// Dec panics.
-func (CounterSnapshot) Dec(int64) {
- panic("Dec called on a CounterSnapshot")
+// NewRegisteredCounterForced constructs and registers a new StandardCounter
+// and launches a goroutine no matter the global switch is enabled or not.
+// Be sure to unregister the counter from the registry once it is of no use to
+// allow for garbage collection.
+func NewRegisteredCounterForced(name string, r Registry) Counter {
+ c := NewCounterForced()
+ if nil == r {
+ r = DefaultRegistry
+ }
+ r.Register(name, c)
+ return c
}
-// Inc panics.
-func (CounterSnapshot) Inc(int64) {
- panic("Inc called on a CounterSnapshot")
-}
+// counterSnapshot is a read-only copy of another Counter.
+type counterSnapshot int64
-// Snapshot returns the snapshot.
-func (c CounterSnapshot) Snapshot() Counter { return c }
+// Count returns the count at the time the snapshot was taken.
+func (c counterSnapshot) Count() int64 { return int64(c) }
// NilCounter is a no-op Counter.
type NilCounter struct{}
-// Clear is a no-op.
-func (NilCounter) Clear() {}
-
-// Count is a no-op.
-func (NilCounter) Count() int64 { return 0 }
-
-// Dec is a no-op.
-func (NilCounter) Dec(i int64) {}
-
-// Inc is a no-op.
-func (NilCounter) Inc(i int64) {}
-
-// Snapshot is a no-op.
-func (NilCounter) Snapshot() Counter { return NilCounter{} }
+func (NilCounter) Clear() {}
+func (NilCounter) Dec(i int64) {}
+func (NilCounter) Inc(i int64) {}
+func (NilCounter) Snapshot() CounterSnapshot { return (*emptySnapshot)(nil) }
// StandardCounter is the standard implementation of a Counter and uses the
// sync/atomic package to manage a single int64 value.
-type StandardCounter struct {
- count int64
-}
+type StandardCounter atomic.Int64
// Clear sets the counter to zero.
func (c *StandardCounter) Clear() {
- atomic.StoreInt64(&c.count, 0)
-}
-
-// Count returns the current count.
-func (c *StandardCounter) Count() int64 {
- return atomic.LoadInt64(&c.count)
+ (*atomic.Int64)(c).Store(0)
}
// Dec decrements the counter by the given amount.
func (c *StandardCounter) Dec(i int64) {
- atomic.AddInt64(&c.count, -i)
+ (*atomic.Int64)(c).Add(-i)
}
// Inc increments the counter by the given amount.
func (c *StandardCounter) Inc(i int64) {
- atomic.AddInt64(&c.count, i)
+ (*atomic.Int64)(c).Add(i)
}
// Snapshot returns a read-only copy of the counter.
-func (c *StandardCounter) Snapshot() Counter {
- return CounterSnapshot(c.Count())
+func (c *StandardCounter) Snapshot() CounterSnapshot {
+ return counterSnapshot((*atomic.Int64)(c).Load())
}
diff --git a/metrics/counter_float64.go b/metrics/counter_float64.go
new file mode 100644
index 000000000..15c81494e
--- /dev/null
+++ b/metrics/counter_float64.go
@@ -0,0 +1,126 @@
+package metrics
+
+import (
+ "math"
+ "sync/atomic"
+)
+
+type CounterFloat64Snapshot interface {
+ Count() float64
+}
+
+// CounterFloat64 holds a float64 value that can be incremented and decremented.
+type CounterFloat64 interface {
+ Clear()
+ Dec(float64)
+ Inc(float64)
+ Snapshot() CounterFloat64Snapshot
+}
+
+// GetOrRegisterCounterFloat64 returns an existing CounterFloat64 or constructs and registers
+// a new StandardCounterFloat64.
+func GetOrRegisterCounterFloat64(name string, r Registry) CounterFloat64 {
+ if nil == r {
+ r = DefaultRegistry
+ }
+ return r.GetOrRegister(name, NewCounterFloat64).(CounterFloat64)
+}
+
+// GetOrRegisterCounterFloat64Forced returns an existing CounterFloat64 or constructs and registers a
+// new CounterFloat64, no matter whether the global switch is enabled or not.
+// Be sure to unregister the counter from the registry once it is of no use to
+// allow for garbage collection.
+func GetOrRegisterCounterFloat64Forced(name string, r Registry) CounterFloat64 {
+ if nil == r {
+ r = DefaultRegistry
+ }
+ return r.GetOrRegister(name, NewCounterFloat64Forced).(CounterFloat64)
+}
+
+// NewCounterFloat64 constructs a new StandardCounterFloat64.
+func NewCounterFloat64() CounterFloat64 {
+ if !Enabled {
+ return NilCounterFloat64{}
+ }
+ return &StandardCounterFloat64{}
+}
+
+// NewCounterFloat64Forced constructs a new StandardCounterFloat64 and returns it no matter if
+// the global switch is enabled or not.
+func NewCounterFloat64Forced() CounterFloat64 {
+ return &StandardCounterFloat64{}
+}
+
+// NewRegisteredCounterFloat64 constructs and registers a new StandardCounterFloat64.
+func NewRegisteredCounterFloat64(name string, r Registry) CounterFloat64 {
+ c := NewCounterFloat64()
+ if nil == r {
+ r = DefaultRegistry
+ }
+ r.Register(name, c)
+ return c
+}
+
+// NewRegisteredCounterFloat64Forced constructs and registers a new StandardCounterFloat64
+// and returns it, no matter whether the global switch is enabled or not.
+// Be sure to unregister the counter from the registry once it is of no use to
+// allow for garbage collection.
+func NewRegisteredCounterFloat64Forced(name string, r Registry) CounterFloat64 {
+ c := NewCounterFloat64Forced()
+ if nil == r {
+ r = DefaultRegistry
+ }
+ r.Register(name, c)
+ return c
+}
+
+// counterFloat64Snapshot is a read-only copy of another CounterFloat64.
+type counterFloat64Snapshot float64
+
+// Count returns the value at the time the snapshot was taken.
+func (c counterFloat64Snapshot) Count() float64 { return float64(c) }
+
+type NilCounterFloat64 struct{}
+
+func (NilCounterFloat64) Clear() {}
+func (NilCounterFloat64) Count() float64 { return 0.0 }
+func (NilCounterFloat64) Dec(i float64) {}
+func (NilCounterFloat64) Inc(i float64) {}
+func (NilCounterFloat64) Snapshot() CounterFloat64Snapshot { return NilCounterFloat64{} }
+
+// StandardCounterFloat64 is the standard implementation of a CounterFloat64 and uses the
+// atomic to manage a single float64 value.
+type StandardCounterFloat64 struct {
+ floatBits atomic.Uint64
+}
+
+// Clear sets the counter to zero.
+func (c *StandardCounterFloat64) Clear() {
+ c.floatBits.Store(0)
+}
+
+// Dec decrements the counter by the given amount.
+func (c *StandardCounterFloat64) Dec(v float64) {
+ atomicAddFloat(&c.floatBits, -v)
+}
+
+// Inc increments the counter by the given amount.
+func (c *StandardCounterFloat64) Inc(v float64) {
+ atomicAddFloat(&c.floatBits, v)
+}
+
+// Snapshot returns a read-only copy of the counter.
+func (c *StandardCounterFloat64) Snapshot() CounterFloat64Snapshot {
+ v := math.Float64frombits(c.floatBits.Load())
+ return counterFloat64Snapshot(v)
+}
+
+func atomicAddFloat(fbits *atomic.Uint64, v float64) {
+ for {
+ loadedBits := fbits.Load()
+ newBits := math.Float64bits(math.Float64frombits(loadedBits) + v)
+ if fbits.CompareAndSwap(loadedBits, newBits) {
+ break
+ }
+ }
+}
diff --git a/metrics/counter_float_64_test.go b/metrics/counter_float_64_test.go
new file mode 100644
index 000000000..c21bd3307
--- /dev/null
+++ b/metrics/counter_float_64_test.go
@@ -0,0 +1,99 @@
+package metrics
+
+import (
+ "sync"
+ "testing"
+)
+
+func BenchmarkCounterFloat64(b *testing.B) {
+ c := NewCounterFloat64()
+ b.ResetTimer()
+ for i := 0; i < b.N; i++ {
+ c.Inc(1.0)
+ }
+}
+
+func BenchmarkCounterFloat64Parallel(b *testing.B) {
+ c := NewCounterFloat64()
+ b.ResetTimer()
+ var wg sync.WaitGroup
+ for i := 0; i < 10; i++ {
+ wg.Add(1)
+ go func() {
+ for i := 0; i < b.N; i++ {
+ c.Inc(1.0)
+ }
+ wg.Done()
+ }()
+ }
+ wg.Wait()
+ if have, want := c.Snapshot().Count(), 10.0*float64(b.N); have != want {
+ b.Fatalf("have %f want %f", have, want)
+ }
+}
+
+func TestCounterFloat64Clear(t *testing.T) {
+ c := NewCounterFloat64()
+ c.Inc(1.0)
+ c.Clear()
+ if count := c.Snapshot().Count(); count != 0 {
+ t.Errorf("c.Count(): 0 != %v\n", count)
+ }
+}
+
+func TestCounterFloat64Dec1(t *testing.T) {
+ c := NewCounterFloat64()
+ c.Dec(1.0)
+ if count := c.Snapshot().Count(); count != -1.0 {
+ t.Errorf("c.Count(): -1.0 != %v\n", count)
+ }
+}
+
+func TestCounterFloat64Dec2(t *testing.T) {
+ c := NewCounterFloat64()
+ c.Dec(2.0)
+ if count := c.Snapshot().Count(); count != -2.0 {
+ t.Errorf("c.Count(): -2.0 != %v\n", count)
+ }
+}
+
+func TestCounterFloat64Inc1(t *testing.T) {
+ c := NewCounterFloat64()
+ c.Inc(1.0)
+ if count := c.Snapshot().Count(); count != 1.0 {
+ t.Errorf("c.Count(): 1.0 != %v\n", count)
+ }
+}
+
+func TestCounterFloat64Inc2(t *testing.T) {
+ c := NewCounterFloat64()
+ c.Inc(2.0)
+ if count := c.Snapshot().Count(); count != 2.0 {
+ t.Errorf("c.Count(): 2.0 != %v\n", count)
+ }
+}
+
+func TestCounterFloat64Snapshot(t *testing.T) {
+ c := NewCounterFloat64()
+ c.Inc(1.0)
+ snapshot := c.Snapshot()
+ c.Inc(1.0)
+ if count := snapshot.Count(); count != 1.0 {
+ t.Errorf("c.Count(): 1.0 != %v\n", count)
+ }
+}
+
+func TestCounterFloat64Zero(t *testing.T) {
+ c := NewCounterFloat64()
+ if count := c.Snapshot().Count(); count != 0 {
+ t.Errorf("c.Count(): 0 != %v\n", count)
+ }
+}
+
+func TestGetOrRegisterCounterFloat64(t *testing.T) {
+ r := NewRegistry()
+ NewRegisteredCounterFloat64("foo", r).Inc(47.0)
+ if c := GetOrRegisterCounterFloat64("foo", r).Snapshot(); c.Count() != 47.0 {
+ t.Fatal(c)
+ }
+}
diff --git a/metrics/counter_test.go b/metrics/counter_test.go
index dfb03b4e8..1b15b23f2 100644
--- a/metrics/counter_test.go
+++ b/metrics/counter_test.go
@@ -14,7 +14,7 @@ func TestCounterClear(t *testing.T) {
c := NewCounter()
c.Inc(1)
c.Clear()
- if count := c.Count(); 0 != count {
+ if count := c.Snapshot().Count(); count != 0 {
t.Errorf("c.Count(): 0 != %v\n", count)
}
}
@@ -22,7 +22,7 @@ func TestCounterClear(t *testing.T) {
func TestCounterDec1(t *testing.T) {
c := NewCounter()
c.Dec(1)
- if count := c.Count(); -1 != count {
+ if count := c.Snapshot().Count(); count != -1 {
t.Errorf("c.Count(): -1 != %v\n", count)
}
}
@@ -30,7 +30,7 @@ func TestCounterDec1(t *testing.T) {
func TestCounterDec2(t *testing.T) {
c := NewCounter()
c.Dec(2)
- if count := c.Count(); -2 != count {
+ if count := c.Snapshot().Count(); count != -2 {
t.Errorf("c.Count(): -2 != %v\n", count)
}
}
@@ -38,7 +38,7 @@ func TestCounterDec2(t *testing.T) {
func TestCounterInc1(t *testing.T) {
c := NewCounter()
c.Inc(1)
- if count := c.Count(); 1 != count {
+ if count := c.Snapshot().Count(); count != 1 {
t.Errorf("c.Count(): 1 != %v\n", count)
}
}
@@ -46,7 +46,7 @@ func TestCounterInc1(t *testing.T) {
func TestCounterInc2(t *testing.T) {
c := NewCounter()
c.Inc(2)
- if count := c.Count(); 2 != count {
+ if count := c.Snapshot().Count(); count != 2 {
t.Errorf("c.Count(): 2 != %v\n", count)
}
}
@@ -56,14 +56,14 @@ func TestCounterSnapshot(t *testing.T) {
c.Inc(1)
snapshot := c.Snapshot()
c.Inc(1)
- if count := snapshot.Count(); 1 != count {
+ if count := snapshot.Count(); count != 1 {
t.Errorf("c.Count(): 1 != %v\n", count)
}
}
func TestCounterZero(t *testing.T) {
c := NewCounter()
- if count := c.Count(); 0 != count {
+ if count := c.Snapshot().Count(); count != 0 {
t.Errorf("c.Count(): 0 != %v\n", count)
}
}
@@ -71,7 +71,7 @@ func TestCounterZero(t *testing.T) {
func TestGetOrRegisterCounter(t *testing.T) {
r := NewRegistry()
NewRegisteredCounter("foo", r).Inc(47)
- if c := GetOrRegisterCounter("foo", r); 47 != c.Count() {
+ if c := GetOrRegisterCounter("foo", r).Snapshot(); c.Count() != 47 {
t.Fatal(c)
}
}
diff --git a/metrics/cpu.go b/metrics/cpu.go
new file mode 100644
index 000000000..3a49cd424
--- /dev/null
+++ b/metrics/cpu.go
@@ -0,0 +1,25 @@
+// Copyright 2018 The go-ethereum Authors
+// This file is part of the go-ethereum library.
+//
+// The go-ethereum library is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Lesser General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+//
+// The go-ethereum library is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Lesser General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public License
+// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
+
+package metrics
+
+// CPUStats is the system and process CPU stats.
+// All values are in seconds.
+type CPUStats struct {
+ GlobalTime float64 // Time spent by the CPU working on all processes
+ GlobalWait float64 // Time spent by waiting on disk for all processes
+ LocalTime float64 // Time spent by the CPU working on this process
+}
diff --git a/metrics/cpu_disabled.go b/metrics/cpu_disabled.go
new file mode 100644
index 000000000..025d97aeb
--- /dev/null
+++ b/metrics/cpu_disabled.go
@@ -0,0 +1,24 @@
+// Copyright 2020 The go-ethereum Authors
+// This file is part of the go-ethereum library.
+//
+// The go-ethereum library is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Lesser General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+//
+// The go-ethereum library is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Lesser General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public License
+// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
+
+//go:build ios || js
+// +build ios js
+
+package metrics
+
+// ReadCPUStats retrieves the current CPU stats. Internally this uses `gopsutil`,
+// which is not supported on the platforms in this file.
+func ReadCPUStats(stats *CPUStats) {}
diff --git a/metrics/cpu_enabled.go b/metrics/cpu_enabled.go
new file mode 100644
index 000000000..7620e1cf5
--- /dev/null
+++ b/metrics/cpu_enabled.go
@@ -0,0 +1,44 @@
+// Copyright 2020 The go-ethereum Authors
+// This file is part of the go-ethereum library.
+//
+// The go-ethereum library is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Lesser General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+//
+// The go-ethereum library is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Lesser General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public License
+// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
+
+//go:build !ios && !js
+// +build !ios,!js
+
+package metrics
+
+import (
+ "github.com/shirou/gopsutil/cpu"
+ "github.com/tomochain/tomochain/log"
+)
+
+// ReadCPUStats retrieves the current CPU stats.
+func ReadCPUStats(stats *CPUStats) {
+ // passing false to request all cpu times
+ timeStats, err := cpu.Times(false)
+ if err != nil {
+ log.Error("Could not read cpu stats", "err", err)
+ return
+ }
+ if len(timeStats) == 0 {
+ log.Error("Empty cpu stats")
+ return
+ }
+ // requesting all cpu times will always return an array with only one time stats entry
+ timeStat := timeStats[0]
+ stats.GlobalTime = timeStat.User + timeStat.Nice + timeStat.System
+ stats.GlobalWait = timeStat.Iowait
+ stats.LocalTime = getProcessCPUTime()
+}
diff --git a/metrics/cputime_nop.go b/metrics/cputime_nop.go
new file mode 100644
index 000000000..465d88c4d
--- /dev/null
+++ b/metrics/cputime_nop.go
@@ -0,0 +1,26 @@
+// Copyright 2018 The go-ethereum Authors
+// This file is part of the go-ethereum library.
+//
+// The go-ethereum library is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Lesser General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+//
+// The go-ethereum library is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Lesser General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public License
+// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
+
+//go:build windows || js
+// +build windows js
+
+package metrics
+
+// getProcessCPUTime returns 0 on Windows and JS as there is no system call to resolve
+// the actual process' CPU time.
+func getProcessCPUTime() float64 {
+ return 0
+}
diff --git a/metrics/cputime_unix.go b/metrics/cputime_unix.go
new file mode 100644
index 000000000..60b04268e
--- /dev/null
+++ b/metrics/cputime_unix.go
@@ -0,0 +1,36 @@
+// Copyright 2018 The go-ethereum Authors
+// This file is part of the go-ethereum library.
+//
+// The go-ethereum library is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Lesser General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+//
+// The go-ethereum library is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Lesser General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public License
+// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
+
+//go:build !windows && !js
+// +build !windows,!js
+
+package metrics
+
+import (
+ syscall "golang.org/x/sys/unix"
+
+ "github.com/tomochain/tomochain/log"
+)
+
+// getProcessCPUTime retrieves the process' CPU time since program startup.
+func getProcessCPUTime() float64 {
+ var usage syscall.Rusage
+ if err := syscall.Getrusage(syscall.RUSAGE_SELF, &usage); err != nil {
+ log.Warn("Failed to retrieve CPU time", "err", err)
+ return 0
+ }
+ return float64(usage.Utime.Sec+usage.Stime.Sec) + float64(usage.Utime.Usec+usage.Stime.Usec)/1000000 //nolint:unconvert
+}
diff --git a/metrics/disk_nop.go b/metrics/disk_nop.go
index 4319f8b27..58fa4e02f 100644
--- a/metrics/disk_nop.go
+++ b/metrics/disk_nop.go
@@ -14,6 +14,7 @@
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
+//go:build !linux
// +build !linux
package metrics
diff --git a/metrics/ewma.go b/metrics/ewma.go
index 3aecd4fa3..1d7a4f00c 100644
--- a/metrics/ewma.go
+++ b/metrics/ewma.go
@@ -4,22 +4,23 @@ import (
"math"
"sync"
"sync/atomic"
+ "time"
)
+type EWMASnapshot interface {
+ Rate() float64
+}
+
// EWMAs continuously calculate an exponentially-weighted moving average
// based on an outside source of clock ticks.
type EWMA interface {
- Rate() float64
- Snapshot() EWMA
+ Snapshot() EWMASnapshot
Tick()
Update(int64)
}
// NewEWMA constructs a new EWMA with the given alpha.
func NewEWMA(alpha float64) EWMA {
- if !Enabled {
- return NilEWMA{}
- }
return &StandardEWMA{alpha: alpha}
}
@@ -38,81 +39,73 @@ func NewEWMA15() EWMA {
return NewEWMA(1 - math.Exp(-5.0/60.0/15))
}
-// EWMASnapshot is a read-only copy of another EWMA.
-type EWMASnapshot float64
+// ewmaSnapshot is a read-only copy of another EWMA.
+type ewmaSnapshot float64
// Rate returns the rate of events per second at the time the snapshot was
// taken.
-func (a EWMASnapshot) Rate() float64 { return float64(a) }
-
-// Snapshot returns the snapshot.
-func (a EWMASnapshot) Snapshot() EWMA { return a }
-
-// Tick panics.
-func (EWMASnapshot) Tick() {
- panic("Tick called on an EWMASnapshot")
-}
-
-// Update panics.
-func (EWMASnapshot) Update(int64) {
- panic("Update called on an EWMASnapshot")
-}
+func (a ewmaSnapshot) Rate() float64 { return float64(a) }
// NilEWMA is a no-op EWMA.
type NilEWMA struct{}
-// Rate is a no-op.
-func (NilEWMA) Rate() float64 { return 0.0 }
-
-// Snapshot is a no-op.
-func (NilEWMA) Snapshot() EWMA { return NilEWMA{} }
-
-// Tick is a no-op.
-func (NilEWMA) Tick() {}
-
-// Update is a no-op.
-func (NilEWMA) Update(n int64) {}
+func (NilEWMA) Snapshot() EWMASnapshot { return (*emptySnapshot)(nil) }
+func (NilEWMA) Tick() {}
+func (NilEWMA) Update(n int64) {}
// StandardEWMA is the standard implementation of an EWMA and tracks the number
// of uncounted events and processes them on each tick. It uses the
// sync/atomic package to manage uncounted events.
type StandardEWMA struct {
- uncounted int64 // /!\ this should be the first member to ensure 64-bit alignment
+ uncounted atomic.Int64
alpha float64
- rate float64
- init bool
+ rate atomic.Uint64
+ init atomic.Bool
mutex sync.Mutex
}
-// Rate returns the moving average rate of events per second.
-func (a *StandardEWMA) Rate() float64 {
- a.mutex.Lock()
- defer a.mutex.Unlock()
- return a.rate * float64(1e9)
-}
-
// Snapshot returns a read-only copy of the EWMA.
-func (a *StandardEWMA) Snapshot() EWMA {
- return EWMASnapshot(a.Rate())
+func (a *StandardEWMA) Snapshot() EWMASnapshot {
+ r := math.Float64frombits(a.rate.Load()) * float64(time.Second)
+ return ewmaSnapshot(r)
}
// Tick ticks the clock to update the moving average. It assumes it is called
// every five seconds.
func (a *StandardEWMA) Tick() {
- count := atomic.LoadInt64(&a.uncounted)
- atomic.AddInt64(&a.uncounted, -count)
- instantRate := float64(count) / float64(5e9)
+ // Optimization to avoid mutex locking in the hot-path.
+ if a.init.Load() {
+ a.updateRate(a.fetchInstantRate())
+ return
+ }
+ // Slow-path: this is only needed on the first Tick() and preserves transactional updating
+ // of init and rate in the else block. The first conditional is needed below because
+ // a different thread could have set a.init = 1 between the time of the first atomic load and when
+ // the lock was acquired.
a.mutex.Lock()
- defer a.mutex.Unlock()
- if a.init {
- a.rate += a.alpha * (instantRate - a.rate)
+ if a.init.Load() {
+ // The fetchInstantRate() uses atomic loading, which is unnecessary in this critical section
+ // but again, this section is only invoked on the first successful Tick() operation.
+ a.updateRate(a.fetchInstantRate())
} else {
- a.init = true
- a.rate = instantRate
+ a.init.Store(true)
+ a.rate.Store(math.Float64bits(a.fetchInstantRate()))
}
+ a.mutex.Unlock()
+}
+
+func (a *StandardEWMA) fetchInstantRate() float64 {
+ count := a.uncounted.Swap(0)
+ return float64(count) / float64(5*time.Second)
+}
+
+func (a *StandardEWMA) updateRate(instantRate float64) {
+ currentRate := math.Float64frombits(a.rate.Load())
+ currentRate += a.alpha * (instantRate - currentRate)
+ a.rate.Store(math.Float64bits(currentRate))
}
// Update adds n uncounted events.
func (a *StandardEWMA) Update(n int64) {
- atomic.AddInt64(&a.uncounted, n)
+ a.uncounted.Add(n)
}
diff --git a/metrics/ewma_test.go b/metrics/ewma_test.go
index 0430fbd24..9a91b43db 100644
--- a/metrics/ewma_test.go
+++ b/metrics/ewma_test.go
@@ -1,6 +1,11 @@
package metrics
-import "testing"
+import (
+ "math"
+ "testing"
+)
+
+const epsilon = 0.0000000000000001
func BenchmarkEWMA(b *testing.B) {
a := NewEWMA1()
@@ -11,72 +16,33 @@ func BenchmarkEWMA(b *testing.B) {
}
}
+func BenchmarkEWMAParallel(b *testing.B) {
+ a := NewEWMA1()
+ b.ResetTimer()
+
+ b.RunParallel(func(pb *testing.PB) {
+ for pb.Next() {
+ a.Update(1)
+ a.Tick()
+ }
+ })
+}
+
func TestEWMA1(t *testing.T) {
a := NewEWMA1()
a.Update(3)
a.Tick()
- if rate := a.Rate(); 0.6 != rate {
- t.Errorf("initial a.Rate(): 0.6 != %v\n", rate)
- }
- elapseMinute(a)
- if rate := a.Rate(); 0.22072766470286553 != rate {
- t.Errorf("1 minute a.Rate(): 0.22072766470286553 != %v\n", rate)
- }
- elapseMinute(a)
- if rate := a.Rate(); 0.08120116994196772 != rate {
- t.Errorf("2 minute a.Rate(): 0.08120116994196772 != %v\n", rate)
- }
- elapseMinute(a)
- if rate := a.Rate(); 0.029872241020718428 != rate {
- t.Errorf("3 minute a.Rate(): 0.029872241020718428 != %v\n", rate)
- }
- elapseMinute(a)
- if rate := a.Rate(); 0.01098938333324054 != rate {
- t.Errorf("4 minute a.Rate(): 0.01098938333324054 != %v\n", rate)
- }
- elapseMinute(a)
- if rate := a.Rate(); 0.004042768199451294 != rate {
- t.Errorf("5 minute a.Rate(): 0.004042768199451294 != %v\n", rate)
- }
- elapseMinute(a)
- if rate := a.Rate(); 0.0014872513059998212 != rate {
- t.Errorf("6 minute a.Rate(): 0.0014872513059998212 != %v\n", rate)
- }
- elapseMinute(a)
- if rate := a.Rate(); 0.0005471291793327122 != rate {
- t.Errorf("7 minute a.Rate(): 0.0005471291793327122 != %v\n", rate)
- }
- elapseMinute(a)
- if rate := a.Rate(); 0.00020127757674150815 != rate {
- t.Errorf("8 minute a.Rate(): 0.00020127757674150815 != %v\n", rate)
- }
- elapseMinute(a)
- if rate := a.Rate(); 7.404588245200814e-05 != rate {
- t.Errorf("9 minute a.Rate(): 7.404588245200814e-05 != %v\n", rate)
- }
- elapseMinute(a)
- if rate := a.Rate(); 2.7239957857491083e-05 != rate {
- t.Errorf("10 minute a.Rate(): 2.7239957857491083e-05 != %v\n", rate)
- }
- elapseMinute(a)
- if rate := a.Rate(); 1.0021020474147462e-05 != rate {
- t.Errorf("11 minute a.Rate(): 1.0021020474147462e-05 != %v\n", rate)
- }
- elapseMinute(a)
- if rate := a.Rate(); 3.6865274119969525e-06 != rate {
- t.Errorf("12 minute a.Rate(): 3.6865274119969525e-06 != %v\n", rate)
- }
- elapseMinute(a)
- if rate := a.Rate(); 1.3561976441886433e-06 != rate {
- t.Errorf("13 minute a.Rate(): 1.3561976441886433e-06 != %v\n", rate)
- }
- elapseMinute(a)
- if rate := a.Rate(); 4.989172314621449e-07 != rate {
- t.Errorf("14 minute a.Rate(): 4.989172314621449e-07 != %v\n", rate)
- }
- elapseMinute(a)
- if rate := a.Rate(); 1.8354139230109722e-07 != rate {
- t.Errorf("15 minute a.Rate(): 1.8354139230109722e-07 != %v\n", rate)
+ for i, want := range []float64{0.6,
+ 0.22072766470286553, 0.08120116994196772, 0.029872241020718428,
+ 0.01098938333324054, 0.004042768199451294, 0.0014872513059998212,
+ 0.0005471291793327122, 0.00020127757674150815, 7.404588245200814e-05,
+ 2.7239957857491083e-05, 1.0021020474147462e-05, 3.6865274119969525e-06,
+ 1.3561976441886433e-06, 4.989172314621449e-07, 1.8354139230109722e-07,
+ } {
+ if rate := a.Snapshot().Rate(); math.Abs(want-rate) > epsilon {
+ t.Errorf("%d minute a.Snapshot().Rate(): %f != %v\n", i, want, rate)
+ }
+ elapseMinute(a)
}
}
@@ -84,68 +50,17 @@ func TestEWMA5(t *testing.T) {
a := NewEWMA5()
a.Update(3)
a.Tick()
- if rate := a.Rate(); 0.6 != rate {
- t.Errorf("initial a.Rate(): 0.6 != %v\n", rate)
- }
- elapseMinute(a)
- if rate := a.Rate(); 0.49123845184678905 != rate {
- t.Errorf("1 minute a.Rate(): 0.49123845184678905 != %v\n", rate)
- }
- elapseMinute(a)
- if rate := a.Rate(); 0.4021920276213837 != rate {
- t.Errorf("2 minute a.Rate(): 0.4021920276213837 != %v\n", rate)
- }
- elapseMinute(a)
- if rate := a.Rate(); 0.32928698165641596 != rate {
- t.Errorf("3 minute a.Rate(): 0.32928698165641596 != %v\n", rate)
- }
- elapseMinute(a)
- if rate := a.Rate(); 0.269597378470333 != rate {
- t.Errorf("4 minute a.Rate(): 0.269597378470333 != %v\n", rate)
- }
- elapseMinute(a)
- if rate := a.Rate(); 0.2207276647028654 != rate {
- t.Errorf("5 minute a.Rate(): 0.2207276647028654 != %v\n", rate)
- }
- elapseMinute(a)
- if rate := a.Rate(); 0.18071652714732128 != rate {
- t.Errorf("6 minute a.Rate(): 0.18071652714732128 != %v\n", rate)
- }
- elapseMinute(a)
- if rate := a.Rate(); 0.14795817836496392 != rate {
- t.Errorf("7 minute a.Rate(): 0.14795817836496392 != %v\n", rate)
- }
- elapseMinute(a)
- if rate := a.Rate(); 0.12113791079679326 != rate {
- t.Errorf("8 minute a.Rate(): 0.12113791079679326 != %v\n", rate)
- }
- elapseMinute(a)
- if rate := a.Rate(); 0.09917933293295193 != rate {
- t.Errorf("9 minute a.Rate(): 0.09917933293295193 != %v\n", rate)
- }
- elapseMinute(a)
- if rate := a.Rate(); 0.08120116994196763 != rate {
- t.Errorf("10 minute a.Rate(): 0.08120116994196763 != %v\n", rate)
- }
- elapseMinute(a)
- if rate := a.Rate(); 0.06648189501740036 != rate {
- t.Errorf("11 minute a.Rate(): 0.06648189501740036 != %v\n", rate)
- }
- elapseMinute(a)
- if rate := a.Rate(); 0.05443077197364752 != rate {
- t.Errorf("12 minute a.Rate(): 0.05443077197364752 != %v\n", rate)
- }
- elapseMinute(a)
- if rate := a.Rate(); 0.04456414692860035 != rate {
- t.Errorf("13 minute a.Rate(): 0.04456414692860035 != %v\n", rate)
- }
- elapseMinute(a)
- if rate := a.Rate(); 0.03648603757513079 != rate {
- t.Errorf("14 minute a.Rate(): 0.03648603757513079 != %v\n", rate)
- }
- elapseMinute(a)
- if rate := a.Rate(); 0.0298722410207183831020718428 != rate {
- t.Errorf("15 minute a.Rate(): 0.0298722410207183831020718428 != %v\n", rate)
+ for i, want := range []float64{
+ 0.6, 0.49123845184678905, 0.4021920276213837, 0.32928698165641596,
+ 0.269597378470333, 0.2207276647028654, 0.18071652714732128,
+ 0.14795817836496392, 0.12113791079679326, 0.09917933293295193,
+ 0.08120116994196763, 0.06648189501740036, 0.05443077197364752,
+ 0.04456414692860035, 0.03648603757513079, 0.0298722410207183831020718428,
+ } {
+ if rate := a.Snapshot().Rate(); math.Abs(want-rate) > epsilon {
+ t.Errorf("%d minute a.Snapshot().Rate(): %f != %v\n", i, want, rate)
+ }
+ elapseMinute(a)
}
}
@@ -153,68 +68,17 @@ func TestEWMA15(t *testing.T) {
a := NewEWMA15()
a.Update(3)
a.Tick()
- if rate := a.Rate(); 0.6 != rate {
- t.Errorf("initial a.Rate(): 0.6 != %v\n", rate)
- }
- elapseMinute(a)
- if rate := a.Rate(); 0.5613041910189706 != rate {
- t.Errorf("1 minute a.Rate(): 0.5613041910189706 != %v\n", rate)
- }
- elapseMinute(a)
- if rate := a.Rate(); 0.5251039914257684 != rate {
- t.Errorf("2 minute a.Rate(): 0.5251039914257684 != %v\n", rate)
- }
- elapseMinute(a)
- if rate := a.Rate(); 0.4912384518467888184678905 != rate {
- t.Errorf("3 minute a.Rate(): 0.4912384518467888184678905 != %v\n", rate)
- }
- elapseMinute(a)
- if rate := a.Rate(); 0.459557003018789 != rate {
- t.Errorf("4 minute a.Rate(): 0.459557003018789 != %v\n", rate)
- }
- elapseMinute(a)
- if rate := a.Rate(); 0.4299187863442732 != rate {
- t.Errorf("5 minute a.Rate(): 0.4299187863442732 != %v\n", rate)
- }
- elapseMinute(a)
- if rate := a.Rate(); 0.4021920276213831 != rate {
- t.Errorf("6 minute a.Rate(): 0.4021920276213831 != %v\n", rate)
- }
- elapseMinute(a)
- if rate := a.Rate(); 0.37625345116383313 != rate {
- t.Errorf("7 minute a.Rate(): 0.37625345116383313 != %v\n", rate)
- }
- elapseMinute(a)
- if rate := a.Rate(); 0.3519877317060185 != rate {
- t.Errorf("8 minute a.Rate(): 0.3519877317060185 != %v\n", rate)
- }
- elapseMinute(a)
- if rate := a.Rate(); 0.3292869816564153165641596 != rate {
- t.Errorf("9 minute a.Rate(): 0.3292869816564153165641596 != %v\n", rate)
- }
- elapseMinute(a)
- if rate := a.Rate(); 0.3080502714195546 != rate {
- t.Errorf("10 minute a.Rate(): 0.3080502714195546 != %v\n", rate)
- }
- elapseMinute(a)
- if rate := a.Rate(); 0.2881831806538789 != rate {
- t.Errorf("11 minute a.Rate(): 0.2881831806538789 != %v\n", rate)
- }
- elapseMinute(a)
- if rate := a.Rate(); 0.26959737847033216 != rate {
- t.Errorf("12 minute a.Rate(): 0.26959737847033216 != %v\n", rate)
- }
- elapseMinute(a)
- if rate := a.Rate(); 0.2522102307052083 != rate {
- t.Errorf("13 minute a.Rate(): 0.2522102307052083 != %v\n", rate)
- }
- elapseMinute(a)
- if rate := a.Rate(); 0.23594443252115815 != rate {
- t.Errorf("14 minute a.Rate(): 0.23594443252115815 != %v\n", rate)
- }
- elapseMinute(a)
- if rate := a.Rate(); 0.2207276647028646247028654470286553 != rate {
- t.Errorf("15 minute a.Rate(): 0.2207276647028646247028654470286553 != %v\n", rate)
+ for i, want := range []float64{
+ 0.6, 0.5613041910189706, 0.5251039914257684, 0.4912384518467888184678905,
+ 0.459557003018789, 0.4299187863442732, 0.4021920276213831,
+ 0.37625345116383313, 0.3519877317060185, 0.3292869816564153165641596,
+ 0.3080502714195546, 0.2881831806538789, 0.26959737847033216,
+ 0.2522102307052083, 0.23594443252115815, 0.2207276647028646247028654470286553,
+ } {
+ if rate := a.Snapshot().Rate(); math.Abs(want-rate) > epsilon {
+ t.Errorf("%d minute a.Snapshot().Rate(): %f != %v\n", i, want, rate)
+ }
+ elapseMinute(a)
}
}
diff --git a/metrics/exp/exp.go b/metrics/exp/exp.go
index e25026d6e..3064021ae 100644
--- a/metrics/exp/exp.go
+++ b/metrics/exp/exp.go
@@ -8,7 +8,9 @@ import (
"net/http"
"sync"
+ "github.com/tomochain/tomochain/log"
"github.com/tomochain/tomochain/metrics"
+ "github.com/tomochain/tomochain/metrics/prometheus"
)
type exp struct {
@@ -42,6 +44,7 @@ func Exp(r metrics.Registry) {
// http.HandleFunc("/debug/vars", e.expHandler)
// haven't found an elegant way, so just use a different endpoint
http.Handle("/debug/metrics", h)
+ http.Handle("/debug/metrics/prometheus", prometheus.Handler(r))
}
// ExpHandler will return an expvar powered metrics handler.
@@ -50,6 +53,20 @@ func ExpHandler(r metrics.Registry) http.Handler {
return http.HandlerFunc(e.expHandler)
}
+// Setup starts a dedicated metrics server at the given address.
+// This function enables metrics reporting separate from pprof.
+func Setup(address string) {
+ m := http.NewServeMux()
+ m.Handle("/debug/metrics", ExpHandler(metrics.DefaultRegistry))
+ m.Handle("/debug/metrics/prometheus", prometheus.Handler(metrics.DefaultRegistry))
+ log.Info("Starting metrics server", "addr", fmt.Sprintf("http://%s/debug/metrics", address))
+ go func() {
+ if err := http.ListenAndServe(address, m); err != nil {
+ log.Error("Failure in running metrics server", "err", err)
+ }
+ }()
+}
+
func (exp *exp) getInt(name string) *expvar.Int {
var v *expvar.Int
exp.expvarLock.Lock()
@@ -78,19 +95,42 @@ func (exp *exp) getFloat(name string) *expvar.Float {
return v
}
-func (exp *exp) publishCounter(name string, metric metrics.Counter) {
+func (exp *exp) getInfo(name string) *expvar.String {
+ var v *expvar.String
+ exp.expvarLock.Lock()
+ p := expvar.Get(name)
+ if p != nil {
+ v = p.(*expvar.String)
+ } else {
+ v = new(expvar.String)
+ expvar.Publish(name, v)
+ }
+ exp.expvarLock.Unlock()
+ return v
+}
+
+func (exp *exp) publishCounter(name string, metric metrics.CounterSnapshot) {
v := exp.getInt(name)
v.Set(metric.Count())
}
-func (exp *exp) publishGauge(name string, metric metrics.Gauge) {
+func (exp *exp) publishCounterFloat64(name string, metric metrics.CounterFloat64Snapshot) {
+ v := exp.getFloat(name)
+ v.Set(metric.Count())
+}
+
+func (exp *exp) publishGauge(name string, metric metrics.GaugeSnapshot) {
v := exp.getInt(name)
v.Set(metric.Value())
}
-func (exp *exp) publishGaugeFloat64(name string, metric metrics.GaugeFloat64) {
+func (exp *exp) publishGaugeFloat64(name string, metric metrics.GaugeFloat64Snapshot) {
exp.getFloat(name).Set(metric.Value())
}
+func (exp *exp) publishGaugeInfo(name string, metric metrics.GaugeInfoSnapshot) {
+ exp.getInfo(name).Set(metric.Value().String())
+}
+
func (exp *exp) publishHistogram(name string, metric metrics.Histogram) {
h := metric.Snapshot()
ps := h.Percentiles([]float64{0.5, 0.75, 0.95, 0.99, 0.999})
@@ -111,7 +151,7 @@ func (exp *exp) publishMeter(name string, metric metrics.Meter) {
exp.getInt(name + ".count").Set(m.Count())
exp.getFloat(name + ".one-minute").Set(m.Rate1())
exp.getFloat(name + ".five-minute").Set(m.Rate5())
- exp.getFloat(name + ".fifteen-minute").Set((m.Rate15()))
+ exp.getFloat(name + ".fifteen-minute").Set(m.Rate15())
exp.getFloat(name + ".mean").Set(m.RateMean())
}
@@ -134,21 +174,38 @@ func (exp *exp) publishTimer(name string, metric metrics.Timer) {
exp.getFloat(name + ".mean-rate").Set(t.RateMean())
}
+func (exp *exp) publishResettingTimer(name string, metric metrics.ResettingTimer) {
+ t := metric.Snapshot()
+ ps := t.Percentiles([]float64{0.50, 0.75, 0.95, 0.99})
+ exp.getInt(name + ".count").Set(int64(t.Count()))
+ exp.getFloat(name + ".mean").Set(t.Mean())
+ exp.getFloat(name + ".50-percentile").Set(ps[0])
+ exp.getFloat(name + ".75-percentile").Set(ps[1])
+ exp.getFloat(name + ".95-percentile").Set(ps[2])
+ exp.getFloat(name + ".99-percentile").Set(ps[3])
+}
+
func (exp *exp) syncToExpvar() {
exp.registry.Each(func(name string, i interface{}) {
- switch i.(type) {
+ switch i := i.(type) {
case metrics.Counter:
- exp.publishCounter(name, i.(metrics.Counter))
+ exp.publishCounter(name, i.Snapshot())
+ case metrics.CounterFloat64:
+ exp.publishCounterFloat64(name, i.Snapshot())
case metrics.Gauge:
- exp.publishGauge(name, i.(metrics.Gauge))
+ exp.publishGauge(name, i.Snapshot())
case metrics.GaugeFloat64:
- exp.publishGaugeFloat64(name, i.(metrics.GaugeFloat64))
+ exp.publishGaugeFloat64(name, i.Snapshot())
+ case metrics.GaugeInfo:
+ exp.publishGaugeInfo(name, i.Snapshot())
case metrics.Histogram:
- exp.publishHistogram(name, i.(metrics.Histogram))
+ exp.publishHistogram(name, i)
case metrics.Meter:
- exp.publishMeter(name, i.(metrics.Meter))
+ exp.publishMeter(name, i)
case metrics.Timer:
- exp.publishTimer(name, i.(metrics.Timer))
+ exp.publishTimer(name, i)
+ case metrics.ResettingTimer:
+ exp.publishResettingTimer(name, i)
default:
panic(fmt.Sprintf("unsupported type for '%s': %T", name, i))
}
diff --git a/metrics/gauge.go b/metrics/gauge.go
index 0fbfdb860..68f8f11ab 100644
--- a/metrics/gauge.go
+++ b/metrics/gauge.go
@@ -2,11 +2,18 @@ package metrics
import "sync/atomic"
+// GaugeSnapshot contains a readonly int64.
+type GaugeSnapshot interface {
+ Value() int64
+}
+
// Gauges hold an int64 value that can be set arbitrarily.
type Gauge interface {
- Snapshot() Gauge
+ Snapshot() GaugeSnapshot
Update(int64)
- Value() int64
+ UpdateIfGt(int64)
+ Dec(int64)
+ Inc(int64)
}
// GetOrRegisterGauge returns an existing Gauge or constructs and registers a
@@ -23,7 +30,7 @@ func NewGauge() Gauge {
if !Enabled {
return NilGauge{}
}
- return &StandardGauge{0}
+ return &StandardGauge{}
}
// NewRegisteredGauge constructs and registers a new StandardGauge.
@@ -36,85 +43,56 @@ func NewRegisteredGauge(name string, r Registry) Gauge {
return c
}
-// NewFunctionalGauge constructs a new FunctionalGauge.
-func NewFunctionalGauge(f func() int64) Gauge {
- if !Enabled {
- return NilGauge{}
- }
- return &FunctionalGauge{value: f}
-}
-
-// NewRegisteredFunctionalGauge constructs and registers a new StandardGauge.
-func NewRegisteredFunctionalGauge(name string, r Registry, f func() int64) Gauge {
- c := NewFunctionalGauge(f)
- if nil == r {
- r = DefaultRegistry
- }
- r.Register(name, c)
- return c
-}
-
-// GaugeSnapshot is a read-only copy of another Gauge.
-type GaugeSnapshot int64
-
-// Snapshot returns the snapshot.
-func (g GaugeSnapshot) Snapshot() Gauge { return g }
-
-// Update panics.
-func (GaugeSnapshot) Update(int64) {
- panic("Update called on a GaugeSnapshot")
-}
+// gaugeSnapshot is a read-only copy of another Gauge.
+type gaugeSnapshot int64
// Value returns the value at the time the snapshot was taken.
-func (g GaugeSnapshot) Value() int64 { return int64(g) }
+func (g gaugeSnapshot) Value() int64 { return int64(g) }
// NilGauge is a no-op Gauge.
type NilGauge struct{}
-// Snapshot is a no-op.
-func (NilGauge) Snapshot() Gauge { return NilGauge{} }
-
-// Update is a no-op.
-func (NilGauge) Update(v int64) {}
-
-// Value is a no-op.
-func (NilGauge) Value() int64 { return 0 }
+func (NilGauge) Snapshot() GaugeSnapshot { return (*emptySnapshot)(nil) }
+func (NilGauge) Update(v int64) {}
+func (NilGauge) UpdateIfGt(v int64) {}
+func (NilGauge) Dec(i int64) {}
+func (NilGauge) Inc(i int64) {}
// StandardGauge is the standard implementation of a Gauge and uses the
// sync/atomic package to manage a single int64 value.
type StandardGauge struct {
- value int64
+ value atomic.Int64
}
// Snapshot returns a read-only copy of the gauge.
-func (g *StandardGauge) Snapshot() Gauge {
- return GaugeSnapshot(g.Value())
+func (g *StandardGauge) Snapshot() GaugeSnapshot {
+ return gaugeSnapshot(g.value.Load())
}
// Update updates the gauge's value.
func (g *StandardGauge) Update(v int64) {
- atomic.StoreInt64(&g.value, v)
-}
-
-// Value returns the gauge's current value.
-func (g *StandardGauge) Value() int64 {
- return atomic.LoadInt64(&g.value)
+ g.value.Store(v)
}
-// FunctionalGauge returns value from given function
-type FunctionalGauge struct {
- value func() int64
+// UpdateIfGt updates the gauge's value if v is larger than the current value.
+func (g *StandardGauge) UpdateIfGt(v int64) {
+ for {
+ exist := g.value.Load()
+ if exist >= v {
+ break
+ }
+ if g.value.CompareAndSwap(exist, v) {
+ break
+ }
+ }
}
-// Value returns the gauge's current value.
-func (g FunctionalGauge) Value() int64 {
- return g.value()
+// Dec decrements the gauge's current value by the given amount.
+func (g *StandardGauge) Dec(i int64) {
+ g.value.Add(-i)
}
-// Snapshot returns the snapshot.
-func (g FunctionalGauge) Snapshot() Gauge { return GaugeSnapshot(g.Value()) }
-
-// Update panics.
-func (FunctionalGauge) Update(int64) {
- panic("Update called on a FunctionalGauge")
+// Inc increments the gauge's current value by the given amount.
+func (g *StandardGauge) Inc(i int64) {
+ g.value.Add(i)
}
diff --git a/metrics/gauge_float64.go b/metrics/gauge_float64.go
index 66819c957..967f2bc60 100644
--- a/metrics/gauge_float64.go
+++ b/metrics/gauge_float64.go
@@ -1,12 +1,18 @@
package metrics
-import "sync"
+import (
+ "math"
+ "sync/atomic"
+)
-// GaugeFloat64s hold a float64 value that can be set arbitrarily.
+type GaugeFloat64Snapshot interface {
+ Value() float64
+}
+
+// GaugeFloat64 holds a float64 value that can be set arbitrarily.
type GaugeFloat64 interface {
- Snapshot() GaugeFloat64
+ Snapshot() GaugeFloat64Snapshot
Update(float64)
- Value() float64
}
// GetOrRegisterGaugeFloat64 returns an existing GaugeFloat64 or constructs and registers a
@@ -23,9 +29,7 @@ func NewGaugeFloat64() GaugeFloat64 {
if !Enabled {
return NilGaugeFloat64{}
}
- return &StandardGaugeFloat64{
- value: 0.0,
- }
+ return &StandardGaugeFloat64{}
}
// NewRegisteredGaugeFloat64 constructs and registers a new StandardGaugeFloat64.
@@ -38,90 +42,32 @@ func NewRegisteredGaugeFloat64(name string, r Registry) GaugeFloat64 {
return c
}
-// NewFunctionalGauge constructs a new FunctionalGauge.
-func NewFunctionalGaugeFloat64(f func() float64) GaugeFloat64 {
- if !Enabled {
- return NilGaugeFloat64{}
- }
- return &FunctionalGaugeFloat64{value: f}
-}
-
-// NewRegisteredFunctionalGauge constructs and registers a new StandardGauge.
-func NewRegisteredFunctionalGaugeFloat64(name string, r Registry, f func() float64) GaugeFloat64 {
- c := NewFunctionalGaugeFloat64(f)
- if nil == r {
- r = DefaultRegistry
- }
- r.Register(name, c)
- return c
-}
-
-// GaugeFloat64Snapshot is a read-only copy of another GaugeFloat64.
-type GaugeFloat64Snapshot float64
-
-// Snapshot returns the snapshot.
-func (g GaugeFloat64Snapshot) Snapshot() GaugeFloat64 { return g }
-
-// Update panics.
-func (GaugeFloat64Snapshot) Update(float64) {
- panic("Update called on a GaugeFloat64Snapshot")
-}
+// gaugeFloat64Snapshot is a read-only copy of another GaugeFloat64.
+type gaugeFloat64Snapshot float64
// Value returns the value at the time the snapshot was taken.
-func (g GaugeFloat64Snapshot) Value() float64 { return float64(g) }
+func (g gaugeFloat64Snapshot) Value() float64 { return float64(g) }
// NilGauge is a no-op Gauge.
type NilGaugeFloat64 struct{}
-// Snapshot is a no-op.
-func (NilGaugeFloat64) Snapshot() GaugeFloat64 { return NilGaugeFloat64{} }
-
-// Update is a no-op.
-func (NilGaugeFloat64) Update(v float64) {}
-
-// Value is a no-op.
-func (NilGaugeFloat64) Value() float64 { return 0.0 }
+func (NilGaugeFloat64) Snapshot() GaugeFloat64Snapshot { return NilGaugeFloat64{} }
+func (NilGaugeFloat64) Update(v float64) {}
+func (NilGaugeFloat64) Value() float64 { return 0.0 }
// StandardGaugeFloat64 is the standard implementation of a GaugeFloat64 and uses
-// sync.Mutex to manage a single float64 value.
+// atomic to manage a single float64 value.
type StandardGaugeFloat64 struct {
- mutex sync.Mutex
- value float64
+ floatBits atomic.Uint64
}
// Snapshot returns a read-only copy of the gauge.
-func (g *StandardGaugeFloat64) Snapshot() GaugeFloat64 {
- return GaugeFloat64Snapshot(g.Value())
+func (g *StandardGaugeFloat64) Snapshot() GaugeFloat64Snapshot {
+ v := math.Float64frombits(g.floatBits.Load())
+ return gaugeFloat64Snapshot(v)
}
// Update updates the gauge's value.
func (g *StandardGaugeFloat64) Update(v float64) {
- g.mutex.Lock()
- defer g.mutex.Unlock()
- g.value = v
-}
-
-// Value returns the gauge's current value.
-func (g *StandardGaugeFloat64) Value() float64 {
- g.mutex.Lock()
- defer g.mutex.Unlock()
- return g.value
-}
-
-// FunctionalGaugeFloat64 returns value from given function
-type FunctionalGaugeFloat64 struct {
- value func() float64
-}
-
-// Value returns the gauge's current value.
-func (g FunctionalGaugeFloat64) Value() float64 {
- return g.value()
-}
-
-// Snapshot returns the snapshot.
-func (g FunctionalGaugeFloat64) Snapshot() GaugeFloat64 { return GaugeFloat64Snapshot(g.Value()) }
-
-// Update panics.
-func (FunctionalGaugeFloat64) Update(float64) {
- panic("Update called on a FunctionalGaugeFloat64")
+ g.floatBits.Store(math.Float64bits(v))
}
diff --git a/metrics/gauge_float64_test.go b/metrics/gauge_float64_test.go
index 99e62a403..f0ac7ea5e 100644
--- a/metrics/gauge_float64_test.go
+++ b/metrics/gauge_float64_test.go
@@ -1,8 +1,11 @@
package metrics
-import "testing"
+import (
+ "sync"
+ "testing"
+)
-func BenchmarkGuageFloat64(b *testing.B) {
+func BenchmarkGaugeFloat64(b *testing.B) {
g := NewGaugeFloat64()
b.ResetTimer()
for i := 0; i < b.N; i++ {
@@ -10,50 +13,39 @@ func BenchmarkGuageFloat64(b *testing.B) {
}
}
-func TestGaugeFloat64(t *testing.T) {
- g := NewGaugeFloat64()
- g.Update(float64(47.0))
- if v := g.Value(); float64(47.0) != v {
- t.Errorf("g.Value(): 47.0 != %v\n", v)
+func BenchmarkGaugeFloat64Parallel(b *testing.B) {
+ c := NewGaugeFloat64()
+ var wg sync.WaitGroup
+ for i := 0; i < 10; i++ {
+ wg.Add(1)
+ go func() {
+ for i := 0; i < b.N; i++ {
+ c.Update(float64(i))
+ }
+ wg.Done()
+ }()
+ }
+ wg.Wait()
+ if have, want := c.Snapshot().Value(), float64(b.N-1); have != want {
+ b.Fatalf("have %f want %f", have, want)
}
}
func TestGaugeFloat64Snapshot(t *testing.T) {
g := NewGaugeFloat64()
- g.Update(float64(47.0))
+ g.Update(47.0)
snapshot := g.Snapshot()
g.Update(float64(0))
- if v := snapshot.Value(); float64(47.0) != v {
+ if v := snapshot.Value(); 47.0 != v {
t.Errorf("g.Value(): 47.0 != %v\n", v)
}
}
func TestGetOrRegisterGaugeFloat64(t *testing.T) {
r := NewRegistry()
- NewRegisteredGaugeFloat64("foo", r).Update(float64(47.0))
+ NewRegisteredGaugeFloat64("foo", r).Update(47.0)
t.Logf("registry: %v", r)
- if g := GetOrRegisterGaugeFloat64("foo", r); float64(47.0) != g.Value() {
- t.Fatal(g)
- }
-}
-
-func TestFunctionalGaugeFloat64(t *testing.T) {
- var counter float64
- fg := NewFunctionalGaugeFloat64(func() float64 {
- counter++
- return counter
- })
- fg.Value()
- fg.Value()
- if counter != 2 {
- t.Error("counter != 2")
- }
-}
-
-func TestGetOrRegisterFunctionalGaugeFloat64(t *testing.T) {
- r := NewRegistry()
- NewRegisteredFunctionalGaugeFloat64("foo", r, func() float64 { return 47 })
- if g := GetOrRegisterGaugeFloat64("foo", r); 47 != g.Value() {
+ if g := GetOrRegisterGaugeFloat64("foo", r).Snapshot(); 47.0 != g.Value() {
t.Fatal(g)
}
}
diff --git a/metrics/gauge_info.go b/metrics/gauge_info.go
new file mode 100644
index 000000000..c44b2d85f
--- /dev/null
+++ b/metrics/gauge_info.go
@@ -0,0 +1,84 @@
+package metrics
+
+import (
+ "encoding/json"
+ "sync"
+)
+
+type GaugeInfoSnapshot interface {
+ Value() GaugeInfoValue
+}
+
+// GaugeInfos hold a GaugeInfoValue value that can be set arbitrarily.
+type GaugeInfo interface {
+ Update(GaugeInfoValue)
+ Snapshot() GaugeInfoSnapshot
+}
+
+// GaugeInfoValue is a mapping of keys to values
+type GaugeInfoValue map[string]string
+
+func (val GaugeInfoValue) String() string {
+ data, _ := json.Marshal(val)
+ return string(data)
+}
+
+// GetOrRegisterGaugeInfo returns an existing GaugeInfo or constructs and registers a
+// new StandardGaugeInfo.
+func GetOrRegisterGaugeInfo(name string, r Registry) GaugeInfo {
+ if nil == r {
+ r = DefaultRegistry
+ }
+ return r.GetOrRegister(name, NewGaugeInfo()).(GaugeInfo)
+}
+
+// NewGaugeInfo constructs a new StandardGaugeInfo.
+func NewGaugeInfo() GaugeInfo {
+ if !Enabled {
+ return NilGaugeInfo{}
+ }
+ return &StandardGaugeInfo{
+ value: GaugeInfoValue{},
+ }
+}
+
+// NewRegisteredGaugeInfo constructs and registers a new StandardGaugeInfo.
+func NewRegisteredGaugeInfo(name string, r Registry) GaugeInfo {
+ c := NewGaugeInfo()
+ if nil == r {
+ r = DefaultRegistry
+ }
+ r.Register(name, c)
+ return c
+}
+
+// gaugeInfoSnapshot is a read-only copy of another GaugeInfo.
+type gaugeInfoSnapshot GaugeInfoValue
+
+// Value returns the value at the time the snapshot was taken.
+func (g gaugeInfoSnapshot) Value() GaugeInfoValue { return GaugeInfoValue(g) }
+
+type NilGaugeInfo struct{}
+
+func (NilGaugeInfo) Snapshot() GaugeInfoSnapshot { return NilGaugeInfo{} }
+func (NilGaugeInfo) Update(v GaugeInfoValue) {}
+func (NilGaugeInfo) Value() GaugeInfoValue { return GaugeInfoValue{} }
+
+// StandardGaugeInfo is the standard implementation of a GaugeInfo and uses
+// sync.Mutex to manage a single string value.
+type StandardGaugeInfo struct {
+ mutex sync.Mutex
+ value GaugeInfoValue
+}
+
+// Snapshot returns a read-only copy of the gauge.
+func (g *StandardGaugeInfo) Snapshot() GaugeInfoSnapshot {
+ return gaugeInfoSnapshot(g.value)
+}
+
+// Update updates the gauge's value.
+func (g *StandardGaugeInfo) Update(v GaugeInfoValue) {
+ g.mutex.Lock()
+ defer g.mutex.Unlock()
+ g.value = v
+}
diff --git a/metrics/gauge_info_test.go b/metrics/gauge_info_test.go
new file mode 100644
index 000000000..319afbf92
--- /dev/null
+++ b/metrics/gauge_info_test.go
@@ -0,0 +1,36 @@
+package metrics
+
+import (
+ "testing"
+)
+
+func TestGaugeInfoJsonString(t *testing.T) {
+ g := NewGaugeInfo()
+ g.Update(GaugeInfoValue{
+ "chain_id": "5",
+ "anotherKey": "any_string_value",
+ "third_key": "anything",
+ },
+ )
+ want := `{"anotherKey":"any_string_value","chain_id":"5","third_key":"anything"}`
+
+ original := g.Snapshot()
+ g.Update(GaugeInfoValue{"value": "updated"})
+
+ if have := original.Value().String(); have != want {
+ t.Errorf("\nhave: %v\nwant: %v\n", have, want)
+ }
+ if have, want := g.Snapshot().Value().String(), `{"value":"updated"}`; have != want {
+ t.Errorf("\nhave: %v\nwant: %v\n", have, want)
+ }
+}
+
+func TestGetOrRegisterGaugeInfo(t *testing.T) {
+ r := NewRegistry()
+ NewRegisteredGaugeInfo("foo", r).Update(
+ GaugeInfoValue{"chain_id": "5"})
+ g := GetOrRegisterGaugeInfo("foo", r).Snapshot()
+ if have, want := g.Value().String(), `{"chain_id":"5"}`; have != want {
+ t.Errorf("have\n%v\nwant\n%v\n", have, want)
+ }
+}
diff --git a/metrics/gauge_test.go b/metrics/gauge_test.go
index 1f2603d33..f2ba930bc 100644
--- a/metrics/gauge_test.go
+++ b/metrics/gauge_test.go
@@ -1,11 +1,10 @@
package metrics
import (
- "fmt"
"testing"
)
-func BenchmarkGuage(b *testing.B) {
+func BenchmarkGauge(b *testing.B) {
g := NewGauge()
b.ResetTimer()
for i := 0; i < b.N; i++ {
@@ -13,20 +12,12 @@ func BenchmarkGuage(b *testing.B) {
}
}
-func TestGauge(t *testing.T) {
- g := NewGauge()
- g.Update(int64(47))
- if v := g.Value(); 47 != v {
- t.Errorf("g.Value(): 47 != %v\n", v)
- }
-}
-
func TestGaugeSnapshot(t *testing.T) {
g := NewGauge()
g.Update(int64(47))
snapshot := g.Snapshot()
g.Update(int64(0))
- if v := snapshot.Value(); 47 != v {
+ if v := snapshot.Value(); v != 47 {
t.Errorf("g.Value(): 47 != %v\n", v)
}
}
@@ -34,35 +25,7 @@ func TestGaugeSnapshot(t *testing.T) {
func TestGetOrRegisterGauge(t *testing.T) {
r := NewRegistry()
NewRegisteredGauge("foo", r).Update(47)
- if g := GetOrRegisterGauge("foo", r); 47 != g.Value() {
- t.Fatal(g)
- }
-}
-
-func TestFunctionalGauge(t *testing.T) {
- var counter int64
- fg := NewFunctionalGauge(func() int64 {
- counter++
- return counter
- })
- fg.Value()
- fg.Value()
- if counter != 2 {
- t.Error("counter != 2")
- }
-}
-
-func TestGetOrRegisterFunctionalGauge(t *testing.T) {
- r := NewRegistry()
- NewRegisteredFunctionalGauge("foo", r, func() int64 { return 47 })
- if g := GetOrRegisterGauge("foo", r); 47 != g.Value() {
+ if g := GetOrRegisterGauge("foo", r); g.Snapshot().Value() != 47 {
t.Fatal(g)
}
}
-
-func ExampleGetOrRegisterGauge() {
- m := "server.bytes_sent"
- g := GetOrRegisterGauge(m, nil)
- g.Update(47)
- fmt.Println(g.Value()) // Output: 47
-}
diff --git a/metrics/graphite.go b/metrics/graphite.go
index 142eec86b..aba752e0e 100644
--- a/metrics/graphite.go
+++ b/metrics/graphite.go
@@ -66,11 +66,15 @@ func graphite(c *GraphiteConfig) error {
c.Registry.Each(func(name string, i interface{}) {
switch metric := i.(type) {
case Counter:
- fmt.Fprintf(w, "%s.%s.count %d %d\n", c.Prefix, name, metric.Count(), now)
+ fmt.Fprintf(w, "%s.%s.count %d %d\n", c.Prefix, name, metric.Snapshot().Count(), now)
+ case CounterFloat64:
+ fmt.Fprintf(w, "%s.%s.count %f %d\n", c.Prefix, name, metric.Snapshot().Count(), now)
case Gauge:
- fmt.Fprintf(w, "%s.%s.value %d %d\n", c.Prefix, name, metric.Value(), now)
+ fmt.Fprintf(w, "%s.%s.value %d %d\n", c.Prefix, name, metric.Snapshot().Value(), now)
case GaugeFloat64:
- fmt.Fprintf(w, "%s.%s.value %f %d\n", c.Prefix, name, metric.Value(), now)
+ fmt.Fprintf(w, "%s.%s.value %f %d\n", c.Prefix, name, metric.Snapshot().Value(), now)
+ case GaugeInfo:
+ fmt.Fprintf(w, "%s.%s.value %s %d\n", c.Prefix, name, metric.Snapshot().Value().String(), now)
case Histogram:
h := metric.Snapshot()
ps := h.Percentiles(c.Percentiles)
diff --git a/metrics/histogram.go b/metrics/histogram.go
index 46f3bbd2f..44de588bc 100644
--- a/metrics/histogram.go
+++ b/metrics/histogram.go
@@ -1,20 +1,14 @@
package metrics
+type HistogramSnapshot interface {
+ SampleSnapshot
+}
+
// Histograms calculate distribution statistics from a series of int64 values.
type Histogram interface {
Clear()
- Count() int64
- Max() int64
- Mean() float64
- Min() int64
- Percentile(float64) float64
- Percentiles([]float64) []float64
- Sample() Sample
- Snapshot() Histogram
- StdDev() float64
- Sum() int64
Update(int64)
- Variance() float64
+ Snapshot() HistogramSnapshot
}
// GetOrRegisterHistogram returns an existing Histogram or constructs and
@@ -26,6 +20,15 @@ func GetOrRegisterHistogram(name string, r Registry, s Sample) Histogram {
return r.GetOrRegister(name, func() Histogram { return NewHistogram(s) }).(Histogram)
}
+// GetOrRegisterHistogramLazy returns an existing Histogram or constructs and
+// registers a new StandardHistogram.
+func GetOrRegisterHistogramLazy(name string, r Registry, s func() Sample) Histogram {
+ if nil == r {
+ r = DefaultRegistry
+ }
+ return r.GetOrRegister(name, func() Histogram { return NewHistogram(s()) }).(Histogram)
+}
+
// NewHistogram constructs a new StandardHistogram from a Sample.
func NewHistogram(s Sample) Histogram {
if !Enabled {
@@ -45,108 +48,12 @@ func NewRegisteredHistogram(name string, r Registry, s Sample) Histogram {
return c
}
-// HistogramSnapshot is a read-only copy of another Histogram.
-type HistogramSnapshot struct {
- sample *SampleSnapshot
-}
-
-// Clear panics.
-func (*HistogramSnapshot) Clear() {
- panic("Clear called on a HistogramSnapshot")
-}
-
-// Count returns the number of samples recorded at the time the snapshot was
-// taken.
-func (h *HistogramSnapshot) Count() int64 { return h.sample.Count() }
-
-// Max returns the maximum value in the sample at the time the snapshot was
-// taken.
-func (h *HistogramSnapshot) Max() int64 { return h.sample.Max() }
-
-// Mean returns the mean of the values in the sample at the time the snapshot
-// was taken.
-func (h *HistogramSnapshot) Mean() float64 { return h.sample.Mean() }
-
-// Min returns the minimum value in the sample at the time the snapshot was
-// taken.
-func (h *HistogramSnapshot) Min() int64 { return h.sample.Min() }
-
-// Percentile returns an arbitrary percentile of values in the sample at the
-// time the snapshot was taken.
-func (h *HistogramSnapshot) Percentile(p float64) float64 {
- return h.sample.Percentile(p)
-}
-
-// Percentiles returns a slice of arbitrary percentiles of values in the sample
-// at the time the snapshot was taken.
-func (h *HistogramSnapshot) Percentiles(ps []float64) []float64 {
- return h.sample.Percentiles(ps)
-}
-
-// Sample returns the Sample underlying the histogram.
-func (h *HistogramSnapshot) Sample() Sample { return h.sample }
-
-// Snapshot returns the snapshot.
-func (h *HistogramSnapshot) Snapshot() Histogram { return h }
-
-// StdDev returns the standard deviation of the values in the sample at the
-// time the snapshot was taken.
-func (h *HistogramSnapshot) StdDev() float64 { return h.sample.StdDev() }
-
-// Sum returns the sum in the sample at the time the snapshot was taken.
-func (h *HistogramSnapshot) Sum() int64 { return h.sample.Sum() }
-
-// Update panics.
-func (*HistogramSnapshot) Update(int64) {
- panic("Update called on a HistogramSnapshot")
-}
-
-// Variance returns the variance of inputs at the time the snapshot was taken.
-func (h *HistogramSnapshot) Variance() float64 { return h.sample.Variance() }
-
// NilHistogram is a no-op Histogram.
type NilHistogram struct{}
-// Clear is a no-op.
-func (NilHistogram) Clear() {}
-
-// Count is a no-op.
-func (NilHistogram) Count() int64 { return 0 }
-
-// Max is a no-op.
-func (NilHistogram) Max() int64 { return 0 }
-
-// Mean is a no-op.
-func (NilHistogram) Mean() float64 { return 0.0 }
-
-// Min is a no-op.
-func (NilHistogram) Min() int64 { return 0 }
-
-// Percentile is a no-op.
-func (NilHistogram) Percentile(p float64) float64 { return 0.0 }
-
-// Percentiles is a no-op.
-func (NilHistogram) Percentiles(ps []float64) []float64 {
- return make([]float64, len(ps))
-}
-
-// Sample is a no-op.
-func (NilHistogram) Sample() Sample { return NilSample{} }
-
-// Snapshot is a no-op.
-func (NilHistogram) Snapshot() Histogram { return NilHistogram{} }
-
-// StdDev is a no-op.
-func (NilHistogram) StdDev() float64 { return 0.0 }
-
-// Sum is a no-op.
-func (NilHistogram) Sum() int64 { return 0 }
-
-// Update is a no-op.
-func (NilHistogram) Update(v int64) {}
-
-// Variance is a no-op.
-func (NilHistogram) Variance() float64 { return 0.0 }
+func (NilHistogram) Clear() {}
+func (NilHistogram) Snapshot() HistogramSnapshot { return (*emptySnapshot)(nil) }
+func (NilHistogram) Update(v int64) {}
// StandardHistogram is the standard implementation of a Histogram and uses a
// Sample to bound its memory use.
@@ -157,46 +64,10 @@ type StandardHistogram struct {
// Clear clears the histogram and its sample.
func (h *StandardHistogram) Clear() { h.sample.Clear() }
-// Count returns the number of samples recorded since the histogram was last
-// cleared.
-func (h *StandardHistogram) Count() int64 { return h.sample.Count() }
-
-// Max returns the maximum value in the sample.
-func (h *StandardHistogram) Max() int64 { return h.sample.Max() }
-
-// Mean returns the mean of the values in the sample.
-func (h *StandardHistogram) Mean() float64 { return h.sample.Mean() }
-
-// Min returns the minimum value in the sample.
-func (h *StandardHistogram) Min() int64 { return h.sample.Min() }
-
-// Percentile returns an arbitrary percentile of the values in the sample.
-func (h *StandardHistogram) Percentile(p float64) float64 {
- return h.sample.Percentile(p)
-}
-
-// Percentiles returns a slice of arbitrary percentiles of the values in the
-// sample.
-func (h *StandardHistogram) Percentiles(ps []float64) []float64 {
- return h.sample.Percentiles(ps)
-}
-
-// Sample returns the Sample underlying the histogram.
-func (h *StandardHistogram) Sample() Sample { return h.sample }
-
// Snapshot returns a read-only copy of the histogram.
-func (h *StandardHistogram) Snapshot() Histogram {
- return &HistogramSnapshot{sample: h.sample.Snapshot().(*SampleSnapshot)}
+func (h *StandardHistogram) Snapshot() HistogramSnapshot {
+ return h.sample.Snapshot()
}
-// StdDev returns the standard deviation of the values in the sample.
-func (h *StandardHistogram) StdDev() float64 { return h.sample.StdDev() }
-
-// Sum returns the sum in the sample.
-func (h *StandardHistogram) Sum() int64 { return h.sample.Sum() }
-
// Update samples a new value.
func (h *StandardHistogram) Update(v int64) { h.sample.Update(v) }
-
-// Variance returns the variance of the values in the sample.
-func (h *StandardHistogram) Variance() float64 { return h.sample.Variance() }
diff --git a/metrics/histogram_test.go b/metrics/histogram_test.go
index d7f4f0171..22fc5468b 100644
--- a/metrics/histogram_test.go
+++ b/metrics/histogram_test.go
@@ -14,7 +14,7 @@ func TestGetOrRegisterHistogram(t *testing.T) {
r := NewRegistry()
s := NewUniformSample(100)
NewRegisteredHistogram("foo", r, s).Update(47)
- if h := GetOrRegisterHistogram("foo", r, s); 1 != h.Count() {
+ if h := GetOrRegisterHistogram("foo", r, s).Snapshot(); h.Count() != 1 {
t.Fatal(h)
}
}
@@ -24,34 +24,34 @@ func TestHistogram10000(t *testing.T) {
for i := 1; i <= 10000; i++ {
h.Update(int64(i))
}
- testHistogram10000(t, h)
+ testHistogram10000(t, h.Snapshot())
}
func TestHistogramEmpty(t *testing.T) {
- h := NewHistogram(NewUniformSample(100))
- if count := h.Count(); 0 != count {
+ h := NewHistogram(NewUniformSample(100)).Snapshot()
+ if count := h.Count(); count != 0 {
t.Errorf("h.Count(): 0 != %v\n", count)
}
- if min := h.Min(); 0 != min {
+ if min := h.Min(); min != 0 {
t.Errorf("h.Min(): 0 != %v\n", min)
}
- if max := h.Max(); 0 != max {
+ if max := h.Max(); max != 0 {
t.Errorf("h.Max(): 0 != %v\n", max)
}
- if mean := h.Mean(); 0.0 != mean {
+ if mean := h.Mean(); mean != 0.0 {
t.Errorf("h.Mean(): 0.0 != %v\n", mean)
}
- if stdDev := h.StdDev(); 0.0 != stdDev {
+ if stdDev := h.StdDev(); stdDev != 0.0 {
t.Errorf("h.StdDev(): 0.0 != %v\n", stdDev)
}
ps := h.Percentiles([]float64{0.5, 0.75, 0.99})
- if 0.0 != ps[0] {
+ if ps[0] != 0.0 {
t.Errorf("median: 0.0 != %v\n", ps[0])
}
- if 0.0 != ps[1] {
+ if ps[1] != 0.0 {
t.Errorf("75th percentile: 0.0 != %v\n", ps[1])
}
- if 0.0 != ps[2] {
+ if ps[2] != 0.0 {
t.Errorf("99th percentile: 0.0 != %v\n", ps[2])
}
}
@@ -66,30 +66,30 @@ func TestHistogramSnapshot(t *testing.T) {
testHistogram10000(t, snapshot)
}
-func testHistogram10000(t *testing.T, h Histogram) {
- if count := h.Count(); 10000 != count {
+func testHistogram10000(t *testing.T, h HistogramSnapshot) {
+ if count := h.Count(); count != 10000 {
t.Errorf("h.Count(): 10000 != %v\n", count)
}
- if min := h.Min(); 1 != min {
+ if min := h.Min(); min != 1 {
t.Errorf("h.Min(): 1 != %v\n", min)
}
- if max := h.Max(); 10000 != max {
+ if max := h.Max(); max != 10000 {
t.Errorf("h.Max(): 10000 != %v\n", max)
}
- if mean := h.Mean(); 5000.5 != mean {
+ if mean := h.Mean(); mean != 5000.5 {
t.Errorf("h.Mean(): 5000.5 != %v\n", mean)
}
- if stdDev := h.StdDev(); 2886.751331514372 != stdDev {
+ if stdDev := h.StdDev(); stdDev != 2886.751331514372 {
t.Errorf("h.StdDev(): 2886.751331514372 != %v\n", stdDev)
}
ps := h.Percentiles([]float64{0.5, 0.75, 0.99})
- if 5000.5 != ps[0] {
+ if ps[0] != 5000.5 {
t.Errorf("median: 5000.5 != %v\n", ps[0])
}
- if 7500.75 != ps[1] {
+ if ps[1] != 7500.75 {
t.Errorf("75th percentile: 7500.75 != %v\n", ps[1])
}
- if 9900.99 != ps[2] {
+ if ps[2] != 9900.99 {
t.Errorf("99th percentile: 9900.99 != %v\n", ps[2])
}
}
diff --git a/metrics/inactive.go b/metrics/inactive.go
new file mode 100644
index 000000000..1f47f0210
--- /dev/null
+++ b/metrics/inactive.go
@@ -0,0 +1,48 @@
+// Copyright 2023 The go-ethereum Authors
+// This file is part of the go-ethereum library.
+//
+// The go-ethereum library is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Lesser General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+//
+// The go-ethereum library is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Lesser General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public License
+// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
+
+package metrics
+
+// compile-time checks that interfaces are implemented.
+var (
+ _ SampleSnapshot = (*emptySnapshot)(nil)
+ _ HistogramSnapshot = (*emptySnapshot)(nil)
+ _ CounterSnapshot = (*emptySnapshot)(nil)
+ _ GaugeSnapshot = (*emptySnapshot)(nil)
+ _ MeterSnapshot = (*emptySnapshot)(nil)
+ _ EWMASnapshot = (*emptySnapshot)(nil)
+ _ TimerSnapshot = (*emptySnapshot)(nil)
+)
+
+type emptySnapshot struct{}
+
+func (*emptySnapshot) Count() int64 { return 0 }
+func (*emptySnapshot) Max() int64 { return 0 }
+func (*emptySnapshot) Mean() float64 { return 0.0 }
+func (*emptySnapshot) Min() int64 { return 0 }
+func (*emptySnapshot) Percentile(p float64) float64 { return 0.0 }
+func (*emptySnapshot) Percentiles(ps []float64) []float64 { return make([]float64, len(ps)) }
+func (*emptySnapshot) Size() int { return 0 }
+func (*emptySnapshot) StdDev() float64 { return 0.0 }
+func (*emptySnapshot) Sum() int64 { return 0 }
+func (*emptySnapshot) Values() []int64 { return []int64{} }
+func (*emptySnapshot) Variance() float64 { return 0.0 }
+func (*emptySnapshot) Value() int64 { return 0 }
+func (*emptySnapshot) Rate() float64 { return 0.0 }
+func (*emptySnapshot) Rate1() float64 { return 0.0 }
+func (*emptySnapshot) Rate5() float64 { return 0.0 }
+func (*emptySnapshot) Rate15() float64 { return 0.0 }
+func (*emptySnapshot) RateMean() float64 { return 0.0 }
diff --git a/metrics/influxdb/influxdb.go b/metrics/influxdb/influxdb.go
index 22e3365b7..4c9da3074 100644
--- a/metrics/influxdb/influxdb.go
+++ b/metrics/influxdb/influxdb.go
@@ -2,226 +2,118 @@ package influxdb
import (
"fmt"
- "log"
- uurl "net/url"
- "time"
- "github.com/influxdata/influxdb/client"
"github.com/tomochain/tomochain/metrics"
)
-type reporter struct {
- reg metrics.Registry
- interval time.Duration
-
- url uurl.URL
- database string
- username string
- password string
- namespace string
- tags map[string]string
-
- client *client.Client
-
- cache map[string]int64
-}
-
-// InfluxDB starts a InfluxDB reporter which will post the from the given metrics.Registry at each d interval.
-func InfluxDB(r metrics.Registry, d time.Duration, url, database, username, password, namespace string) {
- InfluxDBWithTags(r, d, url, database, username, password, namespace, nil)
-}
-
-// InfluxDBWithTags starts a InfluxDB reporter which will post the from the given metrics.Registry at each d interval with the specified tags
-func InfluxDBWithTags(r metrics.Registry, d time.Duration, url, database, username, password, namespace string, tags map[string]string) {
- u, err := uurl.Parse(url)
- if err != nil {
- log.Printf("unable to parse InfluxDB url %s. err=%v", url, err)
- return
- }
-
- rep := &reporter{
- reg: r,
- interval: d,
- url: *u,
- database: database,
- username: username,
- password: password,
- namespace: namespace,
- tags: tags,
- cache: make(map[string]int64),
- }
- if err := rep.makeClient(); err != nil {
- log.Printf("unable to make InfluxDB client. err=%v", err)
- return
- }
-
- rep.run()
-}
-
-func (r *reporter) makeClient() (err error) {
- r.client, err = client.NewClient(client.Config{
- URL: r.url,
- Username: r.username,
- Password: r.password,
- })
-
- return
-}
-
-func (r *reporter) run() {
- intervalTicker := time.Tick(r.interval)
- pingTicker := time.Tick(time.Second * 5)
-
- for {
- select {
- case <-intervalTicker:
- if err := r.send(); err != nil {
- log.Printf("unable to send to InfluxDB. err=%v", err)
- }
- case <-pingTicker:
- _, _, err := r.client.Ping()
- if err != nil {
- log.Printf("got error while sending a ping to InfluxDB, trying to recreate client. err=%v", err)
-
- if err = r.makeClient(); err != nil {
- log.Printf("unable to make InfluxDB client. err=%v", err)
- }
- }
+func readMeter(namespace, name string, i interface{}) (string, map[string]interface{}) {
+ switch metric := i.(type) {
+ case metrics.Counter:
+ measurement := fmt.Sprintf("%s%s.count", namespace, name)
+ fields := map[string]interface{}{
+ "value": metric.Snapshot().Count(),
}
- }
-}
-
-func (r *reporter) send() error {
- var pts []client.Point
-
- r.reg.Each(func(name string, i interface{}) {
- now := time.Now()
- namespace := r.namespace
-
- switch metric := i.(type) {
- case metrics.Counter:
- v := metric.Count()
- l := r.cache[name]
- pts = append(pts, client.Point{
- Measurement: fmt.Sprintf("%s%s.count", namespace, name),
- Tags: r.tags,
- Fields: map[string]interface{}{
- "value": v - l,
- },
- Time: now,
- })
- r.cache[name] = v
- case metrics.Gauge:
- ms := metric.Snapshot()
- pts = append(pts, client.Point{
- Measurement: fmt.Sprintf("%s%s.gauge", namespace, name),
- Tags: r.tags,
- Fields: map[string]interface{}{
- "value": ms.Value(),
- },
- Time: now,
- })
- case metrics.GaugeFloat64:
- ms := metric.Snapshot()
- pts = append(pts, client.Point{
- Measurement: fmt.Sprintf("%s%s.gauge", namespace, name),
- Tags: r.tags,
- Fields: map[string]interface{}{
- "value": ms.Value(),
- },
- Time: now,
- })
- case metrics.Histogram:
- ms := metric.Snapshot()
- ps := ms.Percentiles([]float64{0.5, 0.75, 0.95, 0.99, 0.999, 0.9999})
- pts = append(pts, client.Point{
- Measurement: fmt.Sprintf("%s%s.histogram", namespace, name),
- Tags: r.tags,
- Fields: map[string]interface{}{
- "count": ms.Count(),
- "max": ms.Max(),
- "mean": ms.Mean(),
- "min": ms.Min(),
- "stddev": ms.StdDev(),
- "variance": ms.Variance(),
- "p50": ps[0],
- "p75": ps[1],
- "p95": ps[2],
- "p99": ps[3],
- "p999": ps[4],
- "p9999": ps[5],
- },
- Time: now,
- })
- case metrics.Meter:
- ms := metric.Snapshot()
- pts = append(pts, client.Point{
- Measurement: fmt.Sprintf("%s%s.meter", namespace, name),
- Tags: r.tags,
- Fields: map[string]interface{}{
- "count": ms.Count(),
- "m1": ms.Rate1(),
- "m5": ms.Rate5(),
- "m15": ms.Rate15(),
- "mean": ms.RateMean(),
- },
- Time: now,
- })
- case metrics.Timer:
- ms := metric.Snapshot()
- ps := ms.Percentiles([]float64{0.5, 0.75, 0.95, 0.99, 0.999, 0.9999})
- pts = append(pts, client.Point{
- Measurement: fmt.Sprintf("%s%s.timer", namespace, name),
- Tags: r.tags,
- Fields: map[string]interface{}{
- "count": ms.Count(),
- "max": ms.Max(),
- "mean": ms.Mean(),
- "min": ms.Min(),
- "stddev": ms.StdDev(),
- "variance": ms.Variance(),
- "p50": ps[0],
- "p75": ps[1],
- "p95": ps[2],
- "p99": ps[3],
- "p999": ps[4],
- "p9999": ps[5],
- "m1": ms.Rate1(),
- "m5": ms.Rate5(),
- "m15": ms.Rate15(),
- "meanrate": ms.RateMean(),
- },
- Time: now,
- })
- case metrics.ResettingTimer:
- t := metric.Snapshot()
-
- if len(t.Values()) > 0 {
- ps := t.Percentiles([]float64{50, 95, 99})
- val := t.Values()
- pts = append(pts, client.Point{
- Measurement: fmt.Sprintf("%s%s.span", namespace, name),
- Tags: r.tags,
- Fields: map[string]interface{}{
- "count": len(val),
- "max": val[len(val)-1],
- "mean": t.Mean(),
- "min": val[0],
- "p50": ps[0],
- "p95": ps[1],
- "p99": ps[2],
- },
- Time: now,
- })
- }
+ return measurement, fields
+ case metrics.CounterFloat64:
+ measurement := fmt.Sprintf("%s%s.count", namespace, name)
+ fields := map[string]interface{}{
+ "value": metric.Snapshot().Count(),
}
- })
-
- bps := client.BatchPoints{
- Points: pts,
- Database: r.database,
+ return measurement, fields
+ case metrics.Gauge:
+ measurement := fmt.Sprintf("%s%s.gauge", namespace, name)
+ fields := map[string]interface{}{
+ "value": metric.Snapshot().Value(),
+ }
+ return measurement, fields
+ case metrics.GaugeFloat64:
+ measurement := fmt.Sprintf("%s%s.gauge", namespace, name)
+ fields := map[string]interface{}{
+ "value": metric.Snapshot().Value(),
+ }
+ return measurement, fields
+ case metrics.GaugeInfo:
+ ms := metric.Snapshot()
+ measurement := fmt.Sprintf("%s%s.gauge", namespace, name)
+ fields := map[string]interface{}{
+ "value": ms.Value().String(),
+ }
+ return measurement, fields
+ case metrics.Histogram:
+ ms := metric.Snapshot()
+ if ms.Count() <= 0 {
+ break
+ }
+ ps := ms.Percentiles([]float64{0.25, 0.5, 0.75, 0.95, 0.99, 0.999, 0.9999})
+ measurement := fmt.Sprintf("%s%s.histogram", namespace, name)
+ fields := map[string]interface{}{
+ "count": ms.Count(),
+ "max": ms.Max(),
+ "mean": ms.Mean(),
+ "min": ms.Min(),
+ "stddev": ms.StdDev(),
+ "variance": ms.Variance(),
+ "p25": ps[0],
+ "p50": ps[1],
+ "p75": ps[2],
+ "p95": ps[3],
+ "p99": ps[4],
+ "p999": ps[5],
+ "p9999": ps[6],
+ }
+ return measurement, fields
+ case metrics.Meter:
+ ms := metric.Snapshot()
+ measurement := fmt.Sprintf("%s%s.meter", namespace, name)
+ fields := map[string]interface{}{
+ "count": ms.Count(),
+ "m1": ms.Rate1(),
+ "m5": ms.Rate5(),
+ "m15": ms.Rate15(),
+ "mean": ms.RateMean(),
+ }
+ return measurement, fields
+ case metrics.Timer:
+ ms := metric.Snapshot()
+ ps := ms.Percentiles([]float64{0.5, 0.75, 0.95, 0.99, 0.999, 0.9999})
+
+ measurement := fmt.Sprintf("%s%s.timer", namespace, name)
+ fields := map[string]interface{}{
+ "count": ms.Count(),
+ "max": ms.Max(),
+ "mean": ms.Mean(),
+ "min": ms.Min(),
+ "stddev": ms.StdDev(),
+ "variance": ms.Variance(),
+ "p50": ps[0],
+ "p75": ps[1],
+ "p95": ps[2],
+ "p99": ps[3],
+ "p999": ps[4],
+ "p9999": ps[5],
+ "m1": ms.Rate1(),
+ "m5": ms.Rate5(),
+ "m15": ms.Rate15(),
+ "meanrate": ms.RateMean(),
+ }
+ return measurement, fields
+ case metrics.ResettingTimer:
+ t := metric.Snapshot()
+ if t.Count() == 0 {
+ break
+ }
+ ps := t.Percentiles([]float64{0.50, 0.95, 0.99})
+ measurement := fmt.Sprintf("%s%s.span", namespace, name)
+ fields := map[string]interface{}{
+ "count": t.Count(),
+ "max": t.Max(),
+ "mean": t.Mean(),
+ "min": t.Min(),
+ "p50": int(ps[0]),
+ "p95": int(ps[1]),
+ "p99": int(ps[2]),
+ }
+ return measurement, fields
}
-
- _, err := r.client.Write(bps)
- return err
+ return "", nil
}
diff --git a/metrics/influxdb/influxdb_test.go b/metrics/influxdb/influxdb_test.go
new file mode 100644
index 000000000..1b2529bdb
--- /dev/null
+++ b/metrics/influxdb/influxdb_test.go
@@ -0,0 +1,114 @@
+// Copyright 2023 The go-ethereum Authors
+// This file is part of the go-ethereum library.
+//
+// The go-ethereum library is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Lesser General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+//
+// The go-ethereum library is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Lesser General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public License
+// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
+
+package influxdb
+
+import (
+ "fmt"
+ "io"
+ "net/http"
+ "net/http/httptest"
+ "net/url"
+ "os"
+ "strings"
+ "testing"
+
+ influxdb2 "github.com/influxdata/influxdb-client-go/v2"
+ "github.com/tomochain/tomochain/metrics"
+ "github.com/tomochain/tomochain/metrics/internal"
+)
+
+func TestMain(m *testing.M) {
+ metrics.Enabled = true
+ os.Exit(m.Run())
+}
+
+func TestExampleV1(t *testing.T) {
+ r := internal.ExampleMetrics()
+ var have, want string
+ ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+ haveB, _ := io.ReadAll(r.Body)
+ have = string(haveB)
+ r.Body.Close()
+ }))
+ defer ts.Close()
+ u, _ := url.Parse(ts.URL)
+ rep := &reporter{
+ reg: r,
+ url: *u,
+ namespace: "goth.",
+ }
+ if err := rep.makeClient(); err != nil {
+ t.Fatal(err)
+ }
+ if err := rep.send(978307200); err != nil {
+ t.Fatal(err)
+ }
+ if wantB, err := os.ReadFile("./testdata/influxdbv1.want"); err != nil {
+ t.Fatal(err)
+ } else {
+ want = string(wantB)
+ }
+ if have != want {
+ t.Errorf("\nhave:\n%v\nwant:\n%v\n", have, want)
+ t.Logf("have vs want:\n%v", findFirstDiffPos(have, want))
+ }
+}
+
+func TestExampleV2(t *testing.T) {
+ r := internal.ExampleMetrics()
+ var have, want string
+ ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+ haveB, _ := io.ReadAll(r.Body)
+ have = string(haveB)
+ r.Body.Close()
+ }))
+ defer ts.Close()
+
+ rep := &v2Reporter{
+ reg: r,
+ endpoint: ts.URL,
+ namespace: "goth.",
+ }
+ rep.client = influxdb2.NewClient(rep.endpoint, rep.token)
+ defer rep.client.Close()
+ rep.write = rep.client.WriteAPI(rep.organization, rep.bucket)
+
+ rep.send(978307200)
+
+ if wantB, err := os.ReadFile("./testdata/influxdbv2.want"); err != nil {
+ t.Fatal(err)
+ } else {
+ want = string(wantB)
+ }
+ if have != want {
+ t.Errorf("\nhave:\n%v\nwant:\n%v\n", have, want)
+ t.Logf("have vs want:\n%v", findFirstDiffPos(have, want))
+ }
+}
+
+func findFirstDiffPos(a, b string) string {
+ yy := strings.Split(b, "\n")
+ for i, x := range strings.Split(a, "\n") {
+ if i >= len(yy) {
+ return fmt.Sprintf("have:%d: %s\nwant:%d: ", i, x, i)
+ }
+ if y := yy[i]; x != y {
+ return fmt.Sprintf("have:%d: %s\nwant:%d: %s", i, x, i, y)
+ }
+ }
+ return ""
+}
diff --git a/metrics/influxdb/influxdbv1.go b/metrics/influxdb/influxdbv1.go
new file mode 100644
index 000000000..607840cd8
--- /dev/null
+++ b/metrics/influxdb/influxdbv1.go
@@ -0,0 +1,153 @@
+package influxdb
+
+import (
+ "fmt"
+ uurl "net/url"
+ "time"
+
+ client "github.com/influxdata/influxdb1-client/v2"
+
+ "github.com/tomochain/tomochain/log"
+ "github.com/tomochain/tomochain/metrics"
+)
+
+type reporter struct {
+ reg metrics.Registry
+ interval time.Duration
+
+ url uurl.URL
+ database string
+ username string
+ password string
+ namespace string
+ tags map[string]string
+
+ client client.Client
+
+ cache map[string]int64
+}
+
+// InfluxDB starts an InfluxDB reporter which will post metrics from the given metrics.Registry at each d interval.
+func InfluxDB(r metrics.Registry, d time.Duration, url, database, username, password, namespace string) {
+ InfluxDBWithTags(r, d, url, database, username, password, namespace, nil)
+}
+
+// InfluxDBWithTags starts an InfluxDB reporter which will post metrics from the given metrics.Registry at each d interval with the specified tags
+func InfluxDBWithTags(r metrics.Registry, d time.Duration, url, database, username, password, namespace string, tags map[string]string) {
+ u, err := uurl.Parse(url)
+ if err != nil {
+ log.Warn("Unable to parse InfluxDB", "url", url, "err", err)
+ return
+ }
+
+ rep := &reporter{
+ reg: r,
+ interval: d,
+ url: *u,
+ database: database,
+ username: username,
+ password: password,
+ namespace: namespace,
+ tags: tags,
+ cache: make(map[string]int64),
+ }
+ if err := rep.makeClient(); err != nil {
+ log.Warn("Unable to make InfluxDB client", "err", err)
+ return
+ }
+
+ rep.run()
+}
+
+// InfluxDBWithTagsOnce runs once an InfluxDB reporter and post the given metrics.Registry with the specified tags
+func InfluxDBWithTagsOnce(r metrics.Registry, url, database, username, password, namespace string, tags map[string]string) error {
+ u, err := uurl.Parse(url)
+ if err != nil {
+ return fmt.Errorf("unable to parse InfluxDB. url: %s, err: %v", url, err)
+ }
+
+ rep := &reporter{
+ reg: r,
+ url: *u,
+ database: database,
+ username: username,
+ password: password,
+ namespace: namespace,
+ tags: tags,
+ cache: make(map[string]int64),
+ }
+ if err := rep.makeClient(); err != nil {
+ return fmt.Errorf("unable to make InfluxDB client. err: %v", err)
+ }
+
+ if err := rep.send(0); err != nil {
+ return fmt.Errorf("unable to send to InfluxDB. err: %v", err)
+ }
+
+ return nil
+}
+
+func (r *reporter) makeClient() (err error) {
+ r.client, err = client.NewHTTPClient(client.HTTPConfig{
+ Addr: r.url.String(),
+ Username: r.username,
+ Password: r.password,
+ Timeout: 10 * time.Second,
+ })
+
+ return
+}
+
+func (r *reporter) run() {
+ intervalTicker := time.NewTicker(r.interval)
+ pingTicker := time.NewTicker(time.Second * 5)
+
+ defer intervalTicker.Stop()
+ defer pingTicker.Stop()
+
+ for {
+ select {
+ case <-intervalTicker.C:
+ if err := r.send(0); err != nil {
+ log.Warn("Unable to send to InfluxDB", "err", err)
+ }
+ case <-pingTicker.C:
+ _, _, err := r.client.Ping(0)
+ if err != nil {
+ log.Warn("Got error while sending a ping to InfluxDB, trying to recreate client", "err", err)
+
+ if err = r.makeClient(); err != nil {
+ log.Warn("Unable to make InfluxDB client", "err", err)
+ }
+ }
+ }
+ }
+}
+
+// send sends the measurements. If provided tstamp is >0, it is used. Otherwise,
+// a 'fresh' timestamp is used.
+func (r *reporter) send(tstamp int64) error {
+ bps, err := client.NewBatchPoints(
+ client.BatchPointsConfig{
+ Database: r.database,
+ })
+ if err != nil {
+ return err
+ }
+ r.reg.Each(func(name string, i interface{}) {
+ var now time.Time
+ if tstamp <= 0 {
+ now = time.Now()
+ } else {
+ now = time.Unix(tstamp, 0)
+ }
+ measurement, fields := readMeter(r.namespace, name, i)
+ if fields == nil {
+ return
+ }
+ if p, err := client.NewPoint(measurement, r.tags, fields, now); err == nil {
+ bps.AddPoint(p)
+ }
+ })
+ return r.client.Write(bps)
+}
diff --git a/metrics/influxdb/influxdbv2.go b/metrics/influxdb/influxdbv2.go
new file mode 100644
index 000000000..e73d2f0ba
--- /dev/null
+++ b/metrics/influxdb/influxdbv2.go
@@ -0,0 +1,97 @@
+package influxdb
+
+import (
+ "context"
+ "time"
+
+ influxdb2 "github.com/influxdata/influxdb-client-go/v2"
+ "github.com/influxdata/influxdb-client-go/v2/api"
+
+ "github.com/tomochain/tomochain/log"
+ "github.com/tomochain/tomochain/metrics"
+)
+
+type v2Reporter struct {
+ reg metrics.Registry
+ interval time.Duration
+
+ endpoint string
+ token string
+ bucket string
+ organization string
+ namespace string
+ tags map[string]string
+
+ client influxdb2.Client
+ write api.WriteAPI
+}
+
+// InfluxDBV2WithTags starts an InfluxDB reporter which will post metrics from the given metrics.Registry at each d interval with the specified tags
+func InfluxDBV2WithTags(r metrics.Registry, d time.Duration, endpoint string, token string, bucket string, organization string, namespace string, tags map[string]string) {
+ rep := &v2Reporter{
+ reg: r,
+ interval: d,
+ endpoint: endpoint,
+ token: token,
+ bucket: bucket,
+ organization: organization,
+ namespace: namespace,
+ tags: tags,
+ }
+
+ rep.client = influxdb2.NewClient(rep.endpoint, rep.token)
+ defer rep.client.Close()
+
+ // async write client
+ rep.write = rep.client.WriteAPI(rep.organization, rep.bucket)
+ errorsCh := rep.write.Errors()
+
+ // have to handle write errors in a separate goroutine like this b/c the channel is unbuffered and will block writes if not read
+ go func() {
+ for err := range errorsCh {
+ log.Warn("write error", "err", err.Error())
+ }
+ }()
+ rep.run()
+}
+
+func (r *v2Reporter) run() {
+ intervalTicker := time.NewTicker(r.interval)
+ pingTicker := time.NewTicker(time.Second * 5)
+
+ defer intervalTicker.Stop()
+ defer pingTicker.Stop()
+
+ for {
+ select {
+ case <-intervalTicker.C:
+ r.send(0)
+ case <-pingTicker.C:
+ _, err := r.client.Health(context.Background())
+ if err != nil {
+ log.Warn("Got error from influxdb client health check", "err", err.Error())
+ }
+ }
+ }
+}
+
+// send sends the measurements. If provided tstamp is >0, it is used. Otherwise,
+// a 'fresh' timestamp is used.
+func (r *v2Reporter) send(tstamp int64) {
+ r.reg.Each(func(name string, i interface{}) {
+ var now time.Time
+ if tstamp <= 0 {
+ now = time.Now()
+ } else {
+ now = time.Unix(tstamp, 0)
+ }
+ measurement, fields := readMeter(r.namespace, name, i)
+ if fields == nil {
+ return
+ }
+ pt := influxdb2.NewPoint(measurement, r.tags, fields, now)
+ r.write.WritePoint(pt)
+ })
+ // Force all unwritten data to be sent
+ r.write.Flush()
+}
diff --git a/metrics/influxdb/testdata/influxdbv1.want b/metrics/influxdb/testdata/influxdbv1.want
new file mode 100644
index 000000000..9443faedc
--- /dev/null
+++ b/metrics/influxdb/testdata/influxdbv1.want
@@ -0,0 +1,11 @@
+goth.system/cpu/schedlatency.histogram count=5645i,max=41943040i,mean=1819544.0410983171,min=0i,p25=0,p50=0,p75=7168,p95=16777216,p99=29360128,p999=33554432,p9999=33554432,stddev=6393570.217198883,variance=40877740122252.57 978307200000000000
+goth.system/memory/pauses.histogram count=14i,max=229376i,mean=50066.28571428572,min=5120i,p25=10240,p50=32768,p75=57344,p95=196608,p99=196608,p999=196608,p9999=196608,stddev=54726.062410783874,variance=2994941906.9890113 978307200000000000
+goth.test/counter.count value=12345 978307200000000000
+goth.test/counter_float64.count value=54321.98 978307200000000000
+goth.test/gauge.gauge value=23456i 978307200000000000
+goth.test/gauge_float64.gauge value=34567.89 978307200000000000
+goth.test/gauge_info.gauge value="{\"arch\":\"amd64\",\"commit\":\"7caa2d8163ae3132c1c2d6978c76610caee2d949\",\"os\":\"linux\",\"protocol_versions\":\"64 65 66\",\"version\":\"1.10.18-unstable\"}" 978307200000000000
+goth.test/histogram.histogram count=3i,max=3i,mean=2,min=1i,p25=1,p50=2,p75=3,p95=3,p99=3,p999=3,p9999=3,stddev=0.816496580927726,variance=0.6666666666666666 978307200000000000
+goth.test/meter.meter count=0i,m1=0,m15=0,m5=0,mean=0 978307200000000000
+goth.test/resetting_timer.span count=6i,max=120000000i,mean=30000000,min=10000000i,p50=12500000i,p95=120000000i,p99=120000000i 978307200000000000
+goth.test/timer.timer count=6i,m1=0,m15=0,m5=0,max=120000000i,mean=38333333.333333336,meanrate=0,min=20000000i,p50=22500000,p75=48000000,p95=120000000,p99=120000000,p999=120000000,p9999=120000000,stddev=36545253.529775314,variance=1335555555555555.2 978307200000000000
diff --git a/metrics/influxdb/testdata/influxdbv2.want b/metrics/influxdb/testdata/influxdbv2.want
new file mode 100644
index 000000000..9443faedc
--- /dev/null
+++ b/metrics/influxdb/testdata/influxdbv2.want
@@ -0,0 +1,11 @@
+goth.system/cpu/schedlatency.histogram count=5645i,max=41943040i,mean=1819544.0410983171,min=0i,p25=0,p50=0,p75=7168,p95=16777216,p99=29360128,p999=33554432,p9999=33554432,stddev=6393570.217198883,variance=40877740122252.57 978307200000000000
+goth.system/memory/pauses.histogram count=14i,max=229376i,mean=50066.28571428572,min=5120i,p25=10240,p50=32768,p75=57344,p95=196608,p99=196608,p999=196608,p9999=196608,stddev=54726.062410783874,variance=2994941906.9890113 978307200000000000
+goth.test/counter.count value=12345 978307200000000000
+goth.test/counter_float64.count value=54321.98 978307200000000000
+goth.test/gauge.gauge value=23456i 978307200000000000
+goth.test/gauge_float64.gauge value=34567.89 978307200000000000
+goth.test/gauge_info.gauge value="{\"arch\":\"amd64\",\"commit\":\"7caa2d8163ae3132c1c2d6978c76610caee2d949\",\"os\":\"linux\",\"protocol_versions\":\"64 65 66\",\"version\":\"1.10.18-unstable\"}" 978307200000000000
+goth.test/histogram.histogram count=3i,max=3i,mean=2,min=1i,p25=1,p50=2,p75=3,p95=3,p99=3,p999=3,p9999=3,stddev=0.816496580927726,variance=0.6666666666666666 978307200000000000
+goth.test/meter.meter count=0i,m1=0,m15=0,m5=0,mean=0 978307200000000000
+goth.test/resetting_timer.span count=6i,max=120000000i,mean=30000000,min=10000000i,p50=12500000i,p95=120000000i,p99=120000000i 978307200000000000
+goth.test/timer.timer count=6i,m1=0,m15=0,m5=0,max=120000000i,mean=38333333.333333336,meanrate=0,min=20000000i,p50=22500000,p75=48000000,p95=120000000,p99=120000000,p999=120000000,p9999=120000000,stddev=36545253.529775314,variance=1335555555555555.2 978307200000000000
diff --git a/metrics/internal/sampledata.go b/metrics/internal/sampledata.go
new file mode 100644
index 000000000..e8b6f2148
--- /dev/null
+++ b/metrics/internal/sampledata.go
@@ -0,0 +1,95 @@
+// Copyright 2023 The go-ethereum Authors
+// This file is part of the go-ethereum library.
+//
+// The go-ethereum library is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Lesser General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+//
+// The go-ethereum library is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Lesser General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public License
+// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
+
+package internal
+
+import (
+ "bytes"
+ "encoding/gob"
+ metrics2 "runtime/metrics"
+ "time"
+
+ "github.com/tomochain/tomochain/metrics"
+)
+
+// ExampleMetrics returns an ordered registry populated with a sample of metrics.
+func ExampleMetrics() metrics.Registry {
+ var registry = metrics.NewOrderedRegistry()
+
+ metrics.NewRegisteredCounterFloat64("test/counter", registry).Inc(12345)
+ metrics.NewRegisteredCounterFloat64("test/counter_float64", registry).Inc(54321.98)
+ metrics.NewRegisteredGauge("test/gauge", registry).Update(23456)
+ metrics.NewRegisteredGaugeFloat64("test/gauge_float64", registry).Update(34567.89)
+ metrics.NewRegisteredGaugeInfo("test/gauge_info", registry).Update(
+ metrics.GaugeInfoValue{
+ "version": "1.10.18-unstable",
+ "arch": "amd64",
+ "os": "linux",
+ "commit": "7caa2d8163ae3132c1c2d6978c76610caee2d949",
+ "protocol_versions": "64 65 66",
+ })
+
+ {
+ s := metrics.NewUniformSample(3)
+ s.Update(1)
+ s.Update(2)
+ s.Update(3)
+ //metrics.NewRegisteredHistogram("test/histogram", registry, metrics.NewSampleSnapshot(3, []int64{1, 2, 3}))
+ metrics.NewRegisteredHistogram("test/histogram", registry, s)
+ }
+ registry.Register("test/meter", metrics.NewInactiveMeter())
+ {
+ timer := metrics.NewRegisteredResettingTimer("test/resetting_timer", registry)
+ timer.Update(10 * time.Millisecond)
+ timer.Update(11 * time.Millisecond)
+ timer.Update(12 * time.Millisecond)
+ timer.Update(120 * time.Millisecond)
+ timer.Update(13 * time.Millisecond)
+ timer.Update(14 * time.Millisecond)
+ }
+ {
+ timer := metrics.NewRegisteredTimer("test/timer", registry)
+ timer.Update(20 * time.Millisecond)
+ timer.Update(21 * time.Millisecond)
+ timer.Update(22 * time.Millisecond)
+ timer.Update(120 * time.Millisecond)
+ timer.Update(23 * time.Millisecond)
+ timer.Update(24 * time.Millisecond)
+ timer.Stop()
+ }
+ registry.Register("test/empty_resetting_timer", metrics.NewResettingTimer().Snapshot())
+
+ { // go runtime metrics
+ var sLatency = "7\xff\x81\x03\x01\x01\x10Float64Histogram\x01\xff\x82\x00\x01\x02\x01\x06Counts\x01\xff\x84\x00\x01\aBuckets\x01\xff\x86\x00\x00\x00\x16\xff\x83\x02\x01\x01\b[]uint64\x01\xff\x84\x00\x01\x06\x00\x00\x17\xff\x85\x02\x01\x01\t[]float64\x01\xff\x86\x00\x01\b\x00\x00\xfe\x06T\xff\x82\x01\xff\xa2\x00\xfe\r\xef\x00\x01\x02\x02\x04\x05\x04\b\x15\x17 B?6.L;$!2) \x1a? \x190aH7FY6#\x190\x1d\x14\x10\x1b\r\t\x04\x03\x01\x01\x00\x03\x02\x00\x03\x05\x05\x02\x02\x06\x04\v\x06\n\x15\x18\x13'&.\x12=H/L&\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x01\xff\xa3\xfe\xf0\xff\x00\xf8\x95\xd6&\xe8\v.q>\xf8\x95\xd6&\xe8\v.\x81>\xf8\xdfA:\xdc\x11ʼn>\xf8\x95\xd6&\xe8\v.\x91>\xf8:\x8c0\xe2\x8ey\x95>\xf8\xdfA:\xdc\x11ř>\xf8\x84\xf7C֔\x10\x9e>\xf8\x95\xd6&\xe8\v.\xa1>\xf8:\x8c0\xe2\x8ey\xa5>\xf8\xdfA:\xdc\x11ũ>\xf8\x84\xf7C֔\x10\xae>\xf8\x95\xd6&\xe8\v.\xb1>\xf8:\x8c0\xe2\x8ey\xb5>\xf8\xdfA:\xdc\x11Ź>\xf8\x84\xf7C֔\x10\xbe>\xf8\x95\xd6&\xe8\v.\xc1>\xf8:\x8c0\xe2\x8ey\xc5>\xf8\xdfA:\xdc\x11\xc5\xc9>\xf8\x84\xf7C֔\x10\xce>\xf8\x95\xd6&\xe8\v.\xd1>\xf8:\x8c0\xe2\x8ey\xd5>\xf8\xdfA:\xdc\x11\xc5\xd9>\xf8\x84\xf7C֔\x10\xde>\xf8\x95\xd6&\xe8\v.\xe1>\xf8:\x8c0\xe2\x8ey\xe5>\xf8\xdfA:\xdc\x11\xc5\xe9>\xf8\x84\xf7C֔\x10\xee>\xf8\x95\xd6&\xe8\v.\xf1>\xf8:\x8c0\xe2\x8ey\xf5>\xf8\xdfA:\xdc\x11\xc5\xf9>\xf8\x84\xf7C֔\x10\xfe>\xf8\x95\xd6&\xe8\v.\x01?\xf8:\x8c0\xe2\x8ey\x05?\xf8\xdfA:\xdc\x11\xc5\t?\xf8\x84\xf7C֔\x10\x0e?\xf8\x95\xd6&\xe8\v.\x11?\xf8:\x8c0\xe2\x8ey\x15?\xf8\xdfA:\xdc\x11\xc5\x19?\xf8\x84\xf7C֔\x10\x1e?\xf8\x95\xd6&\xe8\v.!?\xf8:\x8c0\xe2\x8ey%?\xf8\xdfA:\xdc\x11\xc5)?\xf8\x84\xf7C֔\x10.?\xf8\x95\xd6&\xe8\v.1?\xf8:\x8c0\xe2\x8ey5?\xf8\xdfA:\xdc\x11\xc59?\xf8\x84\xf7C֔\x10>?\
xf8\x95\xd6&\xe8\v.A?\xf8:\x8c0\xe2\x8eyE?\xf8\xdfA:\xdc\x11\xc5I?\xf8\x84\xf7C֔\x10N?\xf8\x95\xd6&\xe8\v.Q?\xf8:\x8c0\xe2\x8eyU?\xf8\xdfA:\xdc\x11\xc5Y?\xf8\x84\xf7C֔\x10^?\xf8\x95\xd6&\xe8\v.a?\xf8:\x8c0\xe2\x8eye?\xf8\xdfA:\xdc\x11\xc5i?\xf8\x84\xf7C֔\x10n?\xf8\x95\xd6&\xe8\v.q?\xf8:\x8c0\xe2\x8eyu?\xf8\xdfA:\xdc\x11\xc5y?\xf8\x84\xf7C֔\x10~?\xf8\x95\xd6&\xe8\v.\x81?\xf8:\x8c0\xe2\x8ey\x85?\xf8\xdfA:\xdc\x11ʼn?\xf8\x84\xf7C֔\x10\x8e?\xf8\x95\xd6&\xe8\v.\x91?\xf8:\x8c0\xe2\x8ey\x95?\xf8\xdfA:\xdc\x11ř?\xf8\x84\xf7C֔\x10\x9e?\xf8\x95\xd6&\xe8\v.\xa1?\xf8:\x8c0\xe2\x8ey\xa5?\xf8\xdfA:\xdc\x11ũ?\xf8\x84\xf7C֔\x10\xae?\xf8\x95\xd6&\xe8\v.\xb1?\xf8:\x8c0\xe2\x8ey\xb5?\xf8\xdfA:\xdc\x11Ź?\xf8\x84\xf7C֔\x10\xbe?\xf8\x95\xd6&\xe8\v.\xc1?\xf8:\x8c0\xe2\x8ey\xc5?\xf8\xdfA:\xdc\x11\xc5\xc9?\xf8\x84\xf7C֔\x10\xce?\xf8\x95\xd6&\xe8\v.\xd1?\xf8:\x8c0\xe2\x8ey\xd5?\xf8\xdfA:\xdc\x11\xc5\xd9?\xf8\x84\xf7C֔\x10\xde?\xf8\x95\xd6&\xe8\v.\xe1?\xf8:\x8c0\xe2\x8ey\xe5?\xf8\xdfA:\xdc\x11\xc5\xe9?\xf8\x84\xf7C֔\x10\xee?\xf8\x95\xd6&\xe8\v.\xf1?\xf8:\x8c0\xe2\x8ey\xf5?\xf8\xdfA:\xdc\x11\xc5\xf9?\xf8\x84\xf7C֔\x10\xfe?\xf8\x95\xd6&\xe8\v.\x01@\xf8:\x8c0\xe2\x8ey\x05@\xf8\xdfA:\xdc\x11\xc5\t@\xf8\x84\xf7C֔\x10\x0e@\xf8\x95\xd6&\xe8\v.\x11@\xf8:\x8c0\xe2\x8ey\x15@\xf8\xdfA:\xdc\x11\xc5\x19@\xf8\x84\xf7C֔\x10\x1e@\xf8\x95\xd6&\xe8\v.!@\xf8:\x8c0\xe2\x8ey%@\xf8\xdfA:\xdc\x11\xc5)@\xf8\x84\xf7C֔\x10.@\xf8\x95\xd6&\xe8\v.1@\xf8:\x8c0\xe2\x8ey5@\xf8\xdfA:\xdc\x11\xc59@\xf8\x84\xf7C֔\x10>@\xf8\x95\xd6&\xe8\v.A@\xf8:\x8c0\xe2\x8eyE@\xf8\xdfA:\xdc\x11\xc5I@\xf8\x84\xf7C֔\x10N@\xf8\x95\xd6&\xe8\v.Q@\xf8:\x8c0\xe2\x8eyU@\xf8\xdfA:\xdc\x11\xc5Y@\xf8\x84\xf7C֔\x10^@\xf8\x95\xd6&\xe8\v.a@\xf8:\x8c0\xe2\x8eye@\xf8\xdfA:\xdc\x11\xc5i@\xf8\x84\xf7C֔\x10n@\xf8\x95\xd6&\xe8\v.q@\xf8:\x8c0\xe2\x8eyu@\xf8\xdfA:\xdc\x11\xc5y@\xf8\x84\xf7C֔\x10~@\xf8\x95\xd6&\xe8\v.\x81@\xf8:\x8c0\xe2\x8ey\x85@\xf8\xdfA:\xdc\x11ʼn@\xf8\x84\xf7C֔\x10\x8e@\xf8\x95\xd6&\xe8\v.\x91@\xf8:\x8c0\xe2\x8ey\x95@\xf8\xdfA:\xdc\x11ř@\xf8\x84
\xf7C֔\x10\x9e@\xf8\x95\xd6&\xe8\v.\xa1@\xf8:\x8c0\xe2\x8ey\xa5@\xf8\xdfA:\xdc\x11ũ@\xf8\x84\xf7C֔\x10\xae@\xf8\x95\xd6&\xe8\v.\xb1@\xf8:\x8c0\xe2\x8ey\xb5@\xf8\xdfA:\xdc\x11Ź@\xf8\x84\xf7C֔\x10\xbe@\xf8\x95\xd6&\xe8\v.\xc1@\xf8:\x8c0\xe2\x8ey\xc5@\xf8\xdfA:\xdc\x11\xc5\xc9@\xf8\x84\xf7C֔\x10\xce@\xf8\x95\xd6&\xe8\v.\xd1@\xf8:\x8c0\xe2\x8ey\xd5@\xf8\xdfA:\xdc\x11\xc5\xd9@\xf8\x84\xf7C֔\x10\xde@\xf8\x95\xd6&\xe8\v.\xe1@\xf8:\x8c0\xe2\x8ey\xe5@\xf8\xdfA:\xdc\x11\xc5\xe9@\xf8\x84\xf7C֔\x10\xee@\xf8\x95\xd6&\xe8\v.\xf1@\xf8:\x8c0\xe2\x8ey\xf5@\xf8\xdfA:\xdc\x11\xc5\xf9@\xf8\x84\xf7C֔\x10\xfe@\xf8\x95\xd6&\xe8\v.\x01A\xfe\xf0\x7f\x00"
+ var gcPauses = "7\xff\x81\x03\x01\x01\x10Float64Histogram\x01\xff\x82\x00\x01\x02\x01\x06Counts\x01\xff\x84\x00\x01\aBuckets\x01\xff\x86\x00\x00\x00\x16\xff\x83\x02\x01\x01\b[]uint64\x01\xff\x84\x00\x01\x06\x00\x00\x17\xff\x85\x02\x01\x01\t[]float64\x01\xff\x86\x00\x01\b\x00\x00\xfe\x06R\xff\x82\x01\xff\xa2\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x01\x01\x01\x00\x01\x00\x00\x00\x01\x01\x01\x01\x01\x01\x01\x00\x02\x00\x00\x00\x00\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x01\xff\xa3\xfe\xf0\xff\x00\xf8\x95\xd6&\xe8\v.q>\xf8\x95\xd6&\xe8\v.\x81>\xf8\xdfA:\xdc\x11ʼn>\xf8\x95\xd6&\xe8\v.\x91>\xf8:\x8c0\xe2\x8ey\x95>\xf8\xdfA:\xdc\x11ř>\xf8\x84\xf7C֔\x10\x9e>\xf8\x95\xd6&\xe8\v.\xa1>\xf8:\x8c0\xe2\x8ey\xa5>\xf8\xdfA:\xdc\x11ũ>\xf8\x84\xf7C֔\x10\xae>\xf8\x95\xd6&\xe8\v.\xb1>\xf8:\x8c0\xe2\x8ey\xb5>\xf8\xdfA:\xdc\x11Ź>\xf8\x84\xf7C֔\x10\xbe>\xf8\x95\xd6&\xe8\v.\xc1>\xf8:\x8c0\xe2\x8ey\xc5>\xf8\xdfA:\xdc\x11\xc5\xc9>\xf8\x84\xf7C֔\x10\xce>\xf8\x95\xd6&\xe8\v.\xd1>\xf8:\x8c0\xe2\x8ey\xd5>\xf8\xdfA:\xdc\x11\xc5\xd9>\xf8\x84\xf7C֔\x10\xde>\xf8\x95\xd6&\xe8\v.\xe1>\xf8:\x8c0\xe2\x8ey\xe5>\xf8\xdfA:\xdc\x11\xc5\xe9>\xf8\x84\xf7C֔\x10\xee>\xf8\x95\xd6&\xe8\v.\xf1>\xf8:\x8c0\xe2\x8ey\xf5>\xf8\xdfA:\xdc\x11\xc5\xf9>\xf8\x84\xf7C֔\x10\xfe>\xf8\x95\xd6&\xe8\v.\x01?\xf8:\x8c0\xe2\x8ey\x05?\xf8\xdfA:\xdc\x11\xc5\t?\xf8\x84\xf7C֔\x10\x0e?\xf8\x95\xd6&\xe8\v.\x11?\xf8:\x8c0\xe2\x8ey\x15?\xf8\xdfA:\xdc\x11\xc5\x19?\xf8\x84\xf7C֔\x10\x1e?\xf8\x95\xd6&\xe8\v.!?\xf8:\x8c0\xe2\x8ey%?\xf8\xdfA:\xdc\x11\xc5)?\xf8\x84\xf
7C֔\x10.?\xf8\x95\xd6&\xe8\v.1?\xf8:\x8c0\xe2\x8ey5?\xf8\xdfA:\xdc\x11\xc59?\xf8\x84\xf7C֔\x10>?\xf8\x95\xd6&\xe8\v.A?\xf8:\x8c0\xe2\x8eyE?\xf8\xdfA:\xdc\x11\xc5I?\xf8\x84\xf7C֔\x10N?\xf8\x95\xd6&\xe8\v.Q?\xf8:\x8c0\xe2\x8eyU?\xf8\xdfA:\xdc\x11\xc5Y?\xf8\x84\xf7C֔\x10^?\xf8\x95\xd6&\xe8\v.a?\xf8:\x8c0\xe2\x8eye?\xf8\xdfA:\xdc\x11\xc5i?\xf8\x84\xf7C֔\x10n?\xf8\x95\xd6&\xe8\v.q?\xf8:\x8c0\xe2\x8eyu?\xf8\xdfA:\xdc\x11\xc5y?\xf8\x84\xf7C֔\x10~?\xf8\x95\xd6&\xe8\v.\x81?\xf8:\x8c0\xe2\x8ey\x85?\xf8\xdfA:\xdc\x11ʼn?\xf8\x84\xf7C֔\x10\x8e?\xf8\x95\xd6&\xe8\v.\x91?\xf8:\x8c0\xe2\x8ey\x95?\xf8\xdfA:\xdc\x11ř?\xf8\x84\xf7C֔\x10\x9e?\xf8\x95\xd6&\xe8\v.\xa1?\xf8:\x8c0\xe2\x8ey\xa5?\xf8\xdfA:\xdc\x11ũ?\xf8\x84\xf7C֔\x10\xae?\xf8\x95\xd6&\xe8\v.\xb1?\xf8:\x8c0\xe2\x8ey\xb5?\xf8\xdfA:\xdc\x11Ź?\xf8\x84\xf7C֔\x10\xbe?\xf8\x95\xd6&\xe8\v.\xc1?\xf8:\x8c0\xe2\x8ey\xc5?\xf8\xdfA:\xdc\x11\xc5\xc9?\xf8\x84\xf7C֔\x10\xce?\xf8\x95\xd6&\xe8\v.\xd1?\xf8:\x8c0\xe2\x8ey\xd5?\xf8\xdfA:\xdc\x11\xc5\xd9?\xf8\x84\xf7C֔\x10\xde?\xf8\x95\xd6&\xe8\v.\xe1?\xf8:\x8c0\xe2\x8ey\xe5?\xf8\xdfA:\xdc\x11\xc5\xe9?\xf8\x84\xf7C֔\x10\xee?\xf8\x95\xd6&\xe8\v.\xf1?\xf8:\x8c0\xe2\x8ey\xf5?\xf8\xdfA:\xdc\x11\xc5\xf9?\xf8\x84\xf7C֔\x10\xfe?\xf8\x95\xd6&\xe8\v.\x01@\xf8:\x8c0\xe2\x8ey\x05@\xf8\xdfA:\xdc\x11\xc5\t@\xf8\x84\xf7C֔\x10\x0e@\xf8\x95\xd6&\xe8\v.\x11@\xf8:\x8c0\xe2\x8ey\x15@\xf8\xdfA:\xdc\x11\xc5\x19@\xf8\x84\xf7C֔\x10\x1e@\xf8\x95\xd6&\xe8\v.!@\xf8:\x8c0\xe2\x8ey%@\xf8\xdfA:\xdc\x11\xc5)@\xf8\x84\xf7C֔\x10.@\xf8\x95\xd6&\xe8\v.1@\xf8:\x8c0\xe2\x8ey5@\xf8\xdfA:\xdc\x11\xc59@\xf8\x84\xf7C֔\x10>@\xf8\x95\xd6&\xe8\v.A@\xf8:\x8c0\xe2\x8eyE@\xf8\xdfA:\xdc\x11\xc5I@\xf8\x84\xf7C֔\x10N@\xf8\x95\xd6&\xe8\v.Q@\xf8:\x8c0\xe2\x8eyU@\xf8\xdfA:\xdc\x11\xc5Y@\xf8\x84\xf7C֔\x10^@\xf8\x95\xd6&\xe8\v.a@\xf8:\x8c0\xe2\x8eye@\xf8\xdfA:\xdc\x11\xc5i@\xf8\x84\xf7C֔\x10n@\xf8\x95\xd6&\xe8\v.q@\xf8:\x8c0\xe2\x8eyu@\xf8\xdfA:\xdc\x11\xc5y@\xf8\x84\xf7C֔\x10~@\xf8\x95\xd6&\xe8\v.\x81@\xf8:\x8c0\xe2\x8ey\x85@\xf8\xdfA:\xdc\x11ʼn@\xf
8\x84\xf7C֔\x10\x8e@\xf8\x95\xd6&\xe8\v.\x91@\xf8:\x8c0\xe2\x8ey\x95@\xf8\xdfA:\xdc\x11ř@\xf8\x84\xf7C֔\x10\x9e@\xf8\x95\xd6&\xe8\v.\xa1@\xf8:\x8c0\xe2\x8ey\xa5@\xf8\xdfA:\xdc\x11ũ@\xf8\x84\xf7C֔\x10\xae@\xf8\x95\xd6&\xe8\v.\xb1@\xf8:\x8c0\xe2\x8ey\xb5@\xf8\xdfA:\xdc\x11Ź@\xf8\x84\xf7C֔\x10\xbe@\xf8\x95\xd6&\xe8\v.\xc1@\xf8:\x8c0\xe2\x8ey\xc5@\xf8\xdfA:\xdc\x11\xc5\xc9@\xf8\x84\xf7C֔\x10\xce@\xf8\x95\xd6&\xe8\v.\xd1@\xf8:\x8c0\xe2\x8ey\xd5@\xf8\xdfA:\xdc\x11\xc5\xd9@\xf8\x84\xf7C֔\x10\xde@\xf8\x95\xd6&\xe8\v.\xe1@\xf8:\x8c0\xe2\x8ey\xe5@\xf8\xdfA:\xdc\x11\xc5\xe9@\xf8\x84\xf7C֔\x10\xee@\xf8\x95\xd6&\xe8\v.\xf1@\xf8:\x8c0\xe2\x8ey\xf5@\xf8\xdfA:\xdc\x11\xc5\xf9@\xf8\x84\xf7C֔\x10\xfe@\xf8\x95\xd6&\xe8\v.\x01A\xfe\xf0\x7f\x00"
+
+ var secondsToNs = float64(time.Second)
+
+ dserialize := func(data string) *metrics2.Float64Histogram {
+ var res metrics2.Float64Histogram
+ if err := gob.NewDecoder(bytes.NewReader([]byte(data))).Decode(&res); err != nil {
+ panic(err)
+ }
+ return &res
+ }
+ cpuSchedLatency := metrics.RuntimeHistogramFromData(secondsToNs, dserialize(sLatency))
+ registry.Register("system/cpu/schedlatency", cpuSchedLatency)
+
+ memPauses := metrics.RuntimeHistogramFromData(secondsToNs, dserialize(gcPauses))
+ registry.Register("system/memory/pauses", memPauses)
+ }
+ return registry
+}
diff --git a/metrics/internal/sampledata_test.go b/metrics/internal/sampledata_test.go
new file mode 100644
index 000000000..790da102f
--- /dev/null
+++ b/metrics/internal/sampledata_test.go
@@ -0,0 +1,27 @@
+package internal
+
+import (
+ "bytes"
+ "encoding/gob"
+ "fmt"
+ metrics2 "runtime/metrics"
+ "testing"
+ "time"
+
+ "github.com/tomochain/tomochain/metrics"
+)
+
+func TestCollectRuntimeMetrics(t *testing.T) {
+ t.Skip("Only used for generating testdata")
+ serialize := func(path string, histogram *metrics2.Float64Histogram) {
+ var f = new(bytes.Buffer)
+ if err := gob.NewEncoder(f).Encode(histogram); err != nil {
+ panic(err)
+ }
+ fmt.Printf("var %v = %q\n", path, f.Bytes())
+ }
+ time.Sleep(2 * time.Second)
+ stats := metrics.ReadRuntimeStats()
+ serialize("schedlatency", stats.SchedLatency)
+ serialize("gcpauses", stats.GCPauses)
+}
diff --git a/metrics/json_test.go b/metrics/json_test.go
index cf70051f7..f91fe8cfa 100644
--- a/metrics/json_test.go
+++ b/metrics/json_test.go
@@ -12,7 +12,7 @@ func TestRegistryMarshallJSON(t *testing.T) {
r := NewRegistry()
r.Register("counter", NewCounter())
enc.Encode(r)
- if s := b.String(); "{\"counter\":{\"count\":0}}\n" != s {
+ if s := b.String(); s != "{\"counter\":{\"count\":0}}\n" {
t.Fatalf(s)
}
}
diff --git a/metrics/librato/client.go b/metrics/librato/client.go
index 8c0c850e3..f1b9e1e91 100644
--- a/metrics/librato/client.go
+++ b/metrics/librato/client.go
@@ -4,7 +4,7 @@ import (
"bytes"
"encoding/json"
"fmt"
- "io/ioutil"
+ "io"
"net/http"
)
@@ -65,7 +65,7 @@ type Batch struct {
Source string `json:"source"`
}
-func (self *LibratoClient) PostMetrics(batch Batch) (err error) {
+func (c *LibratoClient) PostMetrics(batch Batch) (err error) {
var (
js []byte
req *http.Request
@@ -80,23 +80,25 @@ func (self *LibratoClient) PostMetrics(batch Batch) (err error) {
return
}
- if req, err = http.NewRequest("POST", MetricsPostUrl, bytes.NewBuffer(js)); err != nil {
+ if req, err = http.NewRequest(http.MethodPost, MetricsPostUrl, bytes.NewBuffer(js)); err != nil {
return
}
req.Header.Set("Content-Type", "application/json")
- req.SetBasicAuth(self.Email, self.Token)
+ req.SetBasicAuth(c.Email, c.Token)
- if resp, err = http.DefaultClient.Do(req); err != nil {
+ resp, err = http.DefaultClient.Do(req)
+ if err != nil {
return
}
+ defer resp.Body.Close()
if resp.StatusCode != http.StatusOK {
var body []byte
- if body, err = ioutil.ReadAll(resp.Body); err != nil {
+ if body, err = io.ReadAll(resp.Body); err != nil {
body = []byte(fmt.Sprintf("(could not fetch response body for error: %s)", err))
}
- err = fmt.Errorf("Unable to post to Librato: %d %s %s", resp.StatusCode, resp.Status, string(body))
+ err = fmt.Errorf("unable to post to Librato: %d %s %s", resp.StatusCode, resp.Status, string(body))
}
return
}
diff --git a/metrics/librato/librato.go b/metrics/librato/librato.go
index 23ebd71f5..ea545c2fd 100644
--- a/metrics/librato/librato.go
+++ b/metrics/librato/librato.go
@@ -40,14 +40,15 @@ func Librato(r metrics.Registry, d time.Duration, e string, t string, s string,
NewReporter(r, d, e, t, s, p, u).Run()
}
-func (self *Reporter) Run() {
+func (rep *Reporter) Run() {
log.Printf("WARNING: This client has been DEPRECATED! It has been moved to https://github.com/mihasya/go-metrics-librato and will be removed from rcrowley/go-metrics on August 5th 2015")
- ticker := time.Tick(self.Interval)
- metricsApi := &LibratoClient{self.Email, self.Token}
- for now := range ticker {
+ ticker := time.NewTicker(rep.Interval)
+ defer ticker.Stop()
+ metricsApi := &LibratoClient{rep.Email, rep.Token}
+ for now := range ticker.C {
var metrics Batch
var err error
- if metrics, err = self.BuildRequest(now, self.Registry); err != nil {
+ if metrics, err = rep.BuildRequest(now, rep.Registry); err != nil {
log.Printf("ERROR constructing librato request body %s", err)
continue
}
@@ -60,16 +61,16 @@ func (self *Reporter) Run() {
// calculate sum of squares from data provided by metrics.Histogram
// see http://en.wikipedia.org/wiki/Standard_deviation#Rapid_calculation_methods
-func sumSquares(s metrics.Sample) float64 {
- count := float64(s.Count())
- sumSquared := math.Pow(count*s.Mean(), 2)
- sumSquares := math.Pow(count*s.StdDev(), 2) + sumSquared/count
+func sumSquares(icount int64, mean, stDev float64) float64 {
+ count := float64(icount)
+ sumSquared := math.Pow(count*mean, 2)
+ sumSquares := math.Pow(count*stDev, 2) + sumSquared/count
if math.IsNaN(sumSquares) {
return 0.0
}
return sumSquares
}
-func sumSquaresTimer(t metrics.Timer) float64 {
+func sumSquaresTimer(t metrics.TimerSnapshot) float64 {
count := float64(t.Count())
sumSquared := math.Pow(count*t.Mean(), 2)
sumSquares := math.Pow(count*t.StdDev(), 2) + sumSquared/count
@@ -79,26 +80,38 @@ func sumSquaresTimer(t metrics.Timer) float64 {
return sumSquares
}
-func (self *Reporter) BuildRequest(now time.Time, r metrics.Registry) (snapshot Batch, err error) {
+func (rep *Reporter) BuildRequest(now time.Time, r metrics.Registry) (snapshot Batch, err error) {
snapshot = Batch{
// coerce timestamps to a stepping fn so that they line up in Librato graphs
- MeasureTime: (now.Unix() / self.intervalSec) * self.intervalSec,
- Source: self.Source,
+ MeasureTime: (now.Unix() / rep.intervalSec) * rep.intervalSec,
+ Source: rep.Source,
}
snapshot.Gauges = make([]Measurement, 0)
snapshot.Counters = make([]Measurement, 0)
- histogramGaugeCount := 1 + len(self.Percentiles)
+ histogramGaugeCount := 1 + len(rep.Percentiles)
r.Each(func(name string, metric interface{}) {
- if self.Namespace != "" {
- name = fmt.Sprintf("%s.%s", self.Namespace, name)
+ if rep.Namespace != "" {
+ name = fmt.Sprintf("%s.%s", rep.Namespace, name)
}
measurement := Measurement{}
- measurement[Period] = self.Interval.Seconds()
+ measurement[Period] = rep.Interval.Seconds()
switch m := metric.(type) {
case metrics.Counter:
- if m.Count() > 0 {
+ ms := m.Snapshot()
+ if ms.Count() > 0 {
measurement[Name] = fmt.Sprintf("%s.%s", name, "count")
- measurement[Value] = float64(m.Count())
+ measurement[Value] = float64(ms.Count())
+ measurement[Attributes] = map[string]interface{}{
+ DisplayUnitsLong: Operations,
+ DisplayUnitsShort: OperationsShort,
+ DisplayMin: "0",
+ }
+ snapshot.Counters = append(snapshot.Counters, measurement)
+ }
+ case metrics.CounterFloat64:
+ if count := m.Snapshot().Count(); count > 0 {
+ measurement[Name] = fmt.Sprintf("%s.%s", name, "count")
+ measurement[Value] = count
measurement[Attributes] = map[string]interface{}{
DisplayUnitsLong: Operations,
DisplayUnitsShort: OperationsShort,
@@ -108,41 +121,46 @@ func (self *Reporter) BuildRequest(now time.Time, r metrics.Registry) (snapshot
}
case metrics.Gauge:
measurement[Name] = name
- measurement[Value] = float64(m.Value())
+ measurement[Value] = float64(m.Snapshot().Value())
snapshot.Gauges = append(snapshot.Gauges, measurement)
case metrics.GaugeFloat64:
measurement[Name] = name
- measurement[Value] = m.Value()
+ measurement[Value] = m.Snapshot().Value()
+ snapshot.Gauges = append(snapshot.Gauges, measurement)
+ case metrics.GaugeInfo:
+ measurement[Name] = name
+ measurement[Value] = m.Snapshot().Value()
snapshot.Gauges = append(snapshot.Gauges, measurement)
case metrics.Histogram:
- if m.Count() > 0 {
+ ms := m.Snapshot()
+ if ms.Count() > 0 {
gauges := make([]Measurement, histogramGaugeCount)
- s := m.Sample()
measurement[Name] = fmt.Sprintf("%s.%s", name, "hist")
- measurement[Count] = uint64(s.Count())
- measurement[Max] = float64(s.Max())
- measurement[Min] = float64(s.Min())
- measurement[Sum] = float64(s.Sum())
- measurement[SumSquares] = sumSquares(s)
+ measurement[Count] = uint64(ms.Count())
+ measurement[Max] = float64(ms.Max())
+ measurement[Min] = float64(ms.Min())
+ measurement[Sum] = float64(ms.Sum())
+ measurement[SumSquares] = sumSquares(ms.Count(), ms.Mean(), ms.StdDev())
gauges[0] = measurement
- for i, p := range self.Percentiles {
+ for i, p := range rep.Percentiles {
gauges[i+1] = Measurement{
Name: fmt.Sprintf("%s.%.2f", measurement[Name], p),
- Value: s.Percentile(p),
+ Value: ms.Percentile(p),
Period: measurement[Period],
}
}
snapshot.Gauges = append(snapshot.Gauges, gauges...)
}
case metrics.Meter:
+ ms := m.Snapshot()
measurement[Name] = name
- measurement[Value] = float64(m.Count())
+ measurement[Value] = float64(ms.Count())
snapshot.Counters = append(snapshot.Counters, measurement)
snapshot.Gauges = append(snapshot.Gauges,
Measurement{
Name: fmt.Sprintf("%s.%s", name, "1min"),
- Value: m.Rate1(),
- Period: int64(self.Interval.Seconds()),
+ Value: ms.Rate1(),
+ Period: int64(rep.Interval.Seconds()),
Attributes: map[string]interface{}{
DisplayUnitsLong: Operations,
DisplayUnitsShort: OperationsShort,
@@ -151,8 +169,8 @@ func (self *Reporter) BuildRequest(now time.Time, r metrics.Registry) (snapshot
},
Measurement{
Name: fmt.Sprintf("%s.%s", name, "5min"),
- Value: m.Rate5(),
- Period: int64(self.Interval.Seconds()),
+ Value: ms.Rate5(),
+ Period: int64(rep.Interval.Seconds()),
Attributes: map[string]interface{}{
DisplayUnitsLong: Operations,
DisplayUnitsShort: OperationsShort,
@@ -161,8 +179,8 @@ func (self *Reporter) BuildRequest(now time.Time, r metrics.Registry) (snapshot
},
Measurement{
Name: fmt.Sprintf("%s.%s", name, "15min"),
- Value: m.Rate15(),
- Period: int64(self.Interval.Seconds()),
+ Value: ms.Rate15(),
+ Period: int64(rep.Interval.Seconds()),
Attributes: map[string]interface{}{
DisplayUnitsLong: Operations,
DisplayUnitsShort: OperationsShort,
@@ -171,36 +189,37 @@ func (self *Reporter) BuildRequest(now time.Time, r metrics.Registry) (snapshot
},
)
case metrics.Timer:
+ ms := m.Snapshot()
measurement[Name] = name
- measurement[Value] = float64(m.Count())
+ measurement[Value] = float64(ms.Count())
snapshot.Counters = append(snapshot.Counters, measurement)
- if m.Count() > 0 {
+ if ms.Count() > 0 {
libratoName := fmt.Sprintf("%s.%s", name, "timer.mean")
gauges := make([]Measurement, histogramGaugeCount)
gauges[0] = Measurement{
Name: libratoName,
- Count: uint64(m.Count()),
- Sum: m.Mean() * float64(m.Count()),
- Max: float64(m.Max()),
- Min: float64(m.Min()),
- SumSquares: sumSquaresTimer(m),
- Period: int64(self.Interval.Seconds()),
- Attributes: self.TimerAttributes,
+ Count: uint64(ms.Count()),
+ Sum: ms.Mean() * float64(ms.Count()),
+ Max: float64(ms.Max()),
+ Min: float64(ms.Min()),
+ SumSquares: sumSquaresTimer(ms),
+ Period: int64(rep.Interval.Seconds()),
+ Attributes: rep.TimerAttributes,
}
- for i, p := range self.Percentiles {
+ for i, p := range rep.Percentiles {
gauges[i+1] = Measurement{
Name: fmt.Sprintf("%s.timer.%2.0f", name, p*100),
- Value: m.Percentile(p),
- Period: int64(self.Interval.Seconds()),
- Attributes: self.TimerAttributes,
+ Value: ms.Percentile(p),
+ Period: int64(rep.Interval.Seconds()),
+ Attributes: rep.TimerAttributes,
}
}
snapshot.Gauges = append(snapshot.Gauges, gauges...)
snapshot.Gauges = append(snapshot.Gauges,
Measurement{
Name: fmt.Sprintf("%s.%s", name, "rate.1min"),
- Value: m.Rate1(),
- Period: int64(self.Interval.Seconds()),
+ Value: ms.Rate1(),
+ Period: int64(rep.Interval.Seconds()),
Attributes: map[string]interface{}{
DisplayUnitsLong: Operations,
DisplayUnitsShort: OperationsShort,
@@ -209,8 +228,8 @@ func (self *Reporter) BuildRequest(now time.Time, r metrics.Registry) (snapshot
},
Measurement{
Name: fmt.Sprintf("%s.%s", name, "rate.5min"),
- Value: m.Rate5(),
- Period: int64(self.Interval.Seconds()),
+ Value: ms.Rate5(),
+ Period: int64(rep.Interval.Seconds()),
Attributes: map[string]interface{}{
DisplayUnitsLong: Operations,
DisplayUnitsShort: OperationsShort,
@@ -219,8 +238,8 @@ func (self *Reporter) BuildRequest(now time.Time, r metrics.Registry) (snapshot
},
Measurement{
Name: fmt.Sprintf("%s.%s", name, "rate.15min"),
- Value: m.Rate15(),
- Period: int64(self.Interval.Seconds()),
+ Value: ms.Rate15(),
+ Period: int64(rep.Interval.Seconds()),
Attributes: map[string]interface{}{
DisplayUnitsLong: Operations,
DisplayUnitsShort: OperationsShort,
diff --git a/metrics/log.go b/metrics/log.go
index 0c8ea7c97..3b9773faa 100644
--- a/metrics/log.go
+++ b/metrics/log.go
@@ -23,13 +23,19 @@ func LogScaled(r Registry, freq time.Duration, scale time.Duration, l Logger) {
switch metric := i.(type) {
case Counter:
l.Printf("counter %s\n", name)
- l.Printf(" count: %9d\n", metric.Count())
+ l.Printf(" count: %9d\n", metric.Snapshot().Count())
+ case CounterFloat64:
+ l.Printf("counter %s\n", name)
+ l.Printf(" count: %f\n", metric.Snapshot().Count())
case Gauge:
l.Printf("gauge %s\n", name)
- l.Printf(" value: %9d\n", metric.Value())
+ l.Printf(" value: %9d\n", metric.Snapshot().Value())
case GaugeFloat64:
l.Printf("gauge %s\n", name)
- l.Printf(" value: %f\n", metric.Value())
+ l.Printf(" value: %f\n", metric.Snapshot().Value())
+ case GaugeInfo:
+ l.Printf("gauge %s\n", name)
+ l.Printf(" value: %s\n", metric.Snapshot().Value())
case Healthcheck:
metric.Check()
l.Printf("healthcheck %s\n", name)
diff --git a/metrics/meter.go b/metrics/meter.go
index 82b2141a6..22475ef6e 100644
--- a/metrics/meter.go
+++ b/metrics/meter.go
@@ -1,20 +1,25 @@
package metrics
import (
+ "math"
"sync"
+ "sync/atomic"
"time"
)
-// Meters count events to produce exponentially-weighted moving average rates
-// at one-, five-, and fifteen-minutes and a mean rate.
-type Meter interface {
+type MeterSnapshot interface {
Count() int64
- Mark(int64)
Rate1() float64
Rate5() float64
Rate15() float64
RateMean() float64
- Snapshot() Meter
+}
+
+// Meters count events to produce exponentially-weighted moving average rates
+// at one-, five-, and fifteen-minutes and a mean rate.
+type Meter interface {
+ Mark(int64)
+ Snapshot() MeterSnapshot
Stop()
}
@@ -46,94 +51,70 @@ func NewMeter() Meter {
return m
}
-// NewMeter constructs and registers a new StandardMeter and launches a
-// goroutine.
+// NewInactiveMeter returns a meter but does not start any goroutines. This
+// method is mainly intended for testing.
+func NewInactiveMeter() Meter {
+ if !Enabled {
+ return NilMeter{}
+ }
+ m := newStandardMeter()
+ return m
+}
+
+// NewRegisteredMeter constructs and registers a new StandardMeter
+// and launches a goroutine.
// Be sure to unregister the meter from the registry once it is of no use to
// allow for garbage collection.
func NewRegisteredMeter(name string, r Registry) Meter {
- c := NewMeter()
- if nil == r {
- r = DefaultRegistry
- }
- r.Register(name, c)
- return c
+ return GetOrRegisterMeter(name, r)
}
-// MeterSnapshot is a read-only copy of another Meter.
-type MeterSnapshot struct {
+// meterSnapshot is a read-only copy of the meter's internal values.
+type meterSnapshot struct {
count int64
rate1, rate5, rate15, rateMean float64
}
// Count returns the count of events at the time the snapshot was taken.
-func (m *MeterSnapshot) Count() int64 { return m.count }
-
-// Mark panics.
-func (*MeterSnapshot) Mark(n int64) {
- panic("Mark called on a MeterSnapshot")
-}
+func (m *meterSnapshot) Count() int64 { return m.count }
// Rate1 returns the one-minute moving average rate of events per second at the
// time the snapshot was taken.
-func (m *MeterSnapshot) Rate1() float64 { return m.rate1 }
+func (m *meterSnapshot) Rate1() float64 { return m.rate1 }
// Rate5 returns the five-minute moving average rate of events per second at
// the time the snapshot was taken.
-func (m *MeterSnapshot) Rate5() float64 { return m.rate5 }
+func (m *meterSnapshot) Rate5() float64 { return m.rate5 }
// Rate15 returns the fifteen-minute moving average rate of events per second
// at the time the snapshot was taken.
-func (m *MeterSnapshot) Rate15() float64 { return m.rate15 }
+func (m *meterSnapshot) Rate15() float64 { return m.rate15 }
// RateMean returns the meter's mean rate of events per second at the time the
// snapshot was taken.
-func (m *MeterSnapshot) RateMean() float64 { return m.rateMean }
-
-// Snapshot returns the snapshot.
-func (m *MeterSnapshot) Snapshot() Meter { return m }
-
-// Stop is a no-op.
-func (m *MeterSnapshot) Stop() {}
+func (m *meterSnapshot) RateMean() float64 { return m.rateMean }
// NilMeter is a no-op Meter.
type NilMeter struct{}
-// Count is a no-op.
-func (NilMeter) Count() int64 { return 0 }
-
-// Mark is a no-op.
-func (NilMeter) Mark(n int64) {}
-
-// Rate1 is a no-op.
-func (NilMeter) Rate1() float64 { return 0.0 }
-
-// Rate5 is a no-op.
-func (NilMeter) Rate5() float64 { return 0.0 }
-
-// Rate15is a no-op.
-func (NilMeter) Rate15() float64 { return 0.0 }
-
-// RateMean is a no-op.
-func (NilMeter) RateMean() float64 { return 0.0 }
-
-// Snapshot is a no-op.
-func (NilMeter) Snapshot() Meter { return NilMeter{} }
-
-// Stop is a no-op.
-func (NilMeter) Stop() {}
+func (NilMeter) Count() int64 { return 0 }
+func (NilMeter) Mark(n int64) {}
+func (NilMeter) Snapshot() MeterSnapshot { return (*emptySnapshot)(nil) }
+func (NilMeter) Stop() {}
// StandardMeter is the standard implementation of a Meter.
type StandardMeter struct {
- lock sync.RWMutex
- snapshot *MeterSnapshot
+ count atomic.Int64
+ uncounted atomic.Int64 // not yet added to the EWMAs
+ rateMean atomic.Uint64
+
a1, a5, a15 EWMA
startTime time.Time
- stopped bool
+ stopped atomic.Bool
}
func newStandardMeter() *StandardMeter {
return &StandardMeter{
- snapshot: &MeterSnapshot{},
a1: NewEWMA1(),
a5: NewEWMA5(),
a15: NewEWMA15(),
@@ -143,95 +124,42 @@ func newStandardMeter() *StandardMeter {
// Stop stops the meter, Mark() will be a no-op if you use it after being stopped.
func (m *StandardMeter) Stop() {
- m.lock.Lock()
- stopped := m.stopped
- m.stopped = true
- m.lock.Unlock()
- if !stopped {
+ if stopped := m.stopped.Swap(true); !stopped {
arbiter.Lock()
delete(arbiter.meters, m)
arbiter.Unlock()
}
}
-// Count returns the number of events recorded.
-func (m *StandardMeter) Count() int64 {
- m.lock.RLock()
- count := m.snapshot.count
- m.lock.RUnlock()
- return count
-}
-
// Mark records the occurrence of n events.
func (m *StandardMeter) Mark(n int64) {
- m.lock.Lock()
- defer m.lock.Unlock()
- if m.stopped {
- return
- }
- m.snapshot.count += n
- m.a1.Update(n)
- m.a5.Update(n)
- m.a15.Update(n)
- m.updateSnapshot()
-}
-
-// Rate1 returns the one-minute moving average rate of events per second.
-func (m *StandardMeter) Rate1() float64 {
- m.lock.RLock()
- rate1 := m.snapshot.rate1
- m.lock.RUnlock()
- return rate1
-}
-
-// Rate5 returns the five-minute moving average rate of events per second.
-func (m *StandardMeter) Rate5() float64 {
- m.lock.RLock()
- rate5 := m.snapshot.rate5
- m.lock.RUnlock()
- return rate5
-}
-
-// Rate15 returns the fifteen-minute moving average rate of events per second.
-func (m *StandardMeter) Rate15() float64 {
- m.lock.RLock()
- rate15 := m.snapshot.rate15
- m.lock.RUnlock()
- return rate15
-}
-
-// RateMean returns the meter's mean rate of events per second.
-func (m *StandardMeter) RateMean() float64 {
- m.lock.RLock()
- rateMean := m.snapshot.rateMean
- m.lock.RUnlock()
- return rateMean
+ m.uncounted.Add(n)
}
// Snapshot returns a read-only copy of the meter.
-func (m *StandardMeter) Snapshot() Meter {
- m.lock.RLock()
- snapshot := *m.snapshot
- m.lock.RUnlock()
- return &snapshot
-}
-
-func (m *StandardMeter) updateSnapshot() {
- // should run with write lock held on m.lock
- snapshot := m.snapshot
- snapshot.rate1 = m.a1.Rate()
- snapshot.rate5 = m.a5.Rate()
- snapshot.rate15 = m.a15.Rate()
- snapshot.rateMean = float64(snapshot.count) / time.Since(m.startTime).Seconds()
+func (m *StandardMeter) Snapshot() MeterSnapshot {
+ return &meterSnapshot{
+ count: m.count.Load() + m.uncounted.Load(),
+ rate1: m.a1.Snapshot().Rate(),
+ rate5: m.a5.Snapshot().Rate(),
+ rate15: m.a15.Snapshot().Rate(),
+ rateMean: math.Float64frombits(m.rateMean.Load()),
+ }
}
func (m *StandardMeter) tick() {
- m.lock.Lock()
- defer m.lock.Unlock()
+ // Take the uncounted values, add to count
+ n := m.uncounted.Swap(0)
+ count := m.count.Add(n)
+ m.rateMean.Store(math.Float64bits(float64(count) / time.Since(m.startTime).Seconds()))
+ // Update the EWMA's internal state
+ m.a1.Update(n)
+ m.a5.Update(n)
+ m.a15.Update(n)
+ // And trigger them to calculate the rates
m.a1.Tick()
m.a5.Tick()
m.a15.Tick()
- m.updateSnapshot()
}
// meterArbiter ticks meters every 5s from a single goroutine.
@@ -243,7 +171,7 @@ type meterArbiter struct {
ticker *time.Ticker
}
-var arbiter = meterArbiter{ticker: time.NewTicker(5e9), meters: make(map[*StandardMeter]struct{})}
+var arbiter = meterArbiter{ticker: time.NewTicker(5 * time.Second), meters: make(map[*StandardMeter]struct{})}
// Ticks meters on the scheduled interval
func (ma *meterArbiter) tick() {
diff --git a/metrics/meter_test.go b/metrics/meter_test.go
index e88922260..019c4d765 100644
--- a/metrics/meter_test.go
+++ b/metrics/meter_test.go
@@ -12,12 +12,18 @@ func BenchmarkMeter(b *testing.B) {
m.Mark(1)
}
}
-
+func TestMeter(t *testing.T) {
+ m := NewMeter()
+ m.Mark(47)
+ if v := m.Snapshot().Count(); v != 47 {
+ t.Fatalf("have %d want %d", v, 47)
+ }
+}
func TestGetOrRegisterMeter(t *testing.T) {
r := NewRegistry()
NewRegisteredMeter("foo", r).Mark(47)
- if m := GetOrRegisterMeter("foo", r); 47 != m.Count() {
- t.Fatal(m)
+ if m := GetOrRegisterMeter("foo", r).Snapshot(); m.Count() != 47 {
+ t.Fatal(m.Count())
}
}
@@ -26,13 +32,15 @@ func TestMeterDecay(t *testing.T) {
ticker: time.NewTicker(time.Millisecond),
meters: make(map[*StandardMeter]struct{}),
}
+ defer ma.ticker.Stop()
m := newStandardMeter()
ma.meters[m] = struct{}{}
- go ma.tick()
m.Mark(1)
- rateMean := m.RateMean()
+ ma.tickMeters()
+ rateMean := m.Snapshot().RateMean()
time.Sleep(100 * time.Millisecond)
- if m.RateMean() >= rateMean {
+ ma.tickMeters()
+ if m.Snapshot().RateMean() >= rateMean {
t.Error("m.RateMean() didn't decrease")
}
}
@@ -40,7 +48,7 @@ func TestMeterDecay(t *testing.T) {
func TestMeterNonzero(t *testing.T) {
m := NewMeter()
m.Mark(3)
- if count := m.Count(); 3 != count {
+ if count := m.Snapshot().Count(); count != 3 {
t.Errorf("m.Count(): 3 != %v\n", count)
}
}
@@ -48,26 +56,34 @@ func TestMeterNonzero(t *testing.T) {
func TestMeterStop(t *testing.T) {
l := len(arbiter.meters)
m := NewMeter()
- if len(arbiter.meters) != l+1 {
+ if l+1 != len(arbiter.meters) {
t.Errorf("arbiter.meters: %d != %d\n", l+1, len(arbiter.meters))
}
m.Stop()
- if len(arbiter.meters) != l {
+ if l != len(arbiter.meters) {
t.Errorf("arbiter.meters: %d != %d\n", l, len(arbiter.meters))
}
}
-func TestMeterSnapshot(t *testing.T) {
- m := NewMeter()
- m.Mark(1)
- if snapshot := m.Snapshot(); m.RateMean() != snapshot.RateMean() {
- t.Fatal(snapshot)
+func TestMeterZero(t *testing.T) {
+ m := NewMeter().Snapshot()
+ if count := m.Count(); count != 0 {
+ t.Errorf("m.Count(): 0 != %v\n", count)
}
}
-func TestMeterZero(t *testing.T) {
+func TestMeterRepeat(t *testing.T) {
m := NewMeter()
- if count := m.Count(); 0 != count {
- t.Errorf("m.Count(): 0 != %v\n", count)
+ for i := 0; i < 101; i++ {
+ m.Mark(int64(i))
+ }
+ if count := m.Snapshot().Count(); count != 5050 {
+ t.Errorf("m.Count(): 5050 != %v\n", count)
+ }
+ for i := 0; i < 101; i++ {
+ m.Mark(int64(i))
+ }
+ if count := m.Snapshot().Count(); count != 10100 {
+ t.Errorf("m.Count(): 10100 != %v\n", count)
}
}
diff --git a/metrics/metrics.go b/metrics/metrics.go
index dbb2727ec..864b51820 100644
--- a/metrics/metrics.go
+++ b/metrics/metrics.go
@@ -7,7 +7,8 @@ package metrics
import (
"os"
- "runtime"
+ "runtime/metrics"
+ "runtime/pprof"
"strings"
"time"
@@ -15,70 +16,211 @@ import (
)
// Enabled is checked by the constructor functions for all of the
-// standard metrics. If it is true, the metric returned is a stub.
+// standard metrics. If it is true, the metric returned is a stub.
//
// This global kill-switch helps quantify the observer effect and makes
// for less cluttered pprof profiles.
-var Enabled bool = false
+var Enabled = false
-// MetricsEnabledFlag is the CLI flag name to use to enable metrics collections.
-const MetricsEnabledFlag = "metrics"
+// EnabledExpensive is a soft-flag meant for external packages to check if costly
+// metrics gathering is allowed or not. The goal is to separate standard metrics
+// for health monitoring and debug metrics that might impact runtime performance.
+var EnabledExpensive = false
+
+// enablerFlags is the CLI flag names to use to enable metrics collections.
+var enablerFlags = []string{"metrics"}
+
+// expensiveEnablerFlags is the CLI flag names to use to enable metrics collections.
+var expensiveEnablerFlags = []string{"metrics.expensive"}
// Init enables or disables the metrics system. Since we need this to run before
// any other code gets to create meters and timers, we'll actually do an ugly hack
// and peek into the command line args for the metrics flag.
func init() {
for _, arg := range os.Args {
- if flag := strings.TrimLeft(arg, "-"); flag == MetricsEnabledFlag {
- log.Info("Enabling metrics collection")
- Enabled = true
+ flag := strings.TrimLeft(arg, "-")
+
+ for _, enabler := range enablerFlags {
+ if !Enabled && flag == enabler {
+ log.Info("Enabling metrics collection")
+ Enabled = true
+ }
+ }
+ for _, enabler := range expensiveEnablerFlags {
+ if !EnabledExpensive && flag == enabler {
+ log.Info("Enabling expensive metrics collection")
+ EnabledExpensive = true
+ }
+ }
+ }
+}
+
+var threadCreateProfile = pprof.Lookup("threadcreate")
+
+type runtimeStats struct {
+ GCPauses *metrics.Float64Histogram
+ GCAllocBytes uint64
+ GCFreedBytes uint64
+
+ MemTotal uint64
+ HeapObjects uint64
+ HeapFree uint64
+ HeapReleased uint64
+ HeapUnused uint64
+
+ Goroutines uint64
+ SchedLatency *metrics.Float64Histogram
+}
+
+var runtimeSamples = []metrics.Sample{
+ {Name: "/gc/pauses:seconds"}, // histogram
+ {Name: "/gc/heap/allocs:bytes"},
+ {Name: "/gc/heap/frees:bytes"},
+ {Name: "/memory/classes/total:bytes"},
+ {Name: "/memory/classes/heap/objects:bytes"},
+ {Name: "/memory/classes/heap/free:bytes"},
+ {Name: "/memory/classes/heap/released:bytes"},
+ {Name: "/memory/classes/heap/unused:bytes"},
+ {Name: "/sched/goroutines:goroutines"},
+ {Name: "/sched/latencies:seconds"}, // histogram
+}
+
+func ReadRuntimeStats() *runtimeStats {
+ r := new(runtimeStats)
+ readRuntimeStats(r)
+ return r
+}
+
+func readRuntimeStats(v *runtimeStats) {
+ metrics.Read(runtimeSamples)
+ for _, s := range runtimeSamples {
+ // Skip invalid/unknown metrics. This is needed because some metrics
+ // are unavailable in older Go versions, and attempting to read a 'bad'
+ // metric panics.
+ if s.Value.Kind() == metrics.KindBad {
+ continue
+ }
+
+ switch s.Name {
+ case "/gc/pauses:seconds":
+ v.GCPauses = s.Value.Float64Histogram()
+ case "/gc/heap/allocs:bytes":
+ v.GCAllocBytes = s.Value.Uint64()
+ case "/gc/heap/frees:bytes":
+ v.GCFreedBytes = s.Value.Uint64()
+ case "/memory/classes/total:bytes":
+ v.MemTotal = s.Value.Uint64()
+ case "/memory/classes/heap/objects:bytes":
+ v.HeapObjects = s.Value.Uint64()
+ case "/memory/classes/heap/free:bytes":
+ v.HeapFree = s.Value.Uint64()
+ case "/memory/classes/heap/released:bytes":
+ v.HeapReleased = s.Value.Uint64()
+ case "/memory/classes/heap/unused:bytes":
+ v.HeapUnused = s.Value.Uint64()
+ case "/sched/goroutines:goroutines":
+ v.Goroutines = s.Value.Uint64()
+ case "/sched/latencies:seconds":
+ v.SchedLatency = s.Value.Float64Histogram()
}
}
}
-// CollectProcessMetrics periodically collects various metrics about the running
-// process.
+// CollectProcessMetrics periodically collects various metrics about the running process.
func CollectProcessMetrics(refresh time.Duration) {
// Short circuit if the metrics system is disabled
if !Enabled {
return
}
+
// Create the various data collectors
- memstats := make([]*runtime.MemStats, 2)
- diskstats := make([]*DiskStats, 2)
- for i := 0; i < len(memstats); i++ {
- memstats[i] = new(runtime.MemStats)
- diskstats[i] = new(DiskStats)
- }
+ var (
+ cpustats = make([]CPUStats, 2)
+ diskstats = make([]DiskStats, 2)
+ rstats = make([]runtimeStats, 2)
+ )
+
+ // This scale factor is used for the runtime's time metrics. It's useful to convert to
+ // ns here because the runtime gives times in float seconds, but runtimeHistogram can
+ // only provide integers for the minimum and maximum values.
+ const secondsToNs = float64(time.Second)
+
// Define the various metrics to collect
- memAllocs := GetOrRegisterMeter("system/memory/allocs", DefaultRegistry)
- memFrees := GetOrRegisterMeter("system/memory/frees", DefaultRegistry)
- memInuse := GetOrRegisterMeter("system/memory/inuse", DefaultRegistry)
- memPauses := GetOrRegisterMeter("system/memory/pauses", DefaultRegistry)
-
- var diskReads, diskReadBytes, diskWrites, diskWriteBytes Meter
- if err := ReadDiskStats(diskstats[0]); err == nil {
- diskReads = GetOrRegisterMeter("system/disk/readcount", DefaultRegistry)
- diskReadBytes = GetOrRegisterMeter("system/disk/readdata", DefaultRegistry)
- diskWrites = GetOrRegisterMeter("system/disk/writecount", DefaultRegistry)
- diskWriteBytes = GetOrRegisterMeter("system/disk/writedata", DefaultRegistry)
- } else {
- log.Debug("Failed to read disk metrics", "err", err)
- }
- // Iterate loading the different stats and updating the meters
- for i := 1; ; i++ {
- runtime.ReadMemStats(memstats[i%2])
- memAllocs.Mark(int64(memstats[i%2].Mallocs - memstats[(i-1)%2].Mallocs))
- memFrees.Mark(int64(memstats[i%2].Frees - memstats[(i-1)%2].Frees))
- memInuse.Mark(int64(memstats[i%2].Alloc - memstats[(i-1)%2].Alloc))
- memPauses.Mark(int64(memstats[i%2].PauseTotalNs - memstats[(i-1)%2].PauseTotalNs))
-
- if ReadDiskStats(diskstats[i%2]) == nil {
- diskReads.Mark(diskstats[i%2].ReadCount - diskstats[(i-1)%2].ReadCount)
- diskReadBytes.Mark(diskstats[i%2].ReadBytes - diskstats[(i-1)%2].ReadBytes)
- diskWrites.Mark(diskstats[i%2].WriteCount - diskstats[(i-1)%2].WriteCount)
- diskWriteBytes.Mark(diskstats[i%2].WriteBytes - diskstats[(i-1)%2].WriteBytes)
+ var (
+ cpuSysLoad = GetOrRegisterGauge("system/cpu/sysload", DefaultRegistry)
+ cpuSysWait = GetOrRegisterGauge("system/cpu/syswait", DefaultRegistry)
+ cpuProcLoad = GetOrRegisterGauge("system/cpu/procload", DefaultRegistry)
+ cpuSysLoadTotal = GetOrRegisterCounterFloat64("system/cpu/sysload/total", DefaultRegistry)
+ cpuSysWaitTotal = GetOrRegisterCounterFloat64("system/cpu/syswait/total", DefaultRegistry)
+ cpuProcLoadTotal = GetOrRegisterCounterFloat64("system/cpu/procload/total", DefaultRegistry)
+ cpuThreads = GetOrRegisterGauge("system/cpu/threads", DefaultRegistry)
+ cpuGoroutines = GetOrRegisterGauge("system/cpu/goroutines", DefaultRegistry)
+ cpuSchedLatency = getOrRegisterRuntimeHistogram("system/cpu/schedlatency", secondsToNs, nil)
+ memPauses = getOrRegisterRuntimeHistogram("system/memory/pauses", secondsToNs, nil)
+ memAllocs = GetOrRegisterMeter("system/memory/allocs", DefaultRegistry)
+ memFrees = GetOrRegisterMeter("system/memory/frees", DefaultRegistry)
+ memTotal = GetOrRegisterGauge("system/memory/held", DefaultRegistry)
+ heapUsed = GetOrRegisterGauge("system/memory/used", DefaultRegistry)
+ heapObjects = GetOrRegisterGauge("system/memory/objects", DefaultRegistry)
+ diskReads = GetOrRegisterMeter("system/disk/readcount", DefaultRegistry)
+ diskReadBytes = GetOrRegisterMeter("system/disk/readdata", DefaultRegistry)
+ diskReadBytesCounter = GetOrRegisterCounter("system/disk/readbytes", DefaultRegistry)
+ diskWrites = GetOrRegisterMeter("system/disk/writecount", DefaultRegistry)
+ diskWriteBytes = GetOrRegisterMeter("system/disk/writedata", DefaultRegistry)
+ diskWriteBytesCounter = GetOrRegisterCounter("system/disk/writebytes", DefaultRegistry)
+ )
+
+ var lastCollectTime time.Time
+
+ // Iterate loading the different stats and updating the meters.
+ now, prev := 0, 1
+ for ; ; now, prev = prev, now {
+ // Gather CPU times.
+ ReadCPUStats(&cpustats[now])
+ collectTime := time.Now()
+ secondsSinceLastCollect := collectTime.Sub(lastCollectTime).Seconds()
+ lastCollectTime = collectTime
+ if secondsSinceLastCollect > 0 {
+ sysLoad := cpustats[now].GlobalTime - cpustats[prev].GlobalTime
+ sysWait := cpustats[now].GlobalWait - cpustats[prev].GlobalWait
+ procLoad := cpustats[now].LocalTime - cpustats[prev].LocalTime
+ // Convert to integer percentage.
+ cpuSysLoad.Update(int64(sysLoad / secondsSinceLastCollect * 100))
+ cpuSysWait.Update(int64(sysWait / secondsSinceLastCollect * 100))
+ cpuProcLoad.Update(int64(procLoad / secondsSinceLastCollect * 100))
+ // increment counters (ms)
+ cpuSysLoadTotal.Inc(sysLoad)
+ cpuSysWaitTotal.Inc(sysWait)
+ cpuProcLoadTotal.Inc(procLoad)
}
+
+ // Threads
+ cpuThreads.Update(int64(threadCreateProfile.Count()))
+
+ // Go runtime metrics
+ readRuntimeStats(&rstats[now])
+
+ cpuGoroutines.Update(int64(rstats[now].Goroutines))
+ cpuSchedLatency.update(rstats[now].SchedLatency)
+ memPauses.update(rstats[now].GCPauses)
+
+ memAllocs.Mark(int64(rstats[now].GCAllocBytes - rstats[prev].GCAllocBytes))
+ memFrees.Mark(int64(rstats[now].GCFreedBytes - rstats[prev].GCFreedBytes))
+
+ memTotal.Update(int64(rstats[now].MemTotal))
+ heapUsed.Update(int64(rstats[now].MemTotal - rstats[now].HeapUnused - rstats[now].HeapFree - rstats[now].HeapReleased))
+ heapObjects.Update(int64(rstats[now].HeapObjects))
+
+ // Disk
+ if ReadDiskStats(&diskstats[now]) == nil {
+ diskReads.Mark(diskstats[now].ReadCount - diskstats[prev].ReadCount)
+ diskReadBytes.Mark(diskstats[now].ReadBytes - diskstats[prev].ReadBytes)
+ diskWrites.Mark(diskstats[now].WriteCount - diskstats[prev].WriteCount)
+ diskWriteBytes.Mark(diskstats[now].WriteBytes - diskstats[prev].WriteBytes)
+ diskReadBytesCounter.Inc(diskstats[now].ReadBytes - diskstats[prev].ReadBytes)
+ diskWriteBytesCounter.Inc(diskstats[now].WriteBytes - diskstats[prev].WriteBytes)
+ }
+
time.Sleep(refresh)
}
}
diff --git a/metrics/metrics_test.go b/metrics/metrics_test.go
index df36da0ad..2861d5f2c 100644
--- a/metrics/metrics_test.go
+++ b/metrics/metrics_test.go
@@ -2,8 +2,6 @@ package metrics
import (
"fmt"
- "io/ioutil"
- "log"
"sync"
"testing"
"time"
@@ -11,22 +9,22 @@ import (
const FANOUT = 128
-// Stop the compiler from complaining during debugging.
-var (
- _ = ioutil.Discard
- _ = log.LstdFlags
-)
+func TestReadRuntimeValues(t *testing.T) {
+ var v runtimeStats
+ readRuntimeStats(&v)
+ t.Logf("%+v", v)
+}
func BenchmarkMetrics(b *testing.B) {
r := NewRegistry()
c := NewRegisteredCounter("counter", r)
+ cf := NewRegisteredCounterFloat64("counterfloat64", r)
g := NewRegisteredGauge("gauge", r)
gf := NewRegisteredGaugeFloat64("gaugefloat64", r)
h := NewRegisteredHistogram("histogram", r, NewUniformSample(100))
m := NewRegisteredMeter("meter", r)
t := NewRegisteredTimer("timer", r)
RegisterDebugGCStats(r)
- RegisterRuntimeMemStats(r)
b.ResetTimer()
ch := make(chan bool)
@@ -48,24 +46,6 @@ func BenchmarkMetrics(b *testing.B) {
}()
//*/
- wgR := &sync.WaitGroup{}
- //*
- wgR.Add(1)
- go func() {
- defer wgR.Done()
- //log.Println("go CaptureRuntimeMemStats")
- for {
- select {
- case <-ch:
- //log.Println("done CaptureRuntimeMemStats")
- return
- default:
- CaptureRuntimeMemStatsOnce(r)
- }
- }
- }()
- //*/
-
wgW := &sync.WaitGroup{}
/*
wgW.Add(1)
@@ -78,7 +58,7 @@ func BenchmarkMetrics(b *testing.B) {
//log.Println("done Write")
return
default:
- WriteOnce(r, ioutil.Discard)
+ WriteOnce(r, io.Discard)
}
}
}()
@@ -92,6 +72,7 @@ func BenchmarkMetrics(b *testing.B) {
//log.Println("go", i)
for i := 0; i < b.N; i++ {
c.Inc(1)
+ cf.Inc(1.0)
g.Update(int64(i))
gf.Update(float64(i))
h.Update(int64(i))
@@ -104,7 +85,6 @@ func BenchmarkMetrics(b *testing.B) {
wg.Wait()
close(ch)
wgD.Wait()
- wgR.Wait()
wgW.Wait()
}
@@ -118,8 +98,8 @@ func Example() {
t.Time(func() { time.Sleep(10 * time.Millisecond) })
t.Update(1)
- fmt.Println(c.Count())
- fmt.Println(t.Min())
+ fmt.Println(c.Snapshot().Count())
+ fmt.Println(t.Snapshot().Min())
// Output: 17
// 1
}
diff --git a/metrics/opentsdb.go b/metrics/opentsdb.go
index df7f152ed..e81690f94 100644
--- a/metrics/opentsdb.go
+++ b/metrics/opentsdb.go
@@ -3,6 +3,7 @@ package metrics
import (
"bufio"
"fmt"
+ "io"
"log"
"net"
"os"
@@ -10,7 +11,7 @@ import (
"time"
)
-var shortHostName string = ""
+var shortHostName = ""
// OpenTSDBConfig provides a container with configuration parameters for
// the OpenTSDB exporter
@@ -57,24 +58,22 @@ func getShortHostname() string {
return shortHostName
}
-func openTSDB(c *OpenTSDBConfig) error {
- shortHostname := getShortHostname()
- now := time.Now().Unix()
+// writeRegistry writes the registry-metrics on the opentsb format.
+func (c *OpenTSDBConfig) writeRegistry(w io.Writer, now int64, shortHostname string) {
du := float64(c.DurationUnit)
- conn, err := net.DialTCP("tcp", nil, c.Addr)
- if nil != err {
- return err
- }
- defer conn.Close()
- w := bufio.NewWriter(conn)
+
c.Registry.Each(func(name string, i interface{}) {
switch metric := i.(type) {
case Counter:
- fmt.Fprintf(w, "put %s.%s.count %d %d host=%s\n", c.Prefix, name, now, metric.Count(), shortHostname)
+ fmt.Fprintf(w, "put %s.%s.count %d %d host=%s\n", c.Prefix, name, now, metric.Snapshot().Count(), shortHostname)
+ case CounterFloat64:
+ fmt.Fprintf(w, "put %s.%s.count %d %f host=%s\n", c.Prefix, name, now, metric.Snapshot().Count(), shortHostname)
case Gauge:
- fmt.Fprintf(w, "put %s.%s.value %d %d host=%s\n", c.Prefix, name, now, metric.Value(), shortHostname)
+ fmt.Fprintf(w, "put %s.%s.value %d %d host=%s\n", c.Prefix, name, now, metric.Snapshot().Value(), shortHostname)
case GaugeFloat64:
- fmt.Fprintf(w, "put %s.%s.value %d %f host=%s\n", c.Prefix, name, now, metric.Value(), shortHostname)
+ fmt.Fprintf(w, "put %s.%s.value %d %f host=%s\n", c.Prefix, name, now, metric.Snapshot().Value(), shortHostname)
+ case GaugeInfo:
+ fmt.Fprintf(w, "put %s.%s.value %d %s host=%s\n", c.Prefix, name, now, metric.Snapshot().Value().String(), shortHostname)
case Histogram:
h := metric.Snapshot()
ps := h.Percentiles([]float64{0.5, 0.75, 0.95, 0.99, 0.999})
@@ -113,7 +112,17 @@ func openTSDB(c *OpenTSDBConfig) error {
fmt.Fprintf(w, "put %s.%s.fifteen-minute %d %.2f host=%s\n", c.Prefix, name, now, t.Rate15(), shortHostname)
fmt.Fprintf(w, "put %s.%s.mean-rate %d %.2f host=%s\n", c.Prefix, name, now, t.RateMean(), shortHostname)
}
- w.Flush()
})
+}
+
+func openTSDB(c *OpenTSDBConfig) error {
+ conn, err := net.DialTCP("tcp", nil, c.Addr)
+ if nil != err {
+ return err
+ }
+ defer conn.Close()
+ w := bufio.NewWriter(conn)
+ c.writeRegistry(w, time.Now().Unix(), getShortHostname())
+ w.Flush()
return nil
}
diff --git a/metrics/opentsdb_test.go b/metrics/opentsdb_test.go
index c43728960..4548309f9 100644
--- a/metrics/opentsdb_test.go
+++ b/metrics/opentsdb_test.go
@@ -1,7 +1,11 @@
package metrics
import (
+ "fmt"
"net"
+ "os"
+ "strings"
+ "testing"
"time"
)
@@ -19,3 +23,44 @@ func ExampleOpenTSDBWithConfig() {
DurationUnit: time.Millisecond,
})
}
+
+func TestExampleOpenTSB(t *testing.T) {
+ r := NewOrderedRegistry()
+ NewRegisteredGaugeInfo("foo", r).Update(GaugeInfoValue{"chain_id": "5"})
+ NewRegisteredGaugeFloat64("pi", r).Update(3.14)
+ NewRegisteredCounter("months", r).Inc(12)
+ NewRegisteredCounterFloat64("tau", r).Inc(1.57)
+ NewRegisteredMeter("elite", r).Mark(1337)
+ NewRegisteredTimer("second", r).Update(time.Second)
+ NewRegisteredCounterFloat64("tau", r).Inc(1.57)
+ NewRegisteredCounterFloat64("tau", r).Inc(1.57)
+
+ w := new(strings.Builder)
+ (&OpenTSDBConfig{
+ Registry: r,
+ DurationUnit: time.Millisecond,
+ Prefix: "pre",
+ }).writeRegistry(w, 978307200, "hal9000")
+
+ wantB, err := os.ReadFile("./testdata/opentsb.want")
+ if err != nil {
+ t.Fatal(err)
+ }
+ if have, want := w.String(), string(wantB); have != want {
+ t.Errorf("\nhave:\n%v\nwant:\n%v\n", have, want)
+ t.Logf("have vs want:\n%v", findFirstDiffPos(have, want))
+ }
+}
+
+func findFirstDiffPos(a, b string) string {
+ yy := strings.Split(b, "\n")
+ for i, x := range strings.Split(a, "\n") {
+ if i >= len(yy) {
+ return fmt.Sprintf("have:%d: %s\nwant:%d: ", i, x, i)
+ }
+ if y := yy[i]; x != y {
+ return fmt.Sprintf("have:%d: %s\nwant:%d: %s", i, x, i, y)
+ }
+ }
+ return ""
+}
diff --git a/metrics/prometheus/collector.go b/metrics/prometheus/collector.go
new file mode 100644
index 000000000..3cab99e2a
--- /dev/null
+++ b/metrics/prometheus/collector.go
@@ -0,0 +1,169 @@
+// Copyright 2019 The go-ethereum Authors
+// This file is part of the go-ethereum library.
+//
+// The go-ethereum library is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Lesser General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+//
+// The go-ethereum library is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Lesser General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public License
+// along with the go-ethereum library. If not, see .
+
+package prometheus
+
+import (
+ "bytes"
+ "fmt"
+ "sort"
+ "strconv"
+ "strings"
+
+ "github.com/tomochain/tomochain/metrics"
+)
+
+var (
+ typeGaugeTpl = "# TYPE %s gauge\n"
+ typeCounterTpl = "# TYPE %s counter\n"
+ typeSummaryTpl = "# TYPE %s summary\n"
+ keyValueTpl = "%s %v\n\n"
+ keyQuantileTagValueTpl = "%s {quantile=\"%s\"} %v\n"
+)
+
+// collector is a collection of byte buffers that aggregate Prometheus reports
+// for different metric types.
+type collector struct {
+ buff *bytes.Buffer
+}
+
+// newCollector creates a new Prometheus metric aggregator.
+func newCollector() *collector {
+ return &collector{
+ buff: &bytes.Buffer{},
+ }
+}
+
+// Add adds the metric i to the collector. This method returns an error if the
+// metric type is not supported/known.
+func (c *collector) Add(name string, i any) error {
+ switch m := i.(type) {
+ case metrics.Counter:
+ c.addCounter(name, m.Snapshot())
+ case metrics.CounterFloat64:
+ c.addCounterFloat64(name, m.Snapshot())
+ case metrics.Gauge:
+ c.addGauge(name, m.Snapshot())
+ case metrics.GaugeFloat64:
+ c.addGaugeFloat64(name, m.Snapshot())
+ case metrics.GaugeInfo:
+ c.addGaugeInfo(name, m.Snapshot())
+ case metrics.Histogram:
+ c.addHistogram(name, m.Snapshot())
+ case metrics.Meter:
+ c.addMeter(name, m.Snapshot())
+ case metrics.Timer:
+ c.addTimer(name, m.Snapshot())
+ case metrics.ResettingTimer:
+ c.addResettingTimer(name, m.Snapshot())
+ default:
+ return fmt.Errorf("unknown prometheus metric type %T", i)
+ }
+ return nil
+}
+
+func (c *collector) addCounter(name string, m metrics.CounterSnapshot) {
+ c.writeGaugeCounter(name, m.Count())
+}
+
+func (c *collector) addCounterFloat64(name string, m metrics.CounterFloat64Snapshot) {
+ c.writeGaugeCounter(name, m.Count())
+}
+
+func (c *collector) addGauge(name string, m metrics.GaugeSnapshot) {
+ c.writeGaugeCounter(name, m.Value())
+}
+
+func (c *collector) addGaugeFloat64(name string, m metrics.GaugeFloat64Snapshot) {
+ c.writeGaugeCounter(name, m.Value())
+}
+
+func (c *collector) addGaugeInfo(name string, m metrics.GaugeInfoSnapshot) {
+ c.writeGaugeInfo(name, m.Value())
+}
+
+func (c *collector) addHistogram(name string, m metrics.HistogramSnapshot) {
+ pv := []float64{0.5, 0.75, 0.95, 0.99, 0.999, 0.9999}
+ ps := m.Percentiles(pv)
+ c.writeSummaryCounter(name, m.Count())
+ c.buff.WriteString(fmt.Sprintf(typeSummaryTpl, mutateKey(name)))
+ for i := range pv {
+ c.writeSummaryPercentile(name, strconv.FormatFloat(pv[i], 'f', -1, 64), ps[i])
+ }
+ c.buff.WriteRune('\n')
+}
+
+func (c *collector) addMeter(name string, m metrics.MeterSnapshot) {
+ c.writeGaugeCounter(name, m.Count())
+}
+
+func (c *collector) addTimer(name string, m metrics.TimerSnapshot) {
+ pv := []float64{0.5, 0.75, 0.95, 0.99, 0.999, 0.9999}
+ ps := m.Percentiles(pv)
+ c.writeSummaryCounter(name, m.Count())
+ c.buff.WriteString(fmt.Sprintf(typeSummaryTpl, mutateKey(name)))
+ for i := range pv {
+ c.writeSummaryPercentile(name, strconv.FormatFloat(pv[i], 'f', -1, 64), ps[i])
+ }
+ c.buff.WriteRune('\n')
+}
+
+func (c *collector) addResettingTimer(name string, m metrics.ResettingTimerSnapshot) {
+ if m.Count() <= 0 {
+ return
+ }
+ ps := m.Percentiles([]float64{0.50, 0.95, 0.99})
+ c.writeSummaryCounter(name, m.Count())
+ c.buff.WriteString(fmt.Sprintf(typeSummaryTpl, mutateKey(name)))
+ c.writeSummaryPercentile(name, "0.50", ps[0])
+ c.writeSummaryPercentile(name, "0.95", ps[1])
+ c.writeSummaryPercentile(name, "0.99", ps[2])
+ c.buff.WriteRune('\n')
+}
+
+func (c *collector) writeGaugeInfo(name string, value metrics.GaugeInfoValue) {
+ name = mutateKey(name)
+ c.buff.WriteString(fmt.Sprintf(typeGaugeTpl, name))
+ c.buff.WriteString(name)
+ c.buff.WriteString(" ")
+ var kvs []string
+ for k, v := range value {
+ kvs = append(kvs, fmt.Sprintf("%v=%q", k, v))
+ }
+ sort.Strings(kvs)
+ c.buff.WriteString(fmt.Sprintf("{%v} 1\n\n", strings.Join(kvs, ", ")))
+}
+
+func (c *collector) writeGaugeCounter(name string, value interface{}) {
+ name = mutateKey(name)
+ c.buff.WriteString(fmt.Sprintf(typeGaugeTpl, name))
+ c.buff.WriteString(fmt.Sprintf(keyValueTpl, name, value))
+}
+
+func (c *collector) writeSummaryCounter(name string, value interface{}) {
+ name = mutateKey(name + "_count")
+ c.buff.WriteString(fmt.Sprintf(typeCounterTpl, name))
+ c.buff.WriteString(fmt.Sprintf(keyValueTpl, name, value))
+}
+
+func (c *collector) writeSummaryPercentile(name, p string, value interface{}) {
+ name = mutateKey(name)
+ c.buff.WriteString(fmt.Sprintf(keyQuantileTagValueTpl, name, p, value))
+}
+
+func mutateKey(key string) string {
+ return strings.ReplaceAll(key, "/", "_")
+}
diff --git a/metrics/prometheus/collector_test.go b/metrics/prometheus/collector_test.go
new file mode 100644
index 000000000..e3021b548
--- /dev/null
+++ b/metrics/prometheus/collector_test.go
@@ -0,0 +1,65 @@
+// Copyright 2023 The go-ethereum Authors
+// This file is part of the go-ethereum library.
+//
+// The go-ethereum library is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Lesser General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+//
+// The go-ethereum library is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Lesser General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public License
+// along with the go-ethereum library. If not, see .
+
+package prometheus
+
+import (
+ "fmt"
+ "os"
+ "strings"
+ "testing"
+
+ "github.com/tomochain/tomochain/metrics"
+ "github.com/tomochain/tomochain/metrics/internal"
+)
+
+func TestMain(m *testing.M) {
+ metrics.Enabled = true
+ os.Exit(m.Run())
+}
+
+func TestCollector(t *testing.T) {
+ var (
+ c = newCollector()
+ want string
+ )
+ internal.ExampleMetrics().Each(func(name string, i interface{}) {
+ c.Add(name, i)
+ })
+ if wantB, err := os.ReadFile("./testdata/prometheus.want"); err != nil {
+ t.Fatal(err)
+ } else {
+ want = string(wantB)
+ }
+ if have := c.buff.String(); have != want {
+ t.Logf("have\n%v", have)
+ t.Logf("have vs want:\n%v", findFirstDiffPos(have, want))
+ t.Fatalf("unexpected collector output")
+ }
+}
+
+func findFirstDiffPos(a, b string) string {
+ yy := strings.Split(b, "\n")
+ for i, x := range strings.Split(a, "\n") {
+ if i >= len(yy) {
+ return fmt.Sprintf("have:%d: %s\nwant:%d: ", i, x, i)
+ }
+ if y := yy[i]; x != y {
+ return fmt.Sprintf("have:%d: %s\nwant:%d: %s", i, x, i, y)
+ }
+ }
+ return ""
+}
diff --git a/metrics/prometheus/prometheus.go b/metrics/prometheus/prometheus.go
new file mode 100644
index 000000000..2e904fb08
--- /dev/null
+++ b/metrics/prometheus/prometheus.go
@@ -0,0 +1,52 @@
+// Copyright 2019 The go-ethereum Authors
+// This file is part of the go-ethereum library.
+//
+// The go-ethereum library is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Lesser General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+//
+// The go-ethereum library is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Lesser General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public License
+// along with the go-ethereum library. If not, see .
+
+// Package prometheus exposes go-metrics into a Prometheus format.
+package prometheus
+
+import (
+ "fmt"
+ "net/http"
+ "sort"
+
+ "github.com/tomochain/tomochain/log"
+ "github.com/tomochain/tomochain/metrics"
+)
+
+// Handler returns an HTTP handler which dump metrics in Prometheus format.
+func Handler(reg metrics.Registry) http.Handler {
+ return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+ // Gather and pre-sort the metrics to avoid random listings
+ var names []string
+ reg.Each(func(name string, i interface{}) {
+ names = append(names, name)
+ })
+ sort.Strings(names)
+
+ // Aggregate all the metrics into a Prometheus collector
+ c := newCollector()
+
+ for _, name := range names {
+ i := reg.Get(name)
+ if err := c.Add(name, i); err != nil {
+ log.Warn("Unknown Prometheus metric type", "type", fmt.Sprintf("%T", i))
+ }
+ }
+ w.Header().Add("Content-Type", "text/plain")
+ w.Header().Add("Content-Length", fmt.Sprint(c.buff.Len()))
+ w.Write(c.buff.Bytes())
+ })
+}
diff --git a/metrics/prometheus/testdata/prometheus.want b/metrics/prometheus/testdata/prometheus.want
new file mode 100644
index 000000000..861c5f5cf
--- /dev/null
+++ b/metrics/prometheus/testdata/prometheus.want
@@ -0,0 +1,70 @@
+# TYPE system_cpu_schedlatency_count counter
+system_cpu_schedlatency_count 5645
+
+# TYPE system_cpu_schedlatency summary
+system_cpu_schedlatency {quantile="0.5"} 0
+system_cpu_schedlatency {quantile="0.75"} 7168
+system_cpu_schedlatency {quantile="0.95"} 1.6777216e+07
+system_cpu_schedlatency {quantile="0.99"} 2.9360128e+07
+system_cpu_schedlatency {quantile="0.999"} 3.3554432e+07
+system_cpu_schedlatency {quantile="0.9999"} 3.3554432e+07
+
+# TYPE system_memory_pauses_count counter
+system_memory_pauses_count 14
+
+# TYPE system_memory_pauses summary
+system_memory_pauses {quantile="0.5"} 32768
+system_memory_pauses {quantile="0.75"} 57344
+system_memory_pauses {quantile="0.95"} 196608
+system_memory_pauses {quantile="0.99"} 196608
+system_memory_pauses {quantile="0.999"} 196608
+system_memory_pauses {quantile="0.9999"} 196608
+
+# TYPE test_counter gauge
+test_counter 12345
+
+# TYPE test_counter_float64 gauge
+test_counter_float64 54321.98
+
+# TYPE test_gauge gauge
+test_gauge 23456
+
+# TYPE test_gauge_float64 gauge
+test_gauge_float64 34567.89
+
+# TYPE test_gauge_info gauge
+test_gauge_info {arch="amd64", commit="7caa2d8163ae3132c1c2d6978c76610caee2d949", os="linux", protocol_versions="64 65 66", version="1.10.18-unstable"} 1
+
+# TYPE test_histogram_count counter
+test_histogram_count 3
+
+# TYPE test_histogram summary
+test_histogram {quantile="0.5"} 2
+test_histogram {quantile="0.75"} 3
+test_histogram {quantile="0.95"} 3
+test_histogram {quantile="0.99"} 3
+test_histogram {quantile="0.999"} 3
+test_histogram {quantile="0.9999"} 3
+
+# TYPE test_meter gauge
+test_meter 0
+
+# TYPE test_resetting_timer_count counter
+test_resetting_timer_count 6
+
+# TYPE test_resetting_timer summary
+test_resetting_timer {quantile="0.50"} 1.25e+07
+test_resetting_timer {quantile="0.95"} 1.2e+08
+test_resetting_timer {quantile="0.99"} 1.2e+08
+
+# TYPE test_timer_count counter
+test_timer_count 6
+
+# TYPE test_timer summary
+test_timer {quantile="0.5"} 2.25e+07
+test_timer {quantile="0.75"} 4.8e+07
+test_timer {quantile="0.95"} 1.2e+08
+test_timer {quantile="0.99"} 1.2e+08
+test_timer {quantile="0.999"} 1.2e+08
+test_timer {quantile="0.9999"} 1.2e+08
+
diff --git a/metrics/registry.go b/metrics/registry.go
index cc34c9dfd..8bfbc0804 100644
--- a/metrics/registry.go
+++ b/metrics/registry.go
@@ -3,6 +3,7 @@ package metrics
import (
"fmt"
"reflect"
+ "sort"
"strings"
"sync"
)
@@ -45,21 +46,39 @@ type Registry interface {
// Unregister the metric with the given name.
Unregister(string)
+}
- // Unregister all metrics. (Mostly for testing.)
- UnregisterAll()
+type orderedRegistry struct {
+ StandardRegistry
}
-// The standard implementation of a Registry is a mutex-protected map
-// of names to metrics.
-type StandardRegistry struct {
- metrics map[string]interface{}
- mutex sync.Mutex
+// Call the given function for each registered metric.
+func (r *orderedRegistry) Each(f func(string, interface{})) {
+ var names []string
+ reg := r.registered()
+ for name := range reg {
+ names = append(names, name)
+ }
+ sort.Strings(names)
+ for _, name := range names {
+ f(name, reg[name])
+ }
}
-// Create a new registry.
+// NewRegistry creates a new registry.
func NewRegistry() Registry {
- return &StandardRegistry{metrics: make(map[string]interface{})}
+ return new(StandardRegistry)
+}
+
+// NewOrderedRegistry creates a new ordered registry (for testing).
+func NewOrderedRegistry() Registry {
+ return new(orderedRegistry)
+}
+
+// The standard implementation of a Registry uses sync.map
+// of names to metrics.
+type StandardRegistry struct {
+ metrics sync.Map
}
// Call the given function for each registered metric.
@@ -71,9 +90,8 @@ func (r *StandardRegistry) Each(f func(string, interface{})) {
// Get the metric by the given name or nil if none is registered.
func (r *StandardRegistry) Get(name string) interface{} {
- r.mutex.Lock()
- defer r.mutex.Unlock()
- return r.metrics[name]
+ item, _ := r.metrics.Load(name)
+ return item
}
// Gets an existing metric or creates and registers a new one. Threadsafe
@@ -81,35 +99,48 @@ func (r *StandardRegistry) Get(name string) interface{} {
// The interface can be the metric to register if not found in registry,
// or a function returning the metric for lazy instantiation.
func (r *StandardRegistry) GetOrRegister(name string, i interface{}) interface{} {
- r.mutex.Lock()
- defer r.mutex.Unlock()
- if metric, ok := r.metrics[name]; ok {
- return metric
+ // fast path
+ cached, ok := r.metrics.Load(name)
+ if ok {
+ return cached
}
if v := reflect.ValueOf(i); v.Kind() == reflect.Func {
i = v.Call(nil)[0].Interface()
}
- r.register(name, i)
- return i
+ item, _, ok := r.loadOrRegister(name, i)
+ if !ok {
+ return i
+ }
+ return item
}
// Register the given metric under the given name. Returns a DuplicateMetric
// if a metric by the given name is already registered.
func (r *StandardRegistry) Register(name string, i interface{}) error {
- r.mutex.Lock()
- defer r.mutex.Unlock()
- return r.register(name, i)
+ // fast path
+ _, ok := r.metrics.Load(name)
+ if ok {
+ return DuplicateMetric(name)
+ }
+
+ if v := reflect.ValueOf(i); v.Kind() == reflect.Func {
+ i = v.Call(nil)[0].Interface()
+ }
+ _, loaded, _ := r.loadOrRegister(name, i)
+ if loaded {
+ return DuplicateMetric(name)
+ }
+ return nil
}
// Run all registered healthchecks.
func (r *StandardRegistry) RunHealthchecks() {
- r.mutex.Lock()
- defer r.mutex.Unlock()
- for _, i := range r.metrics {
- if h, ok := i.(Healthcheck); ok {
+ r.metrics.Range(func(key, value any) bool {
+ if h, ok := value.(Healthcheck); ok {
h.Check()
}
- }
+ return true
+ })
}
// GetAll metrics in the Registry
@@ -119,11 +150,13 @@ func (r *StandardRegistry) GetAll() map[string]map[string]interface{} {
values := make(map[string]interface{})
switch metric := i.(type) {
case Counter:
- values["count"] = metric.Count()
+ values["count"] = metric.Snapshot().Count()
+ case CounterFloat64:
+ values["count"] = metric.Snapshot().Count()
case Gauge:
- values["value"] = metric.Value()
+ values["value"] = metric.Snapshot().Value()
case GaugeFloat64:
- values["value"] = metric.Value()
+ values["value"] = metric.Snapshot().Value()
case Healthcheck:
values["error"] = nil
metric.Check()
@@ -175,45 +208,31 @@ func (r *StandardRegistry) GetAll() map[string]map[string]interface{} {
// Unregister the metric with the given name.
func (r *StandardRegistry) Unregister(name string) {
- r.mutex.Lock()
- defer r.mutex.Unlock()
r.stop(name)
- delete(r.metrics, name)
-}
-
-// Unregister all metrics. (Mostly for testing.)
-func (r *StandardRegistry) UnregisterAll() {
- r.mutex.Lock()
- defer r.mutex.Unlock()
- for name := range r.metrics {
- r.stop(name)
- delete(r.metrics, name)
- }
+ r.metrics.LoadAndDelete(name)
}
-func (r *StandardRegistry) register(name string, i interface{}) error {
- if _, ok := r.metrics[name]; ok {
- return DuplicateMetric(name)
- }
+func (r *StandardRegistry) loadOrRegister(name string, i interface{}) (interface{}, bool, bool) {
switch i.(type) {
- case Counter, Gauge, GaugeFloat64, Healthcheck, Histogram, Meter, Timer, ResettingTimer:
- r.metrics[name] = i
+ case Counter, CounterFloat64, Gauge, GaugeFloat64, GaugeInfo, Healthcheck, Histogram, Meter, Timer, ResettingTimer:
+ default:
+ return nil, false, false
}
- return nil
+ item, loaded := r.metrics.LoadOrStore(name, i)
+ return item, loaded, true
}
func (r *StandardRegistry) registered() map[string]interface{} {
- r.mutex.Lock()
- defer r.mutex.Unlock()
- metrics := make(map[string]interface{}, len(r.metrics))
- for name, i := range r.metrics {
- metrics[name] = i
- }
+ metrics := make(map[string]interface{})
+ r.metrics.Range(func(key, value any) bool {
+ metrics[key.(string)] = value
+ return true
+ })
return metrics
}
func (r *StandardRegistry) stop(name string) {
- if i, ok := r.metrics[name]; ok {
+ if i, ok := r.metrics.Load(name); ok {
if s, ok := i.(Stoppable); ok {
s.Stop()
}
@@ -306,12 +325,11 @@ func (r *PrefixedRegistry) Unregister(name string) {
r.underlying.Unregister(realName)
}
-// Unregister all metrics. (Mostly for testing.)
-func (r *PrefixedRegistry) UnregisterAll() {
- r.underlying.UnregisterAll()
-}
-
-var DefaultRegistry Registry = NewRegistry()
+var (
+ DefaultRegistry = NewRegistry()
+ EphemeralRegistry = NewRegistry()
+ AccountingRegistry = NewRegistry() // registry used in swarm
+)
// Call the given function for each registered metric.
func Each(f func(string, interface{})) {
diff --git a/metrics/registry_test.go b/metrics/registry_test.go
index a63e485fe..75012dd4a 100644
--- a/metrics/registry_test.go
+++ b/metrics/registry_test.go
@@ -1,6 +1,7 @@
package metrics
import (
+ "sync"
"testing"
)
@@ -13,26 +14,50 @@ func BenchmarkRegistry(b *testing.B) {
}
}
+func BenchmarkRegistryGetOrRegisterParallel_8(b *testing.B) {
+ benchmarkRegistryGetOrRegisterParallel(b, 8)
+}
+
+func BenchmarkRegistryGetOrRegisterParallel_32(b *testing.B) {
+ benchmarkRegistryGetOrRegisterParallel(b, 32)
+}
+
+func benchmarkRegistryGetOrRegisterParallel(b *testing.B, amount int) {
+ r := NewRegistry()
+ b.ResetTimer()
+ var wg sync.WaitGroup
+ for i := 0; i < amount; i++ {
+ wg.Add(1)
+ go func() {
+ for i := 0; i < b.N; i++ {
+ r.GetOrRegister("foo", NewMeter)
+ }
+ wg.Done()
+ }()
+ }
+ wg.Wait()
+}
+
func TestRegistry(t *testing.T) {
r := NewRegistry()
r.Register("foo", NewCounter())
i := 0
r.Each(func(name string, iface interface{}) {
i++
- if "foo" != name {
+ if name != "foo" {
t.Fatal(name)
}
if _, ok := iface.(Counter); !ok {
t.Fatal(iface)
}
})
- if 1 != i {
+ if i != 1 {
t.Fatal(i)
}
r.Unregister("foo")
i = 0
r.Each(func(string, interface{}) { i++ })
- if 0 != i {
+ if i != 0 {
t.Fatal(i)
}
}
@@ -52,7 +77,7 @@ func TestRegistryDuplicate(t *testing.T) {
t.Fatal(iface)
}
})
- if 1 != i {
+ if i != 1 {
t.Fatal(i)
}
}
@@ -60,11 +85,11 @@ func TestRegistryDuplicate(t *testing.T) {
func TestRegistryGet(t *testing.T) {
r := NewRegistry()
r.Register("foo", NewCounter())
- if count := r.Get("foo").(Counter).Count(); 0 != count {
+ if count := r.Get("foo").(Counter).Snapshot().Count(); count != 0 {
t.Fatal(count)
}
r.Get("foo").(Counter).Inc(1)
- if count := r.Get("foo").(Counter).Count(); 1 != count {
+ if count := r.Get("foo").(Counter).Snapshot().Count(); count != 1 {
t.Fatal(count)
}
}
@@ -271,6 +296,9 @@ func TestChildPrefixedRegistryOfChildRegister(t *testing.T) {
t.Fatal(err.Error())
}
err = r2.Register("baz", NewCounter())
+ if err != nil {
+ t.Fatal(err.Error())
+ }
c := NewCounter()
Register("bars", c)
@@ -278,7 +306,7 @@ func TestChildPrefixedRegistryOfChildRegister(t *testing.T) {
r2.Each(func(name string, m interface{}) {
i++
if name != "prefix.prefix2.baz" {
- //t.Fatal(name)
+ t.Fatal(name)
}
})
if i != 1 {
@@ -294,12 +322,14 @@ func TestWalkRegistries(t *testing.T) {
t.Fatal(err.Error())
}
err = r2.Register("baz", NewCounter())
+ if err != nil {
+ t.Fatal(err.Error())
+ }
c := NewCounter()
Register("bars", c)
_, prefix := findPrefix(r2, "")
- if "prefix.prefix2." != prefix {
+ if prefix != "prefix.prefix2." {
t.Fatal(prefix)
}
-
}
diff --git a/metrics/resetting_sample.go b/metrics/resetting_sample.go
new file mode 100644
index 000000000..c38ffcd3e
--- /dev/null
+++ b/metrics/resetting_sample.go
@@ -0,0 +1,24 @@
+package metrics
+
+// ResettingSample converts an ordinary sample into one that resets whenever its
+// snapshot is retrieved. This will break for multi-monitor systems, but when only
+// a single metric is being pushed out, this ensures that low-frequency events don't
+// skew the charts indefinitely.
+func ResettingSample(sample Sample) Sample {
+ return &resettingSample{
+ Sample: sample,
+ }
+}
+
+// resettingSample is a simple wrapper around a sample that resets it upon the
+// snapshot retrieval.
+type resettingSample struct {
+ Sample
+}
+
+// Snapshot returns a read-only copy of the sample with the original reset.
+func (rs *resettingSample) Snapshot() SampleSnapshot {
+ s := rs.Sample.Snapshot()
+ rs.Sample.Clear()
+ return s
+}
diff --git a/metrics/resetting_timer.go b/metrics/resetting_timer.go
index 57bcb3134..6802e3fce 100644
--- a/metrics/resetting_timer.go
+++ b/metrics/resetting_timer.go
@@ -1,8 +1,6 @@
package metrics
import (
- "math"
- "sort"
"sync"
"time"
)
@@ -10,12 +8,17 @@ import (
// Initial slice capacity for the values stored in a ResettingTimer
const InitialResettingTimerSliceCap = 10
+type ResettingTimerSnapshot interface {
+ Count() int
+ Mean() float64
+ Max() int64
+ Min() int64
+ Percentiles([]float64) []float64
+}
+
// ResettingTimer is used for storing aggregated values for timers, which are reset on every flush interval.
type ResettingTimer interface {
- Values() []int64
- Snapshot() ResettingTimer
- Percentiles([]float64) []int64
- Mean() float64
+ Snapshot() ResettingTimerSnapshot
Time(func())
Update(time.Duration)
UpdateSince(time.Time)
@@ -51,66 +54,40 @@ func NewResettingTimer() ResettingTimer {
}
// NilResettingTimer is a no-op ResettingTimer.
-type NilResettingTimer struct {
-}
-
-// Values is a no-op.
-func (NilResettingTimer) Values() []int64 { return nil }
-
-// Snapshot is a no-op.
-func (NilResettingTimer) Snapshot() ResettingTimer { return NilResettingTimer{} }
-
-// Time is a no-op.
-func (NilResettingTimer) Time(func()) {}
-
-// Update is a no-op.
-func (NilResettingTimer) Update(time.Duration) {}
-
-// Percentiles panics.
-func (NilResettingTimer) Percentiles([]float64) []int64 {
- panic("Percentiles called on a NilResettingTimer")
-}
-
-// Mean panics.
-func (NilResettingTimer) Mean() float64 {
- panic("Mean called on a NilResettingTimer")
-}
-
-// UpdateSince is a no-op.
-func (NilResettingTimer) UpdateSince(time.Time) {}
+type NilResettingTimer struct{}
+
+func (NilResettingTimer) Values() []int64 { return nil }
+func (n NilResettingTimer) Snapshot() ResettingTimerSnapshot { return n }
+func (NilResettingTimer) Time(f func()) { f() }
+func (NilResettingTimer) Update(time.Duration) {}
+func (NilResettingTimer) Percentiles([]float64) []float64 { return nil }
+func (NilResettingTimer) Mean() float64 { return 0.0 }
+func (NilResettingTimer) Max() int64 { return 0 }
+func (NilResettingTimer) Min() int64 { return 0 }
+func (NilResettingTimer) UpdateSince(time.Time) {}
+func (NilResettingTimer) Count() int { return 0 }
// StandardResettingTimer is the standard implementation of a ResettingTimer.
// and Meter.
type StandardResettingTimer struct {
values []int64
- mutex sync.Mutex
-}
+ sum int64 // sum is a running count of the total sum, used later to calculate mean
-// Values returns a slice with all measurements.
-func (t *StandardResettingTimer) Values() []int64 {
- return t.values
+ mutex sync.Mutex
}
// Snapshot resets the timer and returns a read-only copy of its contents.
-func (t *StandardResettingTimer) Snapshot() ResettingTimer {
+func (t *StandardResettingTimer) Snapshot() ResettingTimerSnapshot {
t.mutex.Lock()
defer t.mutex.Unlock()
- currentValues := t.values
- t.values = make([]int64, 0, InitialResettingTimerSliceCap)
-
- return &ResettingTimerSnapshot{
- values: currentValues,
+ snapshot := &resettingTimerSnapshot{}
+ if len(t.values) > 0 {
+ snapshot.mean = float64(t.sum) / float64(len(t.values))
+ snapshot.values = t.values
+ t.values = make([]int64, 0, InitialResettingTimerSliceCap)
}
-}
-
-// Percentiles panics.
-func (t *StandardResettingTimer) Percentiles([]float64) []int64 {
- panic("Percentiles called on a StandardResettingTimer")
-}
-
-// Mean panics.
-func (t *StandardResettingTimer) Mean() float64 {
- panic("Mean called on a StandardResettingTimer")
+ t.sum = 0
+ return snapshot
}
// Record the duration of the execution of the given function.
@@ -125,113 +102,70 @@ func (t *StandardResettingTimer) Update(d time.Duration) {
t.mutex.Lock()
defer t.mutex.Unlock()
t.values = append(t.values, int64(d))
+ t.sum += int64(d)
}
// Record the duration of an event that started at a time and ends now.
func (t *StandardResettingTimer) UpdateSince(ts time.Time) {
- t.mutex.Lock()
- defer t.mutex.Unlock()
- t.values = append(t.values, int64(time.Since(ts)))
+ t.Update(time.Since(ts))
}
-// ResettingTimerSnapshot is a point-in-time copy of another ResettingTimer.
-type ResettingTimerSnapshot struct {
+// resettingTimerSnapshot is a point-in-time copy of another ResettingTimer.
+type resettingTimerSnapshot struct {
values []int64
mean float64
- thresholdBoundaries []int64
+ max int64
+ min int64
+ thresholdBoundaries []float64
calculated bool
}
-// Snapshot returns the snapshot.
-func (t *ResettingTimerSnapshot) Snapshot() ResettingTimer { return t }
-
-// Time panics.
-func (*ResettingTimerSnapshot) Time(func()) {
- panic("Time called on a ResettingTimerSnapshot")
-}
-
-// Update panics.
-func (*ResettingTimerSnapshot) Update(time.Duration) {
- panic("Update called on a ResettingTimerSnapshot")
-}
-
-// UpdateSince panics.
-func (*ResettingTimerSnapshot) UpdateSince(time.Time) {
- panic("UpdateSince called on a ResettingTimerSnapshot")
-}
-
-// Values returns all values from snapshot.
-func (t *ResettingTimerSnapshot) Values() []int64 {
- return t.values
+// Count returns the number of values in the snapshot.
+func (t *resettingTimerSnapshot) Count() int {
+ return len(t.values)
}
// Percentiles returns the boundaries for the input percentiles.
-func (t *ResettingTimerSnapshot) Percentiles(percentiles []float64) []int64 {
+// Note: this method is not thread-safe.
+func (t *resettingTimerSnapshot) Percentiles(percentiles []float64) []float64 {
t.calc(percentiles)
-
return t.thresholdBoundaries
}
// Mean returns the mean of the snapshotted values
-func (t *ResettingTimerSnapshot) Mean() float64 {
+// Note: this method is not thread-safe.
+func (t *resettingTimerSnapshot) Mean() float64 {
if !t.calculated {
- t.calc([]float64{})
+ t.calc(nil)
}
return t.mean
}
-func (t *ResettingTimerSnapshot) calc(percentiles []float64) {
- sort.Sort(Int64Slice(t.values))
-
- count := len(t.values)
- if count > 0 {
- min := t.values[0]
- max := t.values[count-1]
-
- cumulativeValues := make([]int64, count)
- cumulativeValues[0] = min
- for i := 1; i < count; i++ {
- cumulativeValues[i] = t.values[i] + cumulativeValues[i-1]
- }
-
- t.thresholdBoundaries = make([]int64, len(percentiles))
-
- thresholdBoundary := max
-
- for i, pct := range percentiles {
- if count > 1 {
- var abs float64
- if pct >= 0 {
- abs = pct
- } else {
- abs = 100 + pct
- }
- // poor man's math.Round(x):
- // math.Floor(x + 0.5)
- indexOfPerc := int(math.Floor(((abs / 100.0) * float64(count)) + 0.5))
- if pct >= 0 {
- indexOfPerc -= 1 // index offset=0
- }
- thresholdBoundary = t.values[indexOfPerc]
- }
-
- t.thresholdBoundaries[i] = thresholdBoundary
- }
-
- sum := cumulativeValues[count-1]
- t.mean = float64(sum) / float64(count)
- } else {
- t.thresholdBoundaries = make([]int64, len(percentiles))
- t.mean = 0
+// Max returns the max of the snapshotted values
+// Note: this method is not thread-safe.
+func (t *resettingTimerSnapshot) Max() int64 {
+ if !t.calculated {
+ t.calc(nil)
}
-
- t.calculated = true
+ return t.max
}
-// Int64Slice attaches the methods of sort.Interface to []int64, sorting in increasing order.
-type Int64Slice []int64
+// Min returns the min of the snapshotted values
+// Note: this method is not thread-safe.
+func (t *resettingTimerSnapshot) Min() int64 {
+ if !t.calculated {
+ t.calc(nil)
+ }
+ return t.min
+}
-func (s Int64Slice) Len() int { return len(s) }
-func (s Int64Slice) Less(i, j int) bool { return s[i] < s[j] }
-func (s Int64Slice) Swap(i, j int) { s[i], s[j] = s[j], s[i] }
+func (t *resettingTimerSnapshot) calc(percentiles []float64) {
+ scores := CalculatePercentiles(t.values, percentiles)
+ t.thresholdBoundaries = scores
+ if len(t.values) == 0 {
+ return
+ }
+ t.min = t.values[0]
+ t.max = t.values[len(t.values)-1]
+}
diff --git a/metrics/resetting_timer_test.go b/metrics/resetting_timer_test.go
index 58fd47f35..4571fc8eb 100644
--- a/metrics/resetting_timer_test.go
+++ b/metrics/resetting_timer_test.go
@@ -10,9 +10,9 @@ func TestResettingTimer(t *testing.T) {
values []int64
start int
end int
- wantP50 int64
- wantP95 int64
- wantP99 int64
+ wantP50 float64
+ wantP95 float64
+ wantP99 float64
wantMean float64
wantMin int64
wantMax int64
@@ -21,14 +21,14 @@ func TestResettingTimer(t *testing.T) {
values: []int64{},
start: 1,
end: 11,
- wantP50: 5, wantP95: 10, wantP99: 10,
+ wantP50: 5.5, wantP95: 10, wantP99: 10,
wantMin: 1, wantMax: 10, wantMean: 5.5,
},
{
values: []int64{},
start: 1,
end: 101,
- wantP50: 50, wantP95: 95, wantP99: 99,
+ wantP50: 50.5, wantP95: 95.94999999999999, wantP99: 99.99,
wantMin: 1, wantMax: 100, wantMean: 50.5,
},
{
@@ -56,11 +56,11 @@ func TestResettingTimer(t *testing.T) {
values: []int64{1, 10},
start: 0,
end: 0,
- wantP50: 1, wantP95: 10, wantP99: 10,
+ wantP50: 5.5, wantP95: 10, wantP99: 10,
wantMin: 1, wantMax: 10, wantMean: 5.5,
},
}
- for ind, tt := range tests {
+ for i, tt := range tests {
timer := NewResettingTimer()
for i := tt.start; i < tt.end; i++ {
@@ -70,37 +70,128 @@ func TestResettingTimer(t *testing.T) {
for _, v := range tt.values {
timer.Update(time.Duration(v))
}
-
snap := timer.Snapshot()
- ps := snap.Percentiles([]float64{50, 95, 99})
+ ps := snap.Percentiles([]float64{0.50, 0.95, 0.99})
- val := snap.Values()
+ if have, want := snap.Min(), tt.wantMin; have != want {
+ t.Fatalf("%d: min: have %d, want %d", i, have, want)
+ }
+ if have, want := snap.Max(), tt.wantMax; have != want {
+ t.Fatalf("%d: max: have %d, want %d", i, have, want)
+ }
+ if have, want := snap.Mean(), tt.wantMean; have != want {
+ t.Fatalf("%d: mean: have %v, want %v", i, have, want)
+ }
+ if have, want := ps[0], tt.wantP50; have != want {
+ t.Errorf("%d: p50: have %v, want %v", i, have, want)
+ }
+ if have, want := ps[1], tt.wantP95; have != want {
+ t.Errorf("%d: p95: have %v, want %v", i, have, want)
+ }
+ if have, want := ps[2], tt.wantP99; have != want {
+ t.Errorf("%d: p99: have %v, want %v", i, have, want)
+ }
+ }
+}
- if len(val) > 0 {
- if tt.wantMin != val[0] {
- t.Fatalf("%d: min: got %d, want %d", ind, val[0], tt.wantMin)
- }
+func TestResettingTimerWithFivePercentiles(t *testing.T) {
+ tests := []struct {
+ values []int64
+ start int
+ end int
+ wantP05 float64
+ wantP20 float64
+ wantP50 float64
+ wantP95 float64
+ wantP99 float64
+ wantMean float64
+ wantMin int64
+ wantMax int64
+ }{
+ {
+ values: []int64{},
+ start: 1,
+ end: 11,
+ wantP05: 1, wantP20: 2.2, wantP50: 5.5, wantP95: 10, wantP99: 10,
+ wantMin: 1, wantMax: 10, wantMean: 5.5,
+ },
+ {
+ values: []int64{},
+ start: 1,
+ end: 101,
+ wantP05: 5.050000000000001, wantP20: 20.200000000000003, wantP50: 50.5, wantP95: 95.94999999999999, wantP99: 99.99,
+ wantMin: 1, wantMax: 100, wantMean: 50.5,
+ },
+ {
+ values: []int64{1},
+ start: 0,
+ end: 0,
+ wantP05: 1, wantP20: 1, wantP50: 1, wantP95: 1, wantP99: 1,
+ wantMin: 1, wantMax: 1, wantMean: 1,
+ },
+ {
+ values: []int64{0},
+ start: 0,
+ end: 0,
+ wantP05: 0, wantP20: 0, wantP50: 0, wantP95: 0, wantP99: 0,
+ wantMin: 0, wantMax: 0, wantMean: 0,
+ },
+ {
+ values: []int64{},
+ start: 0,
+ end: 0,
+ wantP05: 0, wantP20: 0, wantP50: 0, wantP95: 0, wantP99: 0,
+ wantMin: 0, wantMax: 0, wantMean: 0,
+ },
+ {
+ values: []int64{1, 10},
+ start: 0,
+ end: 0,
+ wantP05: 1, wantP20: 1, wantP50: 5.5, wantP95: 10, wantP99: 10,
+ wantMin: 1, wantMax: 10, wantMean: 5.5,
+ },
+ }
+ for ind, tt := range tests {
+ timer := NewResettingTimer()
- if tt.wantMax != val[len(val)-1] {
- t.Fatalf("%d: max: got %d, want %d", ind, val[len(val)-1], tt.wantMax)
- }
+ for i := tt.start; i < tt.end; i++ {
+ tt.values = append(tt.values, int64(i))
}
- if tt.wantMean != snap.Mean() {
- t.Fatalf("%d: mean: got %.2f, want %.2f", ind, snap.Mean(), tt.wantMean)
+ for _, v := range tt.values {
+ timer.Update(time.Duration(v))
}
- if tt.wantP50 != ps[0] {
- t.Fatalf("%d: p50: got %d, want %d", ind, ps[0], tt.wantP50)
+ snap := timer.Snapshot()
+
+ ps := snap.Percentiles([]float64{0.05, 0.20, 0.50, 0.95, 0.99})
+
+ if tt.wantMin != snap.Min() {
+ t.Errorf("%d: min: got %d, want %d", ind, snap.Min(), tt.wantMin)
}
- if tt.wantP95 != ps[1] {
- t.Fatalf("%d: p95: got %d, want %d", ind, ps[1], tt.wantP95)
+ if tt.wantMax != snap.Max() {
+ t.Errorf("%d: max: got %d, want %d", ind, snap.Max(), tt.wantMax)
}
- if tt.wantP99 != ps[2] {
- t.Fatalf("%d: p99: got %d, want %d", ind, ps[2], tt.wantP99)
+ if tt.wantMean != snap.Mean() {
+ t.Errorf("%d: mean: got %.2f, want %.2f", ind, snap.Mean(), tt.wantMean)
+ }
+ if tt.wantP05 != ps[0] {
+ t.Errorf("%d: p05: got %v, want %v", ind, ps[0], tt.wantP05)
+ }
+ if tt.wantP20 != ps[1] {
+ t.Errorf("%d: p20: got %v, want %v", ind, ps[1], tt.wantP20)
+ }
+ if tt.wantP50 != ps[2] {
+ t.Errorf("%d: p50: got %v, want %v", ind, ps[2], tt.wantP50)
+ }
+ if tt.wantP95 != ps[3] {
+ t.Errorf("%d: p95: got %v, want %v", ind, ps[3], tt.wantP95)
+ }
+ if tt.wantP99 != ps[4] {
+ t.Errorf("%d: p99: got %v, want %v", ind, ps[4], tt.wantP99)
}
}
}
diff --git a/metrics/runtime.go b/metrics/runtime.go
deleted file mode 100644
index 9450c479b..000000000
--- a/metrics/runtime.go
+++ /dev/null
@@ -1,212 +0,0 @@
-package metrics
-
-import (
- "runtime"
- "runtime/pprof"
- "time"
-)
-
-var (
- memStats runtime.MemStats
- runtimeMetrics struct {
- MemStats struct {
- Alloc Gauge
- BuckHashSys Gauge
- DebugGC Gauge
- EnableGC Gauge
- Frees Gauge
- HeapAlloc Gauge
- HeapIdle Gauge
- HeapInuse Gauge
- HeapObjects Gauge
- HeapReleased Gauge
- HeapSys Gauge
- LastGC Gauge
- Lookups Gauge
- Mallocs Gauge
- MCacheInuse Gauge
- MCacheSys Gauge
- MSpanInuse Gauge
- MSpanSys Gauge
- NextGC Gauge
- NumGC Gauge
- GCCPUFraction GaugeFloat64
- PauseNs Histogram
- PauseTotalNs Gauge
- StackInuse Gauge
- StackSys Gauge
- Sys Gauge
- TotalAlloc Gauge
- }
- NumCgoCall Gauge
- NumGoroutine Gauge
- NumThread Gauge
- ReadMemStats Timer
- }
- frees uint64
- lookups uint64
- mallocs uint64
- numGC uint32
- numCgoCalls int64
-
- threadCreateProfile = pprof.Lookup("threadcreate")
-)
-
-// Capture new values for the Go runtime statistics exported in
-// runtime.MemStats. This is designed to be called as a goroutine.
-func CaptureRuntimeMemStats(r Registry, d time.Duration) {
- for range time.Tick(d) {
- CaptureRuntimeMemStatsOnce(r)
- }
-}
-
-// Capture new values for the Go runtime statistics exported in
-// runtime.MemStats. This is designed to be called in a background
-// goroutine. Giving a registry which has not been given to
-// RegisterRuntimeMemStats will panic.
-//
-// Be very careful with this because runtime.ReadMemStats calls the C
-// functions runtime·semacquire(&runtime·worldsema) and runtime·stoptheworld()
-// and that last one does what it says on the tin.
-func CaptureRuntimeMemStatsOnce(r Registry) {
- t := time.Now()
- runtime.ReadMemStats(&memStats) // This takes 50-200us.
- runtimeMetrics.ReadMemStats.UpdateSince(t)
-
- runtimeMetrics.MemStats.Alloc.Update(int64(memStats.Alloc))
- runtimeMetrics.MemStats.BuckHashSys.Update(int64(memStats.BuckHashSys))
- if memStats.DebugGC {
- runtimeMetrics.MemStats.DebugGC.Update(1)
- } else {
- runtimeMetrics.MemStats.DebugGC.Update(0)
- }
- if memStats.EnableGC {
- runtimeMetrics.MemStats.EnableGC.Update(1)
- } else {
- runtimeMetrics.MemStats.EnableGC.Update(0)
- }
-
- runtimeMetrics.MemStats.Frees.Update(int64(memStats.Frees - frees))
- runtimeMetrics.MemStats.HeapAlloc.Update(int64(memStats.HeapAlloc))
- runtimeMetrics.MemStats.HeapIdle.Update(int64(memStats.HeapIdle))
- runtimeMetrics.MemStats.HeapInuse.Update(int64(memStats.HeapInuse))
- runtimeMetrics.MemStats.HeapObjects.Update(int64(memStats.HeapObjects))
- runtimeMetrics.MemStats.HeapReleased.Update(int64(memStats.HeapReleased))
- runtimeMetrics.MemStats.HeapSys.Update(int64(memStats.HeapSys))
- runtimeMetrics.MemStats.LastGC.Update(int64(memStats.LastGC))
- runtimeMetrics.MemStats.Lookups.Update(int64(memStats.Lookups - lookups))
- runtimeMetrics.MemStats.Mallocs.Update(int64(memStats.Mallocs - mallocs))
- runtimeMetrics.MemStats.MCacheInuse.Update(int64(memStats.MCacheInuse))
- runtimeMetrics.MemStats.MCacheSys.Update(int64(memStats.MCacheSys))
- runtimeMetrics.MemStats.MSpanInuse.Update(int64(memStats.MSpanInuse))
- runtimeMetrics.MemStats.MSpanSys.Update(int64(memStats.MSpanSys))
- runtimeMetrics.MemStats.NextGC.Update(int64(memStats.NextGC))
- runtimeMetrics.MemStats.NumGC.Update(int64(memStats.NumGC - numGC))
- runtimeMetrics.MemStats.GCCPUFraction.Update(gcCPUFraction(&memStats))
-
- //
- i := numGC % uint32(len(memStats.PauseNs))
- ii := memStats.NumGC % uint32(len(memStats.PauseNs))
- if memStats.NumGC-numGC >= uint32(len(memStats.PauseNs)) {
- for i = 0; i < uint32(len(memStats.PauseNs)); i++ {
- runtimeMetrics.MemStats.PauseNs.Update(int64(memStats.PauseNs[i]))
- }
- } else {
- if i > ii {
- for ; i < uint32(len(memStats.PauseNs)); i++ {
- runtimeMetrics.MemStats.PauseNs.Update(int64(memStats.PauseNs[i]))
- }
- i = 0
- }
- for ; i < ii; i++ {
- runtimeMetrics.MemStats.PauseNs.Update(int64(memStats.PauseNs[i]))
- }
- }
- frees = memStats.Frees
- lookups = memStats.Lookups
- mallocs = memStats.Mallocs
- numGC = memStats.NumGC
-
- runtimeMetrics.MemStats.PauseTotalNs.Update(int64(memStats.PauseTotalNs))
- runtimeMetrics.MemStats.StackInuse.Update(int64(memStats.StackInuse))
- runtimeMetrics.MemStats.StackSys.Update(int64(memStats.StackSys))
- runtimeMetrics.MemStats.Sys.Update(int64(memStats.Sys))
- runtimeMetrics.MemStats.TotalAlloc.Update(int64(memStats.TotalAlloc))
-
- currentNumCgoCalls := numCgoCall()
- runtimeMetrics.NumCgoCall.Update(currentNumCgoCalls - numCgoCalls)
- numCgoCalls = currentNumCgoCalls
-
- runtimeMetrics.NumGoroutine.Update(int64(runtime.NumGoroutine()))
-
- runtimeMetrics.NumThread.Update(int64(threadCreateProfile.Count()))
-}
-
-// Register runtimeMetrics for the Go runtime statistics exported in runtime and
-// specifically runtime.MemStats. The runtimeMetrics are named by their
-// fully-qualified Go symbols, i.e. runtime.MemStats.Alloc.
-func RegisterRuntimeMemStats(r Registry) {
- runtimeMetrics.MemStats.Alloc = NewGauge()
- runtimeMetrics.MemStats.BuckHashSys = NewGauge()
- runtimeMetrics.MemStats.DebugGC = NewGauge()
- runtimeMetrics.MemStats.EnableGC = NewGauge()
- runtimeMetrics.MemStats.Frees = NewGauge()
- runtimeMetrics.MemStats.HeapAlloc = NewGauge()
- runtimeMetrics.MemStats.HeapIdle = NewGauge()
- runtimeMetrics.MemStats.HeapInuse = NewGauge()
- runtimeMetrics.MemStats.HeapObjects = NewGauge()
- runtimeMetrics.MemStats.HeapReleased = NewGauge()
- runtimeMetrics.MemStats.HeapSys = NewGauge()
- runtimeMetrics.MemStats.LastGC = NewGauge()
- runtimeMetrics.MemStats.Lookups = NewGauge()
- runtimeMetrics.MemStats.Mallocs = NewGauge()
- runtimeMetrics.MemStats.MCacheInuse = NewGauge()
- runtimeMetrics.MemStats.MCacheSys = NewGauge()
- runtimeMetrics.MemStats.MSpanInuse = NewGauge()
- runtimeMetrics.MemStats.MSpanSys = NewGauge()
- runtimeMetrics.MemStats.NextGC = NewGauge()
- runtimeMetrics.MemStats.NumGC = NewGauge()
- runtimeMetrics.MemStats.GCCPUFraction = NewGaugeFloat64()
- runtimeMetrics.MemStats.PauseNs = NewHistogram(NewExpDecaySample(1028, 0.015))
- runtimeMetrics.MemStats.PauseTotalNs = NewGauge()
- runtimeMetrics.MemStats.StackInuse = NewGauge()
- runtimeMetrics.MemStats.StackSys = NewGauge()
- runtimeMetrics.MemStats.Sys = NewGauge()
- runtimeMetrics.MemStats.TotalAlloc = NewGauge()
- runtimeMetrics.NumCgoCall = NewGauge()
- runtimeMetrics.NumGoroutine = NewGauge()
- runtimeMetrics.NumThread = NewGauge()
- runtimeMetrics.ReadMemStats = NewTimer()
-
- r.Register("runtime.MemStats.Alloc", runtimeMetrics.MemStats.Alloc)
- r.Register("runtime.MemStats.BuckHashSys", runtimeMetrics.MemStats.BuckHashSys)
- r.Register("runtime.MemStats.DebugGC", runtimeMetrics.MemStats.DebugGC)
- r.Register("runtime.MemStats.EnableGC", runtimeMetrics.MemStats.EnableGC)
- r.Register("runtime.MemStats.Frees", runtimeMetrics.MemStats.Frees)
- r.Register("runtime.MemStats.HeapAlloc", runtimeMetrics.MemStats.HeapAlloc)
- r.Register("runtime.MemStats.HeapIdle", runtimeMetrics.MemStats.HeapIdle)
- r.Register("runtime.MemStats.HeapInuse", runtimeMetrics.MemStats.HeapInuse)
- r.Register("runtime.MemStats.HeapObjects", runtimeMetrics.MemStats.HeapObjects)
- r.Register("runtime.MemStats.HeapReleased", runtimeMetrics.MemStats.HeapReleased)
- r.Register("runtime.MemStats.HeapSys", runtimeMetrics.MemStats.HeapSys)
- r.Register("runtime.MemStats.LastGC", runtimeMetrics.MemStats.LastGC)
- r.Register("runtime.MemStats.Lookups", runtimeMetrics.MemStats.Lookups)
- r.Register("runtime.MemStats.Mallocs", runtimeMetrics.MemStats.Mallocs)
- r.Register("runtime.MemStats.MCacheInuse", runtimeMetrics.MemStats.MCacheInuse)
- r.Register("runtime.MemStats.MCacheSys", runtimeMetrics.MemStats.MCacheSys)
- r.Register("runtime.MemStats.MSpanInuse", runtimeMetrics.MemStats.MSpanInuse)
- r.Register("runtime.MemStats.MSpanSys", runtimeMetrics.MemStats.MSpanSys)
- r.Register("runtime.MemStats.NextGC", runtimeMetrics.MemStats.NextGC)
- r.Register("runtime.MemStats.NumGC", runtimeMetrics.MemStats.NumGC)
- r.Register("runtime.MemStats.GCCPUFraction", runtimeMetrics.MemStats.GCCPUFraction)
- r.Register("runtime.MemStats.PauseNs", runtimeMetrics.MemStats.PauseNs)
- r.Register("runtime.MemStats.PauseTotalNs", runtimeMetrics.MemStats.PauseTotalNs)
- r.Register("runtime.MemStats.StackInuse", runtimeMetrics.MemStats.StackInuse)
- r.Register("runtime.MemStats.StackSys", runtimeMetrics.MemStats.StackSys)
- r.Register("runtime.MemStats.Sys", runtimeMetrics.MemStats.Sys)
- r.Register("runtime.MemStats.TotalAlloc", runtimeMetrics.MemStats.TotalAlloc)
- r.Register("runtime.NumCgoCall", runtimeMetrics.NumCgoCall)
- r.Register("runtime.NumGoroutine", runtimeMetrics.NumGoroutine)
- r.Register("runtime.NumThread", runtimeMetrics.NumThread)
- r.Register("runtime.ReadMemStats", runtimeMetrics.ReadMemStats)
-}
diff --git a/metrics/runtime_cgo.go b/metrics/runtime_cgo.go
deleted file mode 100644
index e3391f4e8..000000000
--- a/metrics/runtime_cgo.go
+++ /dev/null
@@ -1,10 +0,0 @@
-// +build cgo
-// +build !appengine
-
-package metrics
-
-import "runtime"
-
-func numCgoCall() int64 {
- return runtime.NumCgoCall()
-}
diff --git a/metrics/runtime_gccpufraction.go b/metrics/runtime_gccpufraction.go
deleted file mode 100644
index ca12c05ba..000000000
--- a/metrics/runtime_gccpufraction.go
+++ /dev/null
@@ -1,9 +0,0 @@
-// +build go1.5
-
-package metrics
-
-import "runtime"
-
-func gcCPUFraction(memStats *runtime.MemStats) float64 {
- return memStats.GCCPUFraction
-}
diff --git a/metrics/runtime_no_cgo.go b/metrics/runtime_no_cgo.go
deleted file mode 100644
index 616a3b475..000000000
--- a/metrics/runtime_no_cgo.go
+++ /dev/null
@@ -1,7 +0,0 @@
-// +build !cgo appengine
-
-package metrics
-
-func numCgoCall() int64 {
- return 0
-}
diff --git a/metrics/runtime_no_gccpufraction.go b/metrics/runtime_no_gccpufraction.go
deleted file mode 100644
index be96aa6f1..000000000
--- a/metrics/runtime_no_gccpufraction.go
+++ /dev/null
@@ -1,9 +0,0 @@
-// +build !go1.5
-
-package metrics
-
-import "runtime"
-
-func gcCPUFraction(memStats *runtime.MemStats) float64 {
- return 0
-}
diff --git a/metrics/runtime_test.go b/metrics/runtime_test.go
deleted file mode 100644
index ebbfd501a..000000000
--- a/metrics/runtime_test.go
+++ /dev/null
@@ -1,88 +0,0 @@
-package metrics
-
-import (
- "runtime"
- "testing"
- "time"
-)
-
-func BenchmarkRuntimeMemStats(b *testing.B) {
- r := NewRegistry()
- RegisterRuntimeMemStats(r)
- b.ResetTimer()
- for i := 0; i < b.N; i++ {
- CaptureRuntimeMemStatsOnce(r)
- }
-}
-
-func TestRuntimeMemStats(t *testing.T) {
- r := NewRegistry()
- RegisterRuntimeMemStats(r)
- CaptureRuntimeMemStatsOnce(r)
- zero := runtimeMetrics.MemStats.PauseNs.Count() // Get a "zero" since GC may have run before these tests.
- runtime.GC()
- CaptureRuntimeMemStatsOnce(r)
- if count := runtimeMetrics.MemStats.PauseNs.Count(); 1 != count-zero {
- t.Fatal(count - zero)
- }
- runtime.GC()
- runtime.GC()
- CaptureRuntimeMemStatsOnce(r)
- if count := runtimeMetrics.MemStats.PauseNs.Count(); 3 != count-zero {
- t.Fatal(count - zero)
- }
- for i := 0; i < 256; i++ {
- runtime.GC()
- }
- CaptureRuntimeMemStatsOnce(r)
- if count := runtimeMetrics.MemStats.PauseNs.Count(); 259 != count-zero {
- t.Fatal(count - zero)
- }
- for i := 0; i < 257; i++ {
- runtime.GC()
- }
- CaptureRuntimeMemStatsOnce(r)
- if count := runtimeMetrics.MemStats.PauseNs.Count(); 515 != count-zero { // We lost one because there were too many GCs between captures.
- t.Fatal(count - zero)
- }
-}
-
-func TestRuntimeMemStatsNumThread(t *testing.T) {
- r := NewRegistry()
- RegisterRuntimeMemStats(r)
- CaptureRuntimeMemStatsOnce(r)
-
- if value := runtimeMetrics.NumThread.Value(); value < 1 {
- t.Fatalf("got NumThread: %d, wanted at least 1", value)
- }
-}
-
-func TestRuntimeMemStatsBlocking(t *testing.T) {
- if g := runtime.GOMAXPROCS(0); g < 2 {
- t.Skipf("skipping TestRuntimeMemStatsBlocking with GOMAXPROCS=%d\n", g)
- }
- ch := make(chan int)
- go testRuntimeMemStatsBlocking(ch)
- var memStats runtime.MemStats
- t0 := time.Now()
- runtime.ReadMemStats(&memStats)
- t1 := time.Now()
- t.Log("i++ during runtime.ReadMemStats:", <-ch)
- go testRuntimeMemStatsBlocking(ch)
- d := t1.Sub(t0)
- t.Log(d)
- time.Sleep(d)
- t.Log("i++ during time.Sleep:", <-ch)
-}
-
-func testRuntimeMemStatsBlocking(ch chan int) {
- i := 0
- for {
- select {
- case ch <- i:
- return
- default:
- i++
- }
- }
-}
diff --git a/metrics/runtimehistogram.go b/metrics/runtimehistogram.go
new file mode 100644
index 000000000..92fcbcc28
--- /dev/null
+++ b/metrics/runtimehistogram.go
@@ -0,0 +1,301 @@
+package metrics
+
+import (
+ "math"
+ "runtime/metrics"
+ "sort"
+ "sync/atomic"
+)
+
+func getOrRegisterRuntimeHistogram(name string, scale float64, r Registry) *runtimeHistogram {
+ if r == nil {
+ r = DefaultRegistry
+ }
+ constructor := func() Histogram { return newRuntimeHistogram(scale) }
+ return r.GetOrRegister(name, constructor).(*runtimeHistogram)
+}
+
+// runtimeHistogram wraps a runtime/metrics histogram.
+type runtimeHistogram struct {
+ v atomic.Value // v is a pointer to a metrics.Float64Histogram
+ scaleFactor float64
+}
+
+func newRuntimeHistogram(scale float64) *runtimeHistogram {
+ h := &runtimeHistogram{scaleFactor: scale}
+ h.update(new(metrics.Float64Histogram))
+ return h
+}
+
+func RuntimeHistogramFromData(scale float64, hist *metrics.Float64Histogram) *runtimeHistogram {
+ h := &runtimeHistogram{scaleFactor: scale}
+ h.update(hist)
+ return h
+}
+
+func (h *runtimeHistogram) update(mh *metrics.Float64Histogram) {
+ if mh == nil {
+ // The update value can be nil if the current Go version doesn't support a
+ // requested metric. It's just easier to handle nil here than putting
+ // conditionals everywhere.
+ return
+ }
+
+ s := metrics.Float64Histogram{
+ Counts: make([]uint64, len(mh.Counts)),
+ Buckets: make([]float64, len(mh.Buckets)),
+ }
+ copy(s.Counts, mh.Counts)
+ for i, b := range mh.Buckets {
+ s.Buckets[i] = b * h.scaleFactor
+ }
+ h.v.Store(&s)
+}
+
+func (h *runtimeHistogram) Clear() {
+ panic("runtimeHistogram does not support Clear")
+}
+func (h *runtimeHistogram) Update(int64) {
+ panic("runtimeHistogram does not support Update")
+}
+
+// Snapshot returns a non-changing copy of the histogram.
+func (h *runtimeHistogram) Snapshot() HistogramSnapshot {
+ hist := h.v.Load().(*metrics.Float64Histogram)
+ return newRuntimeHistogramSnapshot(hist)
+}
+
+type runtimeHistogramSnapshot struct {
+ internal *metrics.Float64Histogram
+ calculated bool
+ // The following fields are (lazily) calculated based on 'internal'
+ mean float64
+ count int64
+ min int64 // min is the lowest sample value.
+ max int64 // max is the highest sample value.
+ variance float64
+}
+
+func newRuntimeHistogramSnapshot(h *metrics.Float64Histogram) *runtimeHistogramSnapshot {
+ return &runtimeHistogramSnapshot{
+ internal: h,
+ }
+}
+
+// calc calculates the values for the snapshot. This method is not thread-safe.
+func (h *runtimeHistogramSnapshot) calc() {
+ h.calculated = true
+ var (
+ count int64 // number of samples
+ sum float64 // approx sum of all sample values
+ min int64
+ max float64
+ )
+ if len(h.internal.Counts) == 0 {
+ return
+ }
+ for i, c := range h.internal.Counts {
+ if c == 0 {
+ continue
+ }
+ if count == 0 { // Set min only first loop iteration
+ min = int64(math.Floor(h.internal.Buckets[i]))
+ }
+ count += int64(c)
+ sum += h.midpoint(i) * float64(c)
+ // Set max on every iteration
+ edge := h.internal.Buckets[i+1]
+ if math.IsInf(edge, 1) {
+ edge = h.internal.Buckets[i]
+ }
+ if edge > max {
+ max = edge
+ }
+ }
+ h.min = min
+ h.max = int64(max)
+ h.mean = sum / float64(count)
+ h.count = count
+}
+
+// Count returns the sample count.
+func (h *runtimeHistogramSnapshot) Count() int64 {
+ if !h.calculated {
+ h.calc()
+ }
+ return h.count
+}
+
+// Size returns the size of the sample at the time the snapshot was taken.
+func (h *runtimeHistogramSnapshot) Size() int {
+ return len(h.internal.Counts)
+}
+
+// Mean returns an approximation of the mean.
+func (h *runtimeHistogramSnapshot) Mean() float64 {
+ if !h.calculated {
+ h.calc()
+ }
+ return h.mean
+}
+
+func (h *runtimeHistogramSnapshot) midpoint(bucket int) float64 {
+ high := h.internal.Buckets[bucket+1]
+ low := h.internal.Buckets[bucket]
+ if math.IsInf(high, 1) {
+ // The edge of the highest bucket can be +Inf, and it's supposed to mean that this
+ // bucket contains all remaining samples > low. We can't get the middle of an
+ // infinite range, so just return the lower bound of this bucket instead.
+ return low
+ }
+ if math.IsInf(low, -1) {
+ // Similarly, we can get -Inf in the left edge of the lowest bucket,
+ // and it means the bucket contains all remaining values < high.
+ return high
+ }
+ return (low + high) / 2
+}
+
+// StdDev approximates the standard deviation of the histogram.
+func (h *runtimeHistogramSnapshot) StdDev() float64 {
+ return math.Sqrt(h.Variance())
+}
+
+// Variance approximates the variance of the histogram.
+func (h *runtimeHistogramSnapshot) Variance() float64 {
+ if len(h.internal.Counts) == 0 {
+ return 0
+ }
+ if !h.calculated {
+ h.calc()
+ }
+ if h.count <= 1 {
+ // There is no variance when there are zero or one items.
+ return 0
+ }
+ // Variance is not calculated in 'calc', because it requires a second iteration.
+ // Therefore we calculate it lazily in this method, triggered either by
+ // a direct call to Variance or via StdDev.
+ if h.variance != 0.0 {
+ return h.variance
+ }
+ var sum float64
+
+ for i, c := range h.internal.Counts {
+ midpoint := h.midpoint(i)
+ d := midpoint - h.mean
+ sum += float64(c) * (d * d)
+ }
+ h.variance = sum / float64(h.count-1)
+ return h.variance
+}
+
+// Percentile computes the p'th percentile value.
+func (h *runtimeHistogramSnapshot) Percentile(p float64) float64 {
+ threshold := float64(h.Count()) * p
+ values := [1]float64{threshold}
+ h.computePercentiles(values[:])
+ return values[0]
+}
+
+// Percentiles computes all requested percentile values.
+func (h *runtimeHistogramSnapshot) Percentiles(ps []float64) []float64 {
+ // Compute threshold values. We need these to be sorted
+ // for the percentile computation, but restore the original
+ // order later, so keep the indexes as well.
+ count := float64(h.Count())
+ thresholds := make([]float64, len(ps))
+ indexes := make([]int, len(ps))
+ for i, percentile := range ps {
+ thresholds[i] = count * math.Max(0, math.Min(1.0, percentile))
+ indexes[i] = i
+ }
+ sort.Sort(floatsAscendingKeepingIndex{thresholds, indexes})
+
+ // Now compute. The result is stored back into the thresholds slice.
+ h.computePercentiles(thresholds)
+
+ // Put the result back into the requested order.
+ sort.Sort(floatsByIndex{thresholds, indexes})
+ return thresholds
+}
+
+func (h *runtimeHistogramSnapshot) computePercentiles(thresh []float64) {
+ var totalCount float64
+ for i, count := range h.internal.Counts {
+ totalCount += float64(count)
+
+ for len(thresh) > 0 && thresh[0] < totalCount {
+ thresh[0] = h.internal.Buckets[i]
+ thresh = thresh[1:]
+ }
+ if len(thresh) == 0 {
+ return
+ }
+ }
+}
+
+// Note: runtime/metrics.Float64Histogram is a collection of float64s, but the methods
+// below need to return int64 to satisfy the interface. The histogram provided by runtime
+// also doesn't keep track of individual samples, so results are approximated.
+
+// Max returns the highest sample value.
+func (h *runtimeHistogramSnapshot) Max() int64 {
+ if !h.calculated {
+ h.calc()
+ }
+ return h.max
+}
+
+// Min returns the lowest sample value.
+func (h *runtimeHistogramSnapshot) Min() int64 {
+ if !h.calculated {
+ h.calc()
+ }
+ return h.min
+}
+
+// Sum returns the sum of all sample values.
+func (h *runtimeHistogramSnapshot) Sum() int64 {
+ var sum float64
+ for i := range h.internal.Counts {
+ sum += h.internal.Buckets[i] * float64(h.internal.Counts[i])
+ }
+ return int64(math.Ceil(sum))
+}
+
+type floatsAscendingKeepingIndex struct {
+ values []float64
+ indexes []int
+}
+
+func (s floatsAscendingKeepingIndex) Len() int {
+ return len(s.values)
+}
+
+func (s floatsAscendingKeepingIndex) Less(i, j int) bool {
+ return s.values[i] < s.values[j]
+}
+
+func (s floatsAscendingKeepingIndex) Swap(i, j int) {
+ s.values[i], s.values[j] = s.values[j], s.values[i]
+ s.indexes[i], s.indexes[j] = s.indexes[j], s.indexes[i]
+}
+
+type floatsByIndex struct {
+ values []float64
+ indexes []int
+}
+
+func (s floatsByIndex) Len() int {
+ return len(s.values)
+}
+
+func (s floatsByIndex) Less(i, j int) bool {
+ return s.indexes[i] < s.indexes[j]
+}
+
+func (s floatsByIndex) Swap(i, j int) {
+ s.values[i], s.values[j] = s.values[j], s.values[i]
+ s.indexes[i], s.indexes[j] = s.indexes[j], s.indexes[i]
+}
diff --git a/metrics/runtimehistogram_test.go b/metrics/runtimehistogram_test.go
new file mode 100644
index 000000000..cf7e36420
--- /dev/null
+++ b/metrics/runtimehistogram_test.go
@@ -0,0 +1,162 @@
+package metrics
+
+import (
+ "bytes"
+ "encoding/gob"
+ "fmt"
+ "math"
+ "reflect"
+ "runtime/metrics"
+ "testing"
+ "time"
+)
+
+var _ Histogram = (*runtimeHistogram)(nil)
+
+type runtimeHistogramTest struct {
+ h metrics.Float64Histogram
+
+ Count int64
+ Min int64
+ Max int64
+ Sum int64
+ Mean float64
+ Variance float64
+ StdDev float64
+ Percentiles []float64 // .5 .8 .9 .99 .995
+}
+
+// This test checks the results of statistical functions implemented
+// by runtimeHistogramSnapshot.
+func TestRuntimeHistogramStats(t *testing.T) {
+ tests := []runtimeHistogramTest{
+ 0: {
+ h: metrics.Float64Histogram{
+ Counts: []uint64{},
+ Buckets: []float64{},
+ },
+ Count: 0,
+ Max: 0,
+ Min: 0,
+ Sum: 0,
+ Mean: 0,
+ Variance: 0,
+ StdDev: 0,
+ Percentiles: []float64{0, 0, 0, 0, 0},
+ },
+ 1: {
+ // This checks the case where the highest bucket is +Inf.
+ h: metrics.Float64Histogram{
+ Counts: []uint64{0, 1, 2},
+ Buckets: []float64{0, 0.5, 1, math.Inf(1)},
+ },
+ Count: 3,
+ Max: 1,
+ Min: 0,
+ Sum: 3,
+ Mean: 0.9166666,
+ Percentiles: []float64{1, 1, 1, 1, 1},
+ Variance: 0.020833,
+ StdDev: 0.144433,
+ },
+ 2: {
+ h: metrics.Float64Histogram{
+ Counts: []uint64{8, 6, 3, 1},
+ Buckets: []float64{12, 16, 18, 24, 25},
+ },
+ Count: 18,
+ Max: 25,
+ Min: 12,
+ Sum: 270,
+ Mean: 16.75,
+ Variance: 10.3015,
+ StdDev: 3.2096,
+ Percentiles: []float64{16, 18, 18, 24, 24},
+ },
+ }
+
+ for i, test := range tests {
+ t.Run(fmt.Sprint(i), func(t *testing.T) {
+ s := RuntimeHistogramFromData(1.0, &test.h).Snapshot()
+
+ if v := s.Count(); v != test.Count {
+ t.Errorf("Count() = %v, want %v", v, test.Count)
+ }
+ if v := s.Min(); v != test.Min {
+ t.Errorf("Min() = %v, want %v", v, test.Min)
+ }
+ if v := s.Max(); v != test.Max {
+ t.Errorf("Max() = %v, want %v", v, test.Max)
+ }
+ if v := s.Sum(); v != test.Sum {
+ t.Errorf("Sum() = %v, want %v", v, test.Sum)
+ }
+ if v := s.Mean(); !approxEqual(v, test.Mean, 0.0001) {
+ t.Errorf("Mean() = %v, want %v", v, test.Mean)
+ }
+ if v := s.Variance(); !approxEqual(v, test.Variance, 0.0001) {
+ t.Errorf("Variance() = %v, want %v", v, test.Variance)
+ }
+ if v := s.StdDev(); !approxEqual(v, test.StdDev, 0.0001) {
+ t.Errorf("StdDev() = %v, want %v", v, test.StdDev)
+ }
+ ps := []float64{.5, .8, .9, .99, .995}
+ if v := s.Percentiles(ps); !reflect.DeepEqual(v, test.Percentiles) {
+ t.Errorf("Percentiles(%v) = %v, want %v", ps, v, test.Percentiles)
+ }
+ })
+ }
+}
+
+func approxEqual(x, y, ε float64) bool {
+ if math.IsInf(x, -1) && math.IsInf(y, -1) {
+ return true
+ }
+ if math.IsInf(x, 1) && math.IsInf(y, 1) {
+ return true
+ }
+ if math.IsNaN(x) && math.IsNaN(y) {
+ return true
+ }
+ return math.Abs(x-y) < ε
+}
+
+// This test verifies that requesting Percentiles in unsorted order
+// returns them in the requested order.
+func TestRuntimeHistogramStatsPercentileOrder(t *testing.T) {
+ s := RuntimeHistogramFromData(1.0, &metrics.Float64Histogram{
+ Counts: []uint64{1, 1, 1, 1, 1, 1, 1, 1, 1, 1},
+ Buckets: []float64{0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10},
+ }).Snapshot()
+ result := s.Percentiles([]float64{1, 0.2, 0.5, 0.1, 0.2})
+ expected := []float64{10, 2, 5, 1, 2}
+ if !reflect.DeepEqual(result, expected) {
+ t.Fatal("wrong result:", result)
+ }
+}
+
+func BenchmarkRuntimeHistogramSnapshotRead(b *testing.B) {
+ var sLatency = "7\xff\x81\x03\x01\x01\x10Float64Histogram\x01\xff\x82\x00\x01\x02\x01\x06Counts\x01\xff\x84\x00\x01\aBuckets\x01\xff\x86\x00\x00\x00\x16\xff\x83\x02\x01\x01\b[]uint64\x01\xff\x84\x00\x01\x06\x00\x00\x17\xff\x85\x02\x01\x01\t[]float64\x01\xff\x86\x00\x01\b\x00\x00\xfe\x06T\xff\x82\x01\xff\xa2\x00\xfe\r\xef\x00\x01\x02\x02\x04\x05\x04\b\x15\x17 B?6.L;$!2) \x1a? \x190aH7FY6#\x190\x1d\x14\x10\x1b\r\t\x04\x03\x01\x01\x00\x03\x02\x00\x03\x05\x05\x02\x02\x06\x04\v\x06\n\x15\x18\x13'&.\x12=H/L&\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x01\xff\xa3\xfe\xf0\xff\x00\xf8\x95\xd6&\xe8\v.q>\xf8\x95\xd6&\xe8\v.\x81>\xf8\xdfA:\xdc\x11ʼn>\xf8\x95\xd6&\xe8\v.\x91>\xf8:\x8c0\xe2\x8ey\x95>\xf8\xdfA:\xdc\x11ř>\xf8\x84\xf7C֔\x10\x9e>\xf8\x95\xd6&\xe8\v.\xa1>\xf8:\x8c0\xe2\x8ey\xa5>\xf8\xdfA:\xdc\x11ũ>\xf8\x84\xf7C֔\x10\xae>\xf8\x95\xd6&\xe8\v.\xb1>\xf8:\x8c0\xe2\x8ey\xb5>\xf8\xdfA:\xdc\x11Ź>\xf8\x84\xf7C֔\x10\xbe>\xf8\x95\xd6&\xe8\v.\xc1>\xf8:\x8c0\xe2\x8ey\xc5>\xf8\xdfA:\xdc\x11\xc5\xc9>\xf8\x84\xf7C֔\x10\xce>\xf8\x95\xd6&\xe8\v.\xd1>\xf8:\x8c0\xe2\x8ey\xd5>\xf8\xdfA:\xdc\x11\xc5\xd9>\xf8\x84\xf7C֔\x10\xde>\xf8\x95\xd6&\xe8\v.\xe1>\xf8:\x8c0\xe2\x8ey\xe5>\xf8\xdfA:\xdc\x11\xc5\xe9>\xf8\x84\xf7C֔\x10\xee>\xf8\x95\xd6&\xe8\v.\xf1>\xf8:\x8c0\xe2\x8ey\xf5>\xf8\xdfA:\xdc\x11\xc5\xf9>\xf8\x84\xf7C֔\x10\xfe>\xf8\x95\xd6&\xe8\v.\x01?\xf8:\x8c0\xe2\x8ey\x05?\xf8\xdfA:\xdc\x11\xc5\t?\xf8\x84\xf7C֔\x10\x0e?\xf8\x95\xd6&\xe8\v.\x11?\xf8:\x8c0\xe2\x8ey\x15?\xf8\xdfA:\xdc\x11\xc5\x19?\xf8\x84\xf7C֔\x10\x1e?\xf8\x95\xd6&\xe8\v.!?\xf8:\x8c0\xe2\x8ey%?\xf8\xdfA:\xdc\x11\xc5)?\xf8\x84\xf7C֔\x10.?\xf8\x95\xd6&\xe8\v.1?\xf8:\x8c0\xe2\x8ey5?\xf8\xdfA:\xdc\x11\xc59?\xf8\x84\xf7C֔\x10>?\
xf8\x95\xd6&\xe8\v.A?\xf8:\x8c0\xe2\x8eyE?\xf8\xdfA:\xdc\x11\xc5I?\xf8\x84\xf7C֔\x10N?\xf8\x95\xd6&\xe8\v.Q?\xf8:\x8c0\xe2\x8eyU?\xf8\xdfA:\xdc\x11\xc5Y?\xf8\x84\xf7C֔\x10^?\xf8\x95\xd6&\xe8\v.a?\xf8:\x8c0\xe2\x8eye?\xf8\xdfA:\xdc\x11\xc5i?\xf8\x84\xf7C֔\x10n?\xf8\x95\xd6&\xe8\v.q?\xf8:\x8c0\xe2\x8eyu?\xf8\xdfA:\xdc\x11\xc5y?\xf8\x84\xf7C֔\x10~?\xf8\x95\xd6&\xe8\v.\x81?\xf8:\x8c0\xe2\x8ey\x85?\xf8\xdfA:\xdc\x11ʼn?\xf8\x84\xf7C֔\x10\x8e?\xf8\x95\xd6&\xe8\v.\x91?\xf8:\x8c0\xe2\x8ey\x95?\xf8\xdfA:\xdc\x11ř?\xf8\x84\xf7C֔\x10\x9e?\xf8\x95\xd6&\xe8\v.\xa1?\xf8:\x8c0\xe2\x8ey\xa5?\xf8\xdfA:\xdc\x11ũ?\xf8\x84\xf7C֔\x10\xae?\xf8\x95\xd6&\xe8\v.\xb1?\xf8:\x8c0\xe2\x8ey\xb5?\xf8\xdfA:\xdc\x11Ź?\xf8\x84\xf7C֔\x10\xbe?\xf8\x95\xd6&\xe8\v.\xc1?\xf8:\x8c0\xe2\x8ey\xc5?\xf8\xdfA:\xdc\x11\xc5\xc9?\xf8\x84\xf7C֔\x10\xce?\xf8\x95\xd6&\xe8\v.\xd1?\xf8:\x8c0\xe2\x8ey\xd5?\xf8\xdfA:\xdc\x11\xc5\xd9?\xf8\x84\xf7C֔\x10\xde?\xf8\x95\xd6&\xe8\v.\xe1?\xf8:\x8c0\xe2\x8ey\xe5?\xf8\xdfA:\xdc\x11\xc5\xe9?\xf8\x84\xf7C֔\x10\xee?\xf8\x95\xd6&\xe8\v.\xf1?\xf8:\x8c0\xe2\x8ey\xf5?\xf8\xdfA:\xdc\x11\xc5\xf9?\xf8\x84\xf7C֔\x10\xfe?\xf8\x95\xd6&\xe8\v.\x01@\xf8:\x8c0\xe2\x8ey\x05@\xf8\xdfA:\xdc\x11\xc5\t@\xf8\x84\xf7C֔\x10\x0e@\xf8\x95\xd6&\xe8\v.\x11@\xf8:\x8c0\xe2\x8ey\x15@\xf8\xdfA:\xdc\x11\xc5\x19@\xf8\x84\xf7C֔\x10\x1e@\xf8\x95\xd6&\xe8\v.!@\xf8:\x8c0\xe2\x8ey%@\xf8\xdfA:\xdc\x11\xc5)@\xf8\x84\xf7C֔\x10.@\xf8\x95\xd6&\xe8\v.1@\xf8:\x8c0\xe2\x8ey5@\xf8\xdfA:\xdc\x11\xc59@\xf8\x84\xf7C֔\x10>@\xf8\x95\xd6&\xe8\v.A@\xf8:\x8c0\xe2\x8eyE@\xf8\xdfA:\xdc\x11\xc5I@\xf8\x84\xf7C֔\x10N@\xf8\x95\xd6&\xe8\v.Q@\xf8:\x8c0\xe2\x8eyU@\xf8\xdfA:\xdc\x11\xc5Y@\xf8\x84\xf7C֔\x10^@\xf8\x95\xd6&\xe8\v.a@\xf8:\x8c0\xe2\x8eye@\xf8\xdfA:\xdc\x11\xc5i@\xf8\x84\xf7C֔\x10n@\xf8\x95\xd6&\xe8\v.q@\xf8:\x8c0\xe2\x8eyu@\xf8\xdfA:\xdc\x11\xc5y@\xf8\x84\xf7C֔\x10~@\xf8\x95\xd6&\xe8\v.\x81@\xf8:\x8c0\xe2\x8ey\x85@\xf8\xdfA:\xdc\x11ʼn@\xf8\x84\xf7C֔\x10\x8e@\xf8\x95\xd6&\xe8\v.\x91@\xf8:\x8c0\xe2\x8ey\x95@\xf8\xdfA:\xdc\x11ř@\xf8\x84
\xf7C֔\x10\x9e@\xf8\x95\xd6&\xe8\v.\xa1@\xf8:\x8c0\xe2\x8ey\xa5@\xf8\xdfA:\xdc\x11ũ@\xf8\x84\xf7C֔\x10\xae@\xf8\x95\xd6&\xe8\v.\xb1@\xf8:\x8c0\xe2\x8ey\xb5@\xf8\xdfA:\xdc\x11Ź@\xf8\x84\xf7C֔\x10\xbe@\xf8\x95\xd6&\xe8\v.\xc1@\xf8:\x8c0\xe2\x8ey\xc5@\xf8\xdfA:\xdc\x11\xc5\xc9@\xf8\x84\xf7C֔\x10\xce@\xf8\x95\xd6&\xe8\v.\xd1@\xf8:\x8c0\xe2\x8ey\xd5@\xf8\xdfA:\xdc\x11\xc5\xd9@\xf8\x84\xf7C֔\x10\xde@\xf8\x95\xd6&\xe8\v.\xe1@\xf8:\x8c0\xe2\x8ey\xe5@\xf8\xdfA:\xdc\x11\xc5\xe9@\xf8\x84\xf7C֔\x10\xee@\xf8\x95\xd6&\xe8\v.\xf1@\xf8:\x8c0\xe2\x8ey\xf5@\xf8\xdfA:\xdc\x11\xc5\xf9@\xf8\x84\xf7C֔\x10\xfe@\xf8\x95\xd6&\xe8\v.\x01A\xfe\xf0\x7f\x00"
+
+ dserialize := func(data string) *metrics.Float64Histogram {
+ var res metrics.Float64Histogram
+ if err := gob.NewDecoder(bytes.NewReader([]byte(data))).Decode(&res); err != nil {
+ panic(err)
+ }
+ return &res
+ }
+ latency := RuntimeHistogramFromData(float64(time.Second), dserialize(sLatency))
+ b.ResetTimer()
+ b.ReportAllocs()
+ for i := 0; i < b.N; i++ {
+ snap := latency.Snapshot()
+ // These are the fields that influxdb accesses
+ _ = snap.Count()
+ _ = snap.Max()
+ _ = snap.Mean()
+ _ = snap.Min()
+ _ = snap.StdDev()
+ _ = snap.Variance()
+ _ = snap.Percentiles([]float64{0.25, 0.5, 0.75, 0.95, 0.99, 0.999, 0.9999})
+ }
+}
diff --git a/metrics/sample.go b/metrics/sample.go
index 5c4845a4f..5398dd42d 100644
--- a/metrics/sample.go
+++ b/metrics/sample.go
@@ -3,17 +3,15 @@ package metrics
import (
"math"
"math/rand"
- "sort"
"sync"
"time"
+
+ "golang.org/x/exp/slices"
)
const rescaleThreshold = time.Hour
-// Samples maintain a statistically-significant selection of values from
-// a stream.
-type Sample interface {
- Clear()
+type SampleSnapshot interface {
Count() int64
Max() int64
Mean() float64
@@ -21,14 +19,19 @@ type Sample interface {
Percentile(float64) float64
Percentiles([]float64) []float64
Size() int
- Snapshot() Sample
StdDev() float64
Sum() int64
- Update(int64)
- Values() []int64
Variance() float64
}
+// Samples maintain a statistically-significant selection of values from
+// a stream.
+type Sample interface {
+ Snapshot() SampleSnapshot
+ Clear()
+ Update(int64)
+}
+
// ExpDecaySample is an exponentially-decaying sample using a forward-decaying
// priority reservoir. See Cormode et al's "Forward Decay: A Practical Time
// Decay Model for Streaming Systems".
@@ -41,6 +44,7 @@ type ExpDecaySample struct {
reservoirSize int
t0, t1 time.Time
values *expDecaySampleHeap
+ rand *rand.Rand
}
// NewExpDecaySample constructs a new exponentially-decaying sample with the
@@ -59,6 +63,12 @@ func NewExpDecaySample(reservoirSize int, alpha float64) Sample {
return s
}
+// SetRand sets the random source (useful in tests)
+func (s *ExpDecaySample) SetRand(prng *rand.Rand) Sample {
+ s.rand = prng
+ return s
+}
+
// Clear clears all samples.
func (s *ExpDecaySample) Clear() {
s.mutex.Lock()
@@ -69,72 +79,29 @@ func (s *ExpDecaySample) Clear() {
s.values.Clear()
}
-// Count returns the number of samples recorded, which may exceed the
-// reservoir size.
-func (s *ExpDecaySample) Count() int64 {
- s.mutex.Lock()
- defer s.mutex.Unlock()
- return s.count
-}
-
-// Max returns the maximum value in the sample, which may not be the maximum
-// value ever to be part of the sample.
-func (s *ExpDecaySample) Max() int64 {
- return SampleMax(s.Values())
-}
-
-// Mean returns the mean of the values in the sample.
-func (s *ExpDecaySample) Mean() float64 {
- return SampleMean(s.Values())
-}
-
-// Min returns the minimum value in the sample, which may not be the minimum
-// value ever to be part of the sample.
-func (s *ExpDecaySample) Min() int64 {
- return SampleMin(s.Values())
-}
-
-// Percentile returns an arbitrary percentile of values in the sample.
-func (s *ExpDecaySample) Percentile(p float64) float64 {
- return SamplePercentile(s.Values(), p)
-}
-
-// Percentiles returns a slice of arbitrary percentiles of values in the
-// sample.
-func (s *ExpDecaySample) Percentiles(ps []float64) []float64 {
- return SamplePercentiles(s.Values(), ps)
-}
-
-// Size returns the size of the sample, which is at most the reservoir size.
-func (s *ExpDecaySample) Size() int {
- s.mutex.Lock()
- defer s.mutex.Unlock()
- return s.values.Size()
-}
-
// Snapshot returns a read-only copy of the sample.
-func (s *ExpDecaySample) Snapshot() Sample {
+func (s *ExpDecaySample) Snapshot() SampleSnapshot {
s.mutex.Lock()
defer s.mutex.Unlock()
- vals := s.values.Values()
- values := make([]int64, len(vals))
- for i, v := range vals {
- values[i] = v.v
- }
- return &SampleSnapshot{
- count: s.count,
- values: values,
+ var (
+ samples = s.values.Values()
+ values = make([]int64, len(samples))
+ max int64 = math.MinInt64
+ min int64 = math.MaxInt64
+ sum int64
+ )
+ for i, item := range samples {
+ v := item.v
+ values[i] = v
+ sum += v
+ if v > max {
+ max = v
+ }
+ if v < min {
+ min = v
+ }
}
-}
-
-// StdDev returns the standard deviation of the values in the sample.
-func (s *ExpDecaySample) StdDev() float64 {
- return SampleStdDev(s.Values())
-}
-
-// Sum returns the sum of the values in the sample.
-func (s *ExpDecaySample) Sum() int64 {
- return SampleSum(s.Values())
+ return newSampleSnapshotPrecalculated(s.count, values, min, max, sum)
}
// Update samples a new value.
@@ -142,23 +109,6 @@ func (s *ExpDecaySample) Update(v int64) {
s.update(time.Now(), v)
}
-// Values returns a copy of the values in the sample.
-func (s *ExpDecaySample) Values() []int64 {
- s.mutex.Lock()
- defer s.mutex.Unlock()
- vals := s.values.Values()
- values := make([]int64, len(vals))
- for i, v := range vals {
- values[i] = v.v
- }
- return values
-}
-
-// Variance returns the variance of the values in the sample.
-func (s *ExpDecaySample) Variance() float64 {
- return SampleVariance(s.Values())
-}
-
// update samples a new value at a particular timestamp. This is a method all
// its own to facilitate testing.
func (s *ExpDecaySample) update(t time.Time, v int64) {
@@ -168,8 +118,14 @@ func (s *ExpDecaySample) update(t time.Time, v int64) {
if s.values.Size() == s.reservoirSize {
s.values.Pop()
}
+ var f64 float64
+ if s.rand != nil {
+ f64 = s.rand.Float64()
+ } else {
+ f64 = rand.Float64()
+ }
s.values.Push(expDecaySample{
- k: math.Exp(t.Sub(s.t0).Seconds()*s.alpha) / rand.Float64(),
+ k: math.Exp(t.Sub(s.t0).Seconds()*s.alpha) / f64,
v: v,
})
if t.After(s.t1) {
@@ -188,207 +144,160 @@ func (s *ExpDecaySample) update(t time.Time, v int64) {
// NilSample is a no-op Sample.
type NilSample struct{}
-// Clear is a no-op.
-func (NilSample) Clear() {}
-
-// Count is a no-op.
-func (NilSample) Count() int64 { return 0 }
-
-// Max is a no-op.
-func (NilSample) Max() int64 { return 0 }
-
-// Mean is a no-op.
-func (NilSample) Mean() float64 { return 0.0 }
-
-// Min is a no-op.
-func (NilSample) Min() int64 { return 0 }
-
-// Percentile is a no-op.
-func (NilSample) Percentile(p float64) float64 { return 0.0 }
-
-// Percentiles is a no-op.
-func (NilSample) Percentiles(ps []float64) []float64 {
- return make([]float64, len(ps))
-}
-
-// Size is a no-op.
-func (NilSample) Size() int { return 0 }
-
-// Sample is a no-op.
-func (NilSample) Snapshot() Sample { return NilSample{} }
-
-// StdDev is a no-op.
-func (NilSample) StdDev() float64 { return 0.0 }
-
-// Sum is a no-op.
-func (NilSample) Sum() int64 { return 0 }
-
-// Update is a no-op.
-func (NilSample) Update(v int64) {}
-
-// Values is a no-op.
-func (NilSample) Values() []int64 { return []int64{} }
-
-// Variance is a no-op.
-func (NilSample) Variance() float64 { return 0.0 }
-
-// SampleMax returns the maximum value of the slice of int64.
-func SampleMax(values []int64) int64 {
- if 0 == len(values) {
- return 0
- }
- var max int64 = math.MinInt64
- for _, v := range values {
- if max < v {
- max = v
- }
- }
- return max
-}
-
-// SampleMean returns the mean value of the slice of int64.
-func SampleMean(values []int64) float64 {
- if 0 == len(values) {
- return 0.0
- }
- return float64(SampleSum(values)) / float64(len(values))
-}
-
-// SampleMin returns the minimum value of the slice of int64.
-func SampleMin(values []int64) int64 {
- if 0 == len(values) {
- return 0
- }
- var min int64 = math.MaxInt64
- for _, v := range values {
- if min > v {
- min = v
- }
- }
- return min
-}
+func (NilSample) Clear() {}
+func (NilSample) Snapshot() SampleSnapshot { return (*emptySnapshot)(nil) }
+func (NilSample) Update(v int64) {}
// SamplePercentiles returns an arbitrary percentile of the slice of int64.
-func SamplePercentile(values int64Slice, p float64) float64 {
- return SamplePercentiles(values, []float64{p})[0]
+func SamplePercentile(values []int64, p float64) float64 {
+ return CalculatePercentiles(values, []float64{p})[0]
}
-// SamplePercentiles returns a slice of arbitrary percentiles of the slice of
-// int64.
-func SamplePercentiles(values int64Slice, ps []float64) []float64 {
+// CalculatePercentiles returns a slice of arbitrary percentiles of the slice of
+// int64. This method returns interpolated results, so e.g if there are only two
+// values, [0, 10], a 50% percentile will land between them.
+//
+// Note: As a side-effect, this method will also sort the slice of values.
+// Note2: The input format for percentiles is NOT percent! To express 50%, use 0.5, not 50.
+func CalculatePercentiles(values []int64, ps []float64) []float64 {
scores := make([]float64, len(ps))
size := len(values)
- if size > 0 {
- sort.Sort(values)
- for i, p := range ps {
- pos := p * float64(size+1)
- if pos < 1.0 {
- scores[i] = float64(values[0])
- } else if pos >= float64(size) {
- scores[i] = float64(values[size-1])
- } else {
- lower := float64(values[int(pos)-1])
- upper := float64(values[int(pos)])
- scores[i] = lower + (pos-math.Floor(pos))*(upper-lower)
- }
+ if size == 0 {
+ return scores
+ }
+ slices.Sort(values)
+ for i, p := range ps {
+ pos := p * float64(size+1)
+
+ if pos < 1.0 {
+ scores[i] = float64(values[0])
+ } else if pos >= float64(size) {
+ scores[i] = float64(values[size-1])
+ } else {
+ lower := float64(values[int(pos)-1])
+ upper := float64(values[int(pos)])
+ scores[i] = lower + (pos-math.Floor(pos))*(upper-lower)
}
}
return scores
}
-// SampleSnapshot is a read-only copy of another Sample.
-type SampleSnapshot struct {
+// sampleSnapshot is a read-only copy of another Sample.
+type sampleSnapshot struct {
count int64
values []int64
+
+ max int64
+ min int64
+ mean float64
+ sum int64
+ variance float64
}
-func NewSampleSnapshot(count int64, values []int64) *SampleSnapshot {
- return &SampleSnapshot{
+// newSampleSnapshotPrecalculated creates a read-only sampleSnapshot, using
+// precalculated sums to avoid iterating the values
+func newSampleSnapshotPrecalculated(count int64, values []int64, min, max, sum int64) *sampleSnapshot {
+ if len(values) == 0 {
+ return &sampleSnapshot{
+ count: count,
+ values: values,
+ }
+ }
+ return &sampleSnapshot{
count: count,
values: values,
+ max: max,
+ min: min,
+ mean: float64(sum) / float64(len(values)),
+ sum: sum,
}
}
-// Clear panics.
-func (*SampleSnapshot) Clear() {
- panic("Clear called on a SampleSnapshot")
+// newSampleSnapshot creates a read-only sampleSnapshot, and calculates some
+// numbers.
+func newSampleSnapshot(count int64, values []int64) *sampleSnapshot {
+ var (
+ max int64 = math.MinInt64
+ min int64 = math.MaxInt64
+ sum int64
+ )
+ for _, v := range values {
+ sum += v
+ if v > max {
+ max = v
+ }
+ if v < min {
+ min = v
+ }
+ }
+ return newSampleSnapshotPrecalculated(count, values, min, max, sum)
}
// Count returns the count of inputs at the time the snapshot was taken.
-func (s *SampleSnapshot) Count() int64 { return s.count }
+func (s *sampleSnapshot) Count() int64 { return s.count }
// Max returns the maximal value at the time the snapshot was taken.
-func (s *SampleSnapshot) Max() int64 { return SampleMax(s.values) }
+func (s *sampleSnapshot) Max() int64 { return s.max }
// Mean returns the mean value at the time the snapshot was taken.
-func (s *SampleSnapshot) Mean() float64 { return SampleMean(s.values) }
+func (s *sampleSnapshot) Mean() float64 { return s.mean }
// Min returns the minimal value at the time the snapshot was taken.
-func (s *SampleSnapshot) Min() int64 { return SampleMin(s.values) }
+func (s *sampleSnapshot) Min() int64 { return s.min }
// Percentile returns an arbitrary percentile of values at the time the
// snapshot was taken.
-func (s *SampleSnapshot) Percentile(p float64) float64 {
+func (s *sampleSnapshot) Percentile(p float64) float64 {
return SamplePercentile(s.values, p)
}
// Percentiles returns a slice of arbitrary percentiles of values at the time
// the snapshot was taken.
-func (s *SampleSnapshot) Percentiles(ps []float64) []float64 {
- return SamplePercentiles(s.values, ps)
+func (s *sampleSnapshot) Percentiles(ps []float64) []float64 {
+ return CalculatePercentiles(s.values, ps)
}
// Size returns the size of the sample at the time the snapshot was taken.
-func (s *SampleSnapshot) Size() int { return len(s.values) }
+func (s *sampleSnapshot) Size() int { return len(s.values) }
// Snapshot returns the snapshot.
-func (s *SampleSnapshot) Snapshot() Sample { return s }
+func (s *sampleSnapshot) Snapshot() SampleSnapshot { return s }
// StdDev returns the standard deviation of values at the time the snapshot was
// taken.
-func (s *SampleSnapshot) StdDev() float64 { return SampleStdDev(s.values) }
+func (s *sampleSnapshot) StdDev() float64 {
+ if s.variance == 0.0 {
+ s.variance = SampleVariance(s.mean, s.values)
+ }
+ return math.Sqrt(s.variance)
+}
// Sum returns the sum of values at the time the snapshot was taken.
-func (s *SampleSnapshot) Sum() int64 { return SampleSum(s.values) }
-
-// Update panics.
-func (*SampleSnapshot) Update(int64) {
- panic("Update called on a SampleSnapshot")
-}
+func (s *sampleSnapshot) Sum() int64 { return s.sum }
// Values returns a copy of the values in the sample.
-func (s *SampleSnapshot) Values() []int64 {
+func (s *sampleSnapshot) Values() []int64 {
values := make([]int64, len(s.values))
copy(values, s.values)
return values
}
// Variance returns the variance of values at the time the snapshot was taken.
-func (s *SampleSnapshot) Variance() float64 { return SampleVariance(s.values) }
-
-// SampleStdDev returns the standard deviation of the slice of int64.
-func SampleStdDev(values []int64) float64 {
- return math.Sqrt(SampleVariance(values))
-}
-
-// SampleSum returns the sum of the slice of int64.
-func SampleSum(values []int64) int64 {
- var sum int64
- for _, v := range values {
- sum += v
+func (s *sampleSnapshot) Variance() float64 {
+ if s.variance == 0.0 {
+ s.variance = SampleVariance(s.mean, s.values)
}
- return sum
+ return s.variance
}
// SampleVariance returns the variance of the slice of int64.
-func SampleVariance(values []int64) float64 {
- if 0 == len(values) {
+func SampleVariance(mean float64, values []int64) float64 {
+ if len(values) == 0 {
return 0.0
}
- m := SampleMean(values)
var sum float64
for _, v := range values {
- d := float64(v) - m
+ d := float64(v) - mean
sum += d * d
}
return sum / float64(len(values))
@@ -402,6 +311,7 @@ type UniformSample struct {
mutex sync.Mutex
reservoirSize int
values []int64
+ rand *rand.Rand
}
// NewUniformSample constructs a new uniform sample with the given reservoir
@@ -416,6 +326,12 @@ func NewUniformSample(reservoirSize int) Sample {
}
}
+// SetRand sets the random source (useful in tests)
+func (s *UniformSample) SetRand(prng *rand.Rand) Sample {
+ s.rand = prng
+ return s
+}
+
// Clear clears all samples.
func (s *UniformSample) Clear() {
s.mutex.Lock()
@@ -424,83 +340,14 @@ func (s *UniformSample) Clear() {
s.values = make([]int64, 0, s.reservoirSize)
}
-// Count returns the number of samples recorded, which may exceed the
-// reservoir size.
-func (s *UniformSample) Count() int64 {
- s.mutex.Lock()
- defer s.mutex.Unlock()
- return s.count
-}
-
-// Max returns the maximum value in the sample, which may not be the maximum
-// value ever to be part of the sample.
-func (s *UniformSample) Max() int64 {
- s.mutex.Lock()
- defer s.mutex.Unlock()
- return SampleMax(s.values)
-}
-
-// Mean returns the mean of the values in the sample.
-func (s *UniformSample) Mean() float64 {
- s.mutex.Lock()
- defer s.mutex.Unlock()
- return SampleMean(s.values)
-}
-
-// Min returns the minimum value in the sample, which may not be the minimum
-// value ever to be part of the sample.
-func (s *UniformSample) Min() int64 {
- s.mutex.Lock()
- defer s.mutex.Unlock()
- return SampleMin(s.values)
-}
-
-// Percentile returns an arbitrary percentile of values in the sample.
-func (s *UniformSample) Percentile(p float64) float64 {
- s.mutex.Lock()
- defer s.mutex.Unlock()
- return SamplePercentile(s.values, p)
-}
-
-// Percentiles returns a slice of arbitrary percentiles of values in the
-// sample.
-func (s *UniformSample) Percentiles(ps []float64) []float64 {
- s.mutex.Lock()
- defer s.mutex.Unlock()
- return SamplePercentiles(s.values, ps)
-}
-
-// Size returns the size of the sample, which is at most the reservoir size.
-func (s *UniformSample) Size() int {
- s.mutex.Lock()
- defer s.mutex.Unlock()
- return len(s.values)
-}
-
// Snapshot returns a read-only copy of the sample.
-func (s *UniformSample) Snapshot() Sample {
+func (s *UniformSample) Snapshot() SampleSnapshot {
s.mutex.Lock()
- defer s.mutex.Unlock()
values := make([]int64, len(s.values))
copy(values, s.values)
- return &SampleSnapshot{
- count: s.count,
- values: values,
- }
-}
-
-// StdDev returns the standard deviation of the values in the sample.
-func (s *UniformSample) StdDev() float64 {
- s.mutex.Lock()
- defer s.mutex.Unlock()
- return SampleStdDev(s.values)
-}
-
-// Sum returns the sum of the values in the sample.
-func (s *UniformSample) Sum() int64 {
- s.mutex.Lock()
- defer s.mutex.Unlock()
- return SampleSum(s.values)
+ count := s.count
+ s.mutex.Unlock()
+ return newSampleSnapshot(count, values)
}
// Update samples a new value.
@@ -511,29 +358,18 @@ func (s *UniformSample) Update(v int64) {
if len(s.values) < s.reservoirSize {
s.values = append(s.values, v)
} else {
- r := rand.Int63n(s.count)
+ var r int64
+ if s.rand != nil {
+ r = s.rand.Int63n(s.count)
+ } else {
+ r = rand.Int63n(s.count)
+ }
if r < int64(len(s.values)) {
s.values[int(r)] = v
}
}
}
-// Values returns a copy of the values in the sample.
-func (s *UniformSample) Values() []int64 {
- s.mutex.Lock()
- defer s.mutex.Unlock()
- values := make([]int64, len(s.values))
- copy(values, s.values)
- return values
-}
-
-// Variance returns the variance of the values in the sample.
-func (s *UniformSample) Variance() float64 {
- s.mutex.Lock()
- defer s.mutex.Unlock()
- return SampleVariance(s.values)
-}
-
// expDecaySample represents an individual sample in a heap.
type expDecaySample struct {
k float64
@@ -608,9 +444,3 @@ func (h *expDecaySampleHeap) down(i, n int) {
i = j
}
}
-
-type int64Slice []int64
-
-func (p int64Slice) Len() int { return len(p) }
-func (p int64Slice) Less(i, j int) bool { return p[i] < p[j] }
-func (p int64Slice) Swap(i, j int) { p[i], p[j] = p[j], p[i] }
diff --git a/metrics/sample_test.go b/metrics/sample_test.go
index d60e99c5b..796735705 100644
--- a/metrics/sample_test.go
+++ b/metrics/sample_test.go
@@ -1,34 +1,43 @@
package metrics
import (
+ "math"
"math/rand"
"runtime"
"testing"
"time"
)
+const epsilonPercentile = .00000000001
+
// Benchmark{Compute,Copy}{1000,1000000} demonstrate that, even for relatively
// expensive computations like Variance, the cost of copying the Sample, as
// approximated by a make and copy, is much greater than the cost of the
// computation for small samples and only slightly less for large samples.
func BenchmarkCompute1000(b *testing.B) {
s := make([]int64, 1000)
+ var sum int64
for i := 0; i < len(s); i++ {
s[i] = int64(i)
+ sum += int64(i)
}
+ mean := float64(sum) / float64(len(s))
b.ResetTimer()
for i := 0; i < b.N; i++ {
- SampleVariance(s)
+ SampleVariance(mean, s)
}
}
func BenchmarkCompute1000000(b *testing.B) {
s := make([]int64, 1000000)
+ var sum int64
for i := 0; i < len(s); i++ {
s[i] = int64(i)
+ sum += int64(i)
}
+ mean := float64(sum) / float64(len(s))
b.ResetTimer()
for i := 0; i < b.N; i++ {
- SampleVariance(s)
+ SampleVariance(mean, s)
}
}
func BenchmarkCopy1000(b *testing.B) {
@@ -78,68 +87,42 @@ func BenchmarkUniformSample1028(b *testing.B) {
benchmarkSample(b, NewUniformSample(1028))
}
-func TestExpDecaySample10(t *testing.T) {
- rand.Seed(1)
- s := NewExpDecaySample(100, 0.99)
- for i := 0; i < 10; i++ {
- s.Update(int64(i))
- }
- if size := s.Count(); 10 != size {
- t.Errorf("s.Count(): 10 != %v\n", size)
- }
- if size := s.Size(); 10 != size {
- t.Errorf("s.Size(): 10 != %v\n", size)
- }
- if l := len(s.Values()); 10 != l {
- t.Errorf("len(s.Values()): 10 != %v\n", l)
- }
- for _, v := range s.Values() {
- if v > 10 || v < 0 {
- t.Errorf("out of range [0, 10): %v\n", v)
- }
+func min(a, b int) int {
+ if a < b {
+ return a
}
+ return b
}
-func TestExpDecaySample100(t *testing.T) {
- rand.Seed(1)
- s := NewExpDecaySample(1000, 0.01)
- for i := 0; i < 100; i++ {
- s.Update(int64(i))
- }
- if size := s.Count(); 100 != size {
- t.Errorf("s.Count(): 100 != %v\n", size)
- }
- if size := s.Size(); 100 != size {
- t.Errorf("s.Size(): 100 != %v\n", size)
- }
- if l := len(s.Values()); 100 != l {
- t.Errorf("len(s.Values()): 100 != %v\n", l)
- }
- for _, v := range s.Values() {
- if v > 100 || v < 0 {
- t.Errorf("out of range [0, 100): %v\n", v)
+func TestExpDecaySample(t *testing.T) {
+ for _, tc := range []struct {
+ reservoirSize int
+ alpha float64
+ updates int
+ }{
+ {100, 0.99, 10},
+ {1000, 0.01, 100},
+ {100, 0.99, 1000},
+ } {
+ sample := NewExpDecaySample(tc.reservoirSize, tc.alpha)
+ for i := 0; i < tc.updates; i++ {
+ sample.Update(int64(i))
}
- }
-}
-
-func TestExpDecaySample1000(t *testing.T) {
- rand.Seed(1)
- s := NewExpDecaySample(100, 0.99)
- for i := 0; i < 1000; i++ {
- s.Update(int64(i))
- }
- if size := s.Count(); 1000 != size {
- t.Errorf("s.Count(): 1000 != %v\n", size)
- }
- if size := s.Size(); 100 != size {
- t.Errorf("s.Size(): 100 != %v\n", size)
- }
- if l := len(s.Values()); 100 != l {
- t.Errorf("len(s.Values()): 100 != %v\n", l)
- }
- for _, v := range s.Values() {
- if v > 1000 || v < 0 {
- t.Errorf("out of range [0, 1000): %v\n", v)
+ snap := sample.Snapshot()
+ if have, want := int(snap.Count()), tc.updates; have != want {
+ t.Errorf("have %d want %d", have, want)
+ }
+ if have, want := snap.Size(), min(tc.updates, tc.reservoirSize); have != want {
+ t.Errorf("have %d want %d", have, want)
+ }
+ values := snap.(*sampleSnapshot).values
+ if have, want := len(values), min(tc.updates, tc.reservoirSize); have != want {
+ t.Errorf("have %d want %d", have, want)
+ }
+ for _, v := range values {
+ if v > int64(tc.updates) || v < 0 {
+ t.Errorf("out of range [0, %d): %v", tc.updates, v)
+ }
}
}
}
@@ -149,16 +132,16 @@ func TestExpDecaySample1000(t *testing.T) {
// The priority becomes +Inf quickly after starting if this is done,
// effectively freezing the set of samples until a rescale step happens.
func TestExpDecaySampleNanosecondRegression(t *testing.T) {
- rand.Seed(1)
- s := NewExpDecaySample(100, 0.99)
+ sw := NewExpDecaySample(100, 0.99)
for i := 0; i < 100; i++ {
- s.Update(10)
+ sw.Update(10)
}
time.Sleep(1 * time.Millisecond)
for i := 0; i < 100; i++ {
- s.Update(20)
+ sw.Update(20)
}
- v := s.Values()
+ s := sw.Snapshot()
+ v := s.(*sampleSnapshot).values
avg := float64(0)
for i := 0; i < len(v); i++ {
avg += float64(v[i])
@@ -182,8 +165,7 @@ func TestExpDecaySampleRescale(t *testing.T) {
func TestExpDecaySampleSnapshot(t *testing.T) {
now := time.Now()
- rand.Seed(1)
- s := NewExpDecaySample(100, 0.99)
+ s := NewExpDecaySample(100, 0.99).(*ExpDecaySample).SetRand(rand.New(rand.NewSource(1)))
for i := 1; i <= 10000; i++ {
s.(*ExpDecaySample).update(now.Add(time.Duration(i)), int64(i))
}
@@ -194,30 +176,31 @@ func TestExpDecaySampleSnapshot(t *testing.T) {
func TestExpDecaySampleStatistics(t *testing.T) {
now := time.Now()
- rand.Seed(1)
- s := NewExpDecaySample(100, 0.99)
+ s := NewExpDecaySample(100, 0.99).(*ExpDecaySample).SetRand(rand.New(rand.NewSource(1)))
for i := 1; i <= 10000; i++ {
s.(*ExpDecaySample).update(now.Add(time.Duration(i)), int64(i))
}
- testExpDecaySampleStatistics(t, s)
+ testExpDecaySampleStatistics(t, s.Snapshot())
}
func TestUniformSample(t *testing.T) {
- rand.Seed(1)
- s := NewUniformSample(100)
+ sw := NewUniformSample(100)
for i := 0; i < 1000; i++ {
- s.Update(int64(i))
+ sw.Update(int64(i))
}
- if size := s.Count(); 1000 != size {
+ s := sw.Snapshot()
+ if size := s.Count(); size != 1000 {
t.Errorf("s.Count(): 1000 != %v\n", size)
}
- if size := s.Size(); 100 != size {
+ if size := s.Size(); size != 100 {
t.Errorf("s.Size(): 100 != %v\n", size)
}
- if l := len(s.Values()); 100 != l {
+ values := s.(*sampleSnapshot).values
+
+ if l := len(values); l != 100 {
t.Errorf("len(s.Values()): 100 != %v\n", l)
}
- for _, v := range s.Values() {
+ for _, v := range values {
if v > 1000 || v < 0 {
t.Errorf("out of range [0, 100): %v\n", v)
}
@@ -225,13 +208,13 @@ func TestUniformSample(t *testing.T) {
}
func TestUniformSampleIncludesTail(t *testing.T) {
- rand.Seed(1)
- s := NewUniformSample(100)
+ sw := NewUniformSample(100)
max := 100
for i := 0; i < max; i++ {
- s.Update(int64(i))
+ sw.Update(int64(i))
}
- v := s.Values()
+ s := sw.Snapshot()
+ v := s.(*sampleSnapshot).values
sum := 0
exp := (max - 1) * max / 2
for i := 0; i < len(v); i++ {
@@ -243,7 +226,7 @@ func TestUniformSampleIncludesTail(t *testing.T) {
}
func TestUniformSampleSnapshot(t *testing.T) {
- s := NewUniformSample(100)
+ s := NewUniformSample(100).(*UniformSample).SetRand(rand.New(rand.NewSource(1)))
for i := 1; i <= 10000; i++ {
s.Update(int64(i))
}
@@ -253,12 +236,11 @@ func TestUniformSampleSnapshot(t *testing.T) {
}
func TestUniformSampleStatistics(t *testing.T) {
- rand.Seed(1)
- s := NewUniformSample(100)
+ s := NewUniformSample(100).(*UniformSample).SetRand(rand.New(rand.NewSource(1)))
for i := 1; i <= 10000; i++ {
s.Update(int64(i))
}
- testUniformSampleStatistics(t, s)
+ testUniformSampleStatistics(t, s.Snapshot())
}
func benchmarkSample(b *testing.B, s Sample) {
@@ -275,58 +257,58 @@ func benchmarkSample(b *testing.B, s Sample) {
b.Logf("GC cost: %d ns/op", int(memStats.PauseTotalNs-pauseTotalNs)/b.N)
}
-func testExpDecaySampleStatistics(t *testing.T, s Sample) {
- if count := s.Count(); 10000 != count {
+func testExpDecaySampleStatistics(t *testing.T, s SampleSnapshot) {
+ if count := s.Count(); count != 10000 {
t.Errorf("s.Count(): 10000 != %v\n", count)
}
- if min := s.Min(); 107 != min {
+ if min := s.Min(); min != 107 {
t.Errorf("s.Min(): 107 != %v\n", min)
}
- if max := s.Max(); 10000 != max {
+ if max := s.Max(); max != 10000 {
t.Errorf("s.Max(): 10000 != %v\n", max)
}
- if mean := s.Mean(); 4965.98 != mean {
+ if mean := s.Mean(); mean != 4965.98 {
t.Errorf("s.Mean(): 4965.98 != %v\n", mean)
}
- if stdDev := s.StdDev(); 2959.825156930727 != stdDev {
+ if stdDev := s.StdDev(); stdDev != 2959.825156930727 {
t.Errorf("s.StdDev(): 2959.825156930727 != %v\n", stdDev)
}
ps := s.Percentiles([]float64{0.5, 0.75, 0.99})
- if 4615 != ps[0] {
+ if ps[0] != 4615 {
t.Errorf("median: 4615 != %v\n", ps[0])
}
- if 7672 != ps[1] {
+ if ps[1] != 7672 {
t.Errorf("75th percentile: 7672 != %v\n", ps[1])
}
- if 9998.99 != ps[2] {
+ if ps[2] != 9998.99 {
t.Errorf("99th percentile: 9998.99 != %v\n", ps[2])
}
}
-func testUniformSampleStatistics(t *testing.T, s Sample) {
- if count := s.Count(); 10000 != count {
+func testUniformSampleStatistics(t *testing.T, s SampleSnapshot) {
+ if count := s.Count(); count != 10000 {
t.Errorf("s.Count(): 10000 != %v\n", count)
}
- if min := s.Min(); 37 != min {
+ if min := s.Min(); min != 37 {
t.Errorf("s.Min(): 37 != %v\n", min)
}
- if max := s.Max(); 9989 != max {
+ if max := s.Max(); max != 9989 {
t.Errorf("s.Max(): 9989 != %v\n", max)
}
- if mean := s.Mean(); 4748.14 != mean {
+ if mean := s.Mean(); mean != 4748.14 {
t.Errorf("s.Mean(): 4748.14 != %v\n", mean)
}
- if stdDev := s.StdDev(); 2826.684117548333 != stdDev {
+ if stdDev := s.StdDev(); stdDev != 2826.684117548333 {
t.Errorf("s.StdDev(): 2826.684117548333 != %v\n", stdDev)
}
ps := s.Percentiles([]float64{0.5, 0.75, 0.99})
- if 4599 != ps[0] {
+ if ps[0] != 4599 {
t.Errorf("median: 4599 != %v\n", ps[0])
}
- if 7380.5 != ps[1] {
+ if ps[1] != 7380.5 {
t.Errorf("75th percentile: 7380.5 != %v\n", ps[1])
}
- if 9986.429999999998 != ps[2] {
+ if math.Abs(9986.429999999998-ps[2]) > epsilonPercentile {
t.Errorf("99th percentile: 9986.429999999998 != %v\n", ps[2])
}
}
@@ -345,6 +327,7 @@ func TestUniformSampleConcurrentUpdateCount(t *testing.T) {
quit := make(chan struct{})
go func() {
t := time.NewTicker(10 * time.Millisecond)
+ defer t.Stop()
for {
select {
case <-t.C:
@@ -356,8 +339,22 @@ func TestUniformSampleConcurrentUpdateCount(t *testing.T) {
}
}()
for i := 0; i < 1000; i++ {
- s.Count()
+ s.Snapshot().Count()
time.Sleep(5 * time.Millisecond)
}
quit <- struct{}{}
}
+
+func BenchmarkCalculatePercentiles(b *testing.B) {
+ pss := []float64{0.5, 0.75, 0.95, 0.99, 0.999, 0.9999}
+ var vals []int64
+ for i := 0; i < 1000; i++ {
+ vals = append(vals, int64(rand.Int31()))
+ }
+ v := make([]int64, len(vals))
+ b.ResetTimer()
+ for i := 0; i < b.N; i++ {
+ copy(v, vals)
+ _ = CalculatePercentiles(v, pss)
+ }
+}
diff --git a/metrics/syslog.go b/metrics/syslog.go
index a0ed4b1b2..fd856d697 100644
--- a/metrics/syslog.go
+++ b/metrics/syslog.go
@@ -1,3 +1,4 @@
+//go:build !windows
// +build !windows
package metrics
@@ -15,11 +16,15 @@ func Syslog(r Registry, d time.Duration, w *syslog.Writer) {
r.Each(func(name string, i interface{}) {
switch metric := i.(type) {
case Counter:
- w.Info(fmt.Sprintf("counter %s: count: %d", name, metric.Count()))
+ w.Info(fmt.Sprintf("counter %s: count: %d", name, metric.Snapshot().Count()))
+ case CounterFloat64:
+ w.Info(fmt.Sprintf("counter %s: count: %f", name, metric.Snapshot().Count()))
case Gauge:
- w.Info(fmt.Sprintf("gauge %s: value: %d", name, metric.Value()))
+ w.Info(fmt.Sprintf("gauge %s: value: %d", name, metric.Snapshot().Value()))
case GaugeFloat64:
- w.Info(fmt.Sprintf("gauge %s: value: %f", name, metric.Value()))
+ w.Info(fmt.Sprintf("gauge %s: value: %f", name, metric.Snapshot().Value()))
+ case GaugeInfo:
+ w.Info(fmt.Sprintf("gauge %s: value: %s", name, metric.Snapshot().Value()))
case Healthcheck:
metric.Check()
w.Info(fmt.Sprintf("healthcheck %s: error: %v", name, metric.Error()))
diff --git a/metrics/testdata/opentsb.want b/metrics/testdata/opentsb.want
new file mode 100644
index 000000000..43fe1b2ac
--- /dev/null
+++ b/metrics/testdata/opentsb.want
@@ -0,0 +1,23 @@
+put pre.elite.count 978307200 1337 host=hal9000
+put pre.elite.one-minute 978307200 0.00 host=hal9000
+put pre.elite.five-minute 978307200 0.00 host=hal9000
+put pre.elite.fifteen-minute 978307200 0.00 host=hal9000
+put pre.elite.mean 978307200 0.00 host=hal9000
+put pre.foo.value 978307200 {"chain_id":"5"} host=hal9000
+put pre.months.count 978307200 12 host=hal9000
+put pre.pi.value 978307200 3.140000 host=hal9000
+put pre.second.count 978307200 1 host=hal9000
+put pre.second.min 978307200 1000 host=hal9000
+put pre.second.max 978307200 1000 host=hal9000
+put pre.second.mean 978307200 1000.00 host=hal9000
+put pre.second.std-dev 978307200 0.00 host=hal9000
+put pre.second.50-percentile 978307200 1000.00 host=hal9000
+put pre.second.75-percentile 978307200 1000.00 host=hal9000
+put pre.second.95-percentile 978307200 1000.00 host=hal9000
+put pre.second.99-percentile 978307200 1000.00 host=hal9000
+put pre.second.999-percentile 978307200 1000.00 host=hal9000
+put pre.second.one-minute 978307200 0.00 host=hal9000
+put pre.second.five-minute 978307200 0.00 host=hal9000
+put pre.second.fifteen-minute 978307200 0.00 host=hal9000
+put pre.second.mean-rate 978307200 0.00 host=hal9000
+put pre.tau.count 978307200 1.570000 host=hal9000
diff --git a/metrics/timer.go b/metrics/timer.go
index 89e22208f..576ad8aa3 100644
--- a/metrics/timer.go
+++ b/metrics/timer.go
@@ -5,26 +5,18 @@ import (
"time"
)
+type TimerSnapshot interface {
+ HistogramSnapshot
+ MeterSnapshot
+}
+
// Timers capture the duration and rate of events.
type Timer interface {
- Count() int64
- Max() int64
- Mean() float64
- Min() int64
- Percentile(float64) float64
- Percentiles([]float64) []float64
- Rate1() float64
- Rate5() float64
- Rate15() float64
- RateMean() float64
- Snapshot() Timer
- StdDev() float64
+ Snapshot() TimerSnapshot
Stop()
- Sum() int64
Time(func())
- Update(time.Duration)
UpdateSince(time.Time)
- Variance() float64
+ Update(time.Duration)
}
// GetOrRegisterTimer returns an existing Timer or constructs and registers a
@@ -76,66 +68,13 @@ func NewTimer() Timer {
}
// NilTimer is a no-op Timer.
-type NilTimer struct {
- h Histogram
- m Meter
-}
-
-// Count is a no-op.
-func (NilTimer) Count() int64 { return 0 }
-
-// Max is a no-op.
-func (NilTimer) Max() int64 { return 0 }
-
-// Mean is a no-op.
-func (NilTimer) Mean() float64 { return 0.0 }
-
-// Min is a no-op.
-func (NilTimer) Min() int64 { return 0 }
-
-// Percentile is a no-op.
-func (NilTimer) Percentile(p float64) float64 { return 0.0 }
-
-// Percentiles is a no-op.
-func (NilTimer) Percentiles(ps []float64) []float64 {
- return make([]float64, len(ps))
-}
-
-// Rate1 is a no-op.
-func (NilTimer) Rate1() float64 { return 0.0 }
-
-// Rate5 is a no-op.
-func (NilTimer) Rate5() float64 { return 0.0 }
-
-// Rate15 is a no-op.
-func (NilTimer) Rate15() float64 { return 0.0 }
+type NilTimer struct{}
-// RateMean is a no-op.
-func (NilTimer) RateMean() float64 { return 0.0 }
-
-// Snapshot is a no-op.
-func (NilTimer) Snapshot() Timer { return NilTimer{} }
-
-// StdDev is a no-op.
-func (NilTimer) StdDev() float64 { return 0.0 }
-
-// Stop is a no-op.
-func (NilTimer) Stop() {}
-
-// Sum is a no-op.
-func (NilTimer) Sum() int64 { return 0 }
-
-// Time is a no-op.
-func (NilTimer) Time(func()) {}
-
-// Update is a no-op.
-func (NilTimer) Update(time.Duration) {}
-
-// UpdateSince is a no-op.
-func (NilTimer) UpdateSince(time.Time) {}
-
-// Variance is a no-op.
-func (NilTimer) Variance() float64 { return 0.0 }
+func (NilTimer) Snapshot() TimerSnapshot { return (*emptySnapshot)(nil) }
+func (NilTimer) Stop() {}
+func (NilTimer) Time(f func()) { f() }
+func (NilTimer) Update(time.Duration) {}
+func (NilTimer) UpdateSince(time.Time) {}
// StandardTimer is the standard implementation of a Timer and uses a Histogram
// and Meter.
@@ -145,82 +84,21 @@ type StandardTimer struct {
mutex sync.Mutex
}
-// Count returns the number of events recorded.
-func (t *StandardTimer) Count() int64 {
- return t.histogram.Count()
-}
-
-// Max returns the maximum value in the sample.
-func (t *StandardTimer) Max() int64 {
- return t.histogram.Max()
-}
-
-// Mean returns the mean of the values in the sample.
-func (t *StandardTimer) Mean() float64 {
- return t.histogram.Mean()
-}
-
-// Min returns the minimum value in the sample.
-func (t *StandardTimer) Min() int64 {
- return t.histogram.Min()
-}
-
-// Percentile returns an arbitrary percentile of the values in the sample.
-func (t *StandardTimer) Percentile(p float64) float64 {
- return t.histogram.Percentile(p)
-}
-
-// Percentiles returns a slice of arbitrary percentiles of the values in the
-// sample.
-func (t *StandardTimer) Percentiles(ps []float64) []float64 {
- return t.histogram.Percentiles(ps)
-}
-
-// Rate1 returns the one-minute moving average rate of events per second.
-func (t *StandardTimer) Rate1() float64 {
- return t.meter.Rate1()
-}
-
-// Rate5 returns the five-minute moving average rate of events per second.
-func (t *StandardTimer) Rate5() float64 {
- return t.meter.Rate5()
-}
-
-// Rate15 returns the fifteen-minute moving average rate of events per second.
-func (t *StandardTimer) Rate15() float64 {
- return t.meter.Rate15()
-}
-
-// RateMean returns the meter's mean rate of events per second.
-func (t *StandardTimer) RateMean() float64 {
- return t.meter.RateMean()
-}
-
// Snapshot returns a read-only copy of the timer.
-func (t *StandardTimer) Snapshot() Timer {
+func (t *StandardTimer) Snapshot() TimerSnapshot {
t.mutex.Lock()
defer t.mutex.Unlock()
- return &TimerSnapshot{
- histogram: t.histogram.Snapshot().(*HistogramSnapshot),
- meter: t.meter.Snapshot().(*MeterSnapshot),
+ return &timerSnapshot{
+ histogram: t.histogram.Snapshot(),
+ meter: t.meter.Snapshot(),
}
}
-// StdDev returns the standard deviation of the values in the sample.
-func (t *StandardTimer) StdDev() float64 {
- return t.histogram.StdDev()
-}
-
// Stop stops the meter.
func (t *StandardTimer) Stop() {
t.meter.Stop()
}
-// Sum returns the sum in the sample.
-func (t *StandardTimer) Sum() int64 {
- return t.histogram.Sum()
-}
-
// Record the duration of the execution of the given function.
func (t *StandardTimer) Time(f func()) {
ts := time.Now()
@@ -244,86 +122,63 @@ func (t *StandardTimer) UpdateSince(ts time.Time) {
t.meter.Mark(1)
}
-// Variance returns the variance of the values in the sample.
-func (t *StandardTimer) Variance() float64 {
- return t.histogram.Variance()
-}
-
-// TimerSnapshot is a read-only copy of another Timer.
-type TimerSnapshot struct {
- histogram *HistogramSnapshot
- meter *MeterSnapshot
+// timerSnapshot is a read-only copy of another Timer.
+type timerSnapshot struct {
+ histogram HistogramSnapshot
+ meter MeterSnapshot
}
// Count returns the number of events recorded at the time the snapshot was
// taken.
-func (t *TimerSnapshot) Count() int64 { return t.histogram.Count() }
+func (t *timerSnapshot) Count() int64 { return t.histogram.Count() }
// Max returns the maximum value at the time the snapshot was taken.
-func (t *TimerSnapshot) Max() int64 { return t.histogram.Max() }
+func (t *timerSnapshot) Max() int64 { return t.histogram.Max() }
+
+// Size returns the size of the sample at the time the snapshot was taken.
+func (t *timerSnapshot) Size() int { return t.histogram.Size() }
// Mean returns the mean value at the time the snapshot was taken.
-func (t *TimerSnapshot) Mean() float64 { return t.histogram.Mean() }
+func (t *timerSnapshot) Mean() float64 { return t.histogram.Mean() }
// Min returns the minimum value at the time the snapshot was taken.
-func (t *TimerSnapshot) Min() int64 { return t.histogram.Min() }
+func (t *timerSnapshot) Min() int64 { return t.histogram.Min() }
// Percentile returns an arbitrary percentile of sampled values at the time the
// snapshot was taken.
-func (t *TimerSnapshot) Percentile(p float64) float64 {
+func (t *timerSnapshot) Percentile(p float64) float64 {
return t.histogram.Percentile(p)
}
// Percentiles returns a slice of arbitrary percentiles of sampled values at
// the time the snapshot was taken.
-func (t *TimerSnapshot) Percentiles(ps []float64) []float64 {
+func (t *timerSnapshot) Percentiles(ps []float64) []float64 {
return t.histogram.Percentiles(ps)
}
// Rate1 returns the one-minute moving average rate of events per second at the
// time the snapshot was taken.
-func (t *TimerSnapshot) Rate1() float64 { return t.meter.Rate1() }
+func (t *timerSnapshot) Rate1() float64 { return t.meter.Rate1() }
// Rate5 returns the five-minute moving average rate of events per second at
// the time the snapshot was taken.
-func (t *TimerSnapshot) Rate5() float64 { return t.meter.Rate5() }
+func (t *timerSnapshot) Rate5() float64 { return t.meter.Rate5() }
// Rate15 returns the fifteen-minute moving average rate of events per second
// at the time the snapshot was taken.
-func (t *TimerSnapshot) Rate15() float64 { return t.meter.Rate15() }
+func (t *timerSnapshot) Rate15() float64 { return t.meter.Rate15() }
// RateMean returns the meter's mean rate of events per second at the time the
// snapshot was taken.
-func (t *TimerSnapshot) RateMean() float64 { return t.meter.RateMean() }
-
-// Snapshot returns the snapshot.
-func (t *TimerSnapshot) Snapshot() Timer { return t }
+func (t *timerSnapshot) RateMean() float64 { return t.meter.RateMean() }
// StdDev returns the standard deviation of the values at the time the snapshot
// was taken.
-func (t *TimerSnapshot) StdDev() float64 { return t.histogram.StdDev() }
-
-// Stop is a no-op.
-func (t *TimerSnapshot) Stop() {}
+func (t *timerSnapshot) StdDev() float64 { return t.histogram.StdDev() }
// Sum returns the sum at the time the snapshot was taken.
-func (t *TimerSnapshot) Sum() int64 { return t.histogram.Sum() }
-
-// Time panics.
-func (*TimerSnapshot) Time(func()) {
- panic("Time called on a TimerSnapshot")
-}
-
-// Update panics.
-func (*TimerSnapshot) Update(time.Duration) {
- panic("Update called on a TimerSnapshot")
-}
-
-// UpdateSince panics.
-func (*TimerSnapshot) UpdateSince(time.Time) {
- panic("UpdateSince called on a TimerSnapshot")
-}
+func (t *timerSnapshot) Sum() int64 { return t.histogram.Sum() }
// Variance returns the variance of the values at the time the snapshot was
// taken.
-func (t *TimerSnapshot) Variance() float64 { return t.histogram.Variance() }
+func (t *timerSnapshot) Variance() float64 { return t.histogram.Variance() }
diff --git a/metrics/timer_test.go b/metrics/timer_test.go
index c1f0ff938..f10de16c9 100644
--- a/metrics/timer_test.go
+++ b/metrics/timer_test.go
@@ -18,7 +18,7 @@ func BenchmarkTimer(b *testing.B) {
func TestGetOrRegisterTimer(t *testing.T) {
r := NewRegistry()
NewRegisteredTimer("foo", r).Update(47)
- if tm := GetOrRegisterTimer("foo", r); 1 != tm.Count() {
+ if tm := GetOrRegisterTimer("foo", r).Snapshot(); tm.Count() != 1 {
t.Fatal(tm)
}
}
@@ -27,7 +27,7 @@ func TestTimerExtremes(t *testing.T) {
tm := NewTimer()
tm.Update(math.MaxInt64)
tm.Update(0)
- if stdDev := tm.StdDev(); 4.611686018427388e+18 != stdDev {
+ if stdDev := tm.Snapshot().StdDev(); stdDev != 4.611686018427388e+18 {
t.Errorf("tm.StdDev(): 4.611686018427388e+18 != %v\n", stdDev)
}
}
@@ -35,60 +35,73 @@ func TestTimerExtremes(t *testing.T) {
func TestTimerStop(t *testing.T) {
l := len(arbiter.meters)
tm := NewTimer()
- if len(arbiter.meters) != l+1 {
+ if l+1 != len(arbiter.meters) {
t.Errorf("arbiter.meters: %d != %d\n", l+1, len(arbiter.meters))
}
tm.Stop()
- if len(arbiter.meters) != l {
+ if l != len(arbiter.meters) {
t.Errorf("arbiter.meters: %d != %d\n", l, len(arbiter.meters))
}
}
func TestTimerFunc(t *testing.T) {
- tm := NewTimer()
- tm.Time(func() { time.Sleep(50e6) })
- if max := tm.Max(); 35e6 > max || max > 95e6 {
- t.Errorf("tm.Max(): 35e6 > %v || %v > 95e6\n", max, max)
+ var (
+ tm = NewTimer()
+ testStart = time.Now()
+ actualTime time.Duration
+ )
+ tm.Time(func() {
+ time.Sleep(50 * time.Millisecond)
+ actualTime = time.Since(testStart)
+ })
+ var (
+ drift = time.Millisecond * 2
+ measured = time.Duration(tm.Snapshot().Max())
+ ceil = actualTime + drift
+ floor = actualTime - drift
+ )
+ if measured > ceil || measured < floor {
+ t.Errorf("tm.Max(): %v > %v || %v > %v\n", measured, ceil, measured, floor)
}
}
func TestTimerZero(t *testing.T) {
- tm := NewTimer()
- if count := tm.Count(); 0 != count {
+ tm := NewTimer().Snapshot()
+ if count := tm.Count(); count != 0 {
t.Errorf("tm.Count(): 0 != %v\n", count)
}
- if min := tm.Min(); 0 != min {
+ if min := tm.Min(); min != 0 {
t.Errorf("tm.Min(): 0 != %v\n", min)
}
- if max := tm.Max(); 0 != max {
+ if max := tm.Max(); max != 0 {
t.Errorf("tm.Max(): 0 != %v\n", max)
}
- if mean := tm.Mean(); 0.0 != mean {
+ if mean := tm.Mean(); mean != 0.0 {
t.Errorf("tm.Mean(): 0.0 != %v\n", mean)
}
- if stdDev := tm.StdDev(); 0.0 != stdDev {
+ if stdDev := tm.StdDev(); stdDev != 0.0 {
t.Errorf("tm.StdDev(): 0.0 != %v\n", stdDev)
}
ps := tm.Percentiles([]float64{0.5, 0.75, 0.99})
- if 0.0 != ps[0] {
+ if ps[0] != 0.0 {
t.Errorf("median: 0.0 != %v\n", ps[0])
}
- if 0.0 != ps[1] {
+ if ps[1] != 0.0 {
t.Errorf("75th percentile: 0.0 != %v\n", ps[1])
}
- if 0.0 != ps[2] {
+ if ps[2] != 0.0 {
t.Errorf("99th percentile: 0.0 != %v\n", ps[2])
}
- if rate1 := tm.Rate1(); 0.0 != rate1 {
+ if rate1 := tm.Rate1(); rate1 != 0.0 {
t.Errorf("tm.Rate1(): 0.0 != %v\n", rate1)
}
- if rate5 := tm.Rate5(); 0.0 != rate5 {
+ if rate5 := tm.Rate5(); rate5 != 0.0 {
t.Errorf("tm.Rate5(): 0.0 != %v\n", rate5)
}
- if rate15 := tm.Rate15(); 0.0 != rate15 {
+ if rate15 := tm.Rate15(); rate15 != 0.0 {
t.Errorf("tm.Rate15(): 0.0 != %v\n", rate15)
}
- if rateMean := tm.RateMean(); 0.0 != rateMean {
+ if rateMean := tm.RateMean(); rateMean != 0.0 {
t.Errorf("tm.RateMean(): 0.0 != %v\n", rateMean)
}
}
@@ -97,5 +110,5 @@ func ExampleGetOrRegisterTimer() {
m := "account.create.latency"
t := GetOrRegisterTimer(m, nil)
t.Update(47)
- fmt.Println(t.Max()) // Output: 47
+ fmt.Println(t.Snapshot().Max()) // Output: 47
}
diff --git a/metrics/writer.go b/metrics/writer.go
index 88521a80d..098da45c2 100644
--- a/metrics/writer.go
+++ b/metrics/writer.go
@@ -3,8 +3,10 @@ package metrics
import (
"fmt"
"io"
- "sort"
+ "strings"
"time"
+
+ "golang.org/x/exp/slices"
)
// Write sorts writes each metric in the given registry periodically to the
@@ -18,23 +20,28 @@ func Write(r Registry, d time.Duration, w io.Writer) {
// WriteOnce sorts and writes metrics in the given registry to the given
// io.Writer.
func WriteOnce(r Registry, w io.Writer) {
- var namedMetrics namedMetricSlice
+ var namedMetrics []namedMetric
r.Each(func(name string, i interface{}) {
namedMetrics = append(namedMetrics, namedMetric{name, i})
})
-
- sort.Sort(namedMetrics)
+ slices.SortFunc(namedMetrics, namedMetric.cmp)
for _, namedMetric := range namedMetrics {
switch metric := namedMetric.m.(type) {
case Counter:
fmt.Fprintf(w, "counter %s\n", namedMetric.name)
- fmt.Fprintf(w, " count: %9d\n", metric.Count())
+ fmt.Fprintf(w, " count: %9d\n", metric.Snapshot().Count())
+ case CounterFloat64:
+ fmt.Fprintf(w, "counter %s\n", namedMetric.name)
+ fmt.Fprintf(w, " count: %f\n", metric.Snapshot().Count())
case Gauge:
fmt.Fprintf(w, "gauge %s\n", namedMetric.name)
- fmt.Fprintf(w, " value: %9d\n", metric.Value())
+ fmt.Fprintf(w, " value: %9d\n", metric.Snapshot().Value())
case GaugeFloat64:
fmt.Fprintf(w, "gauge %s\n", namedMetric.name)
- fmt.Fprintf(w, " value: %f\n", metric.Value())
+ fmt.Fprintf(w, " value: %f\n", metric.Snapshot().Value())
+ case GaugeInfo:
+ fmt.Fprintf(w, "gauge %s\n", namedMetric.name)
+ fmt.Fprintf(w, " value: %s\n", metric.Snapshot().Value().String())
case Healthcheck:
metric.Check()
fmt.Fprintf(w, "healthcheck %s\n", namedMetric.name)
@@ -88,13 +95,6 @@ type namedMetric struct {
m interface{}
}
-// namedMetricSlice is a slice of namedMetrics that implements sort.Interface.
-type namedMetricSlice []namedMetric
-
-func (nms namedMetricSlice) Len() int { return len(nms) }
-
-func (nms namedMetricSlice) Swap(i, j int) { nms[i], nms[j] = nms[j], nms[i] }
-
-func (nms namedMetricSlice) Less(i, j int) bool {
- return nms[i].name < nms[j].name
+func (m namedMetric) cmp(other namedMetric) int {
+ return strings.Compare(m.name, other.name)
}
diff --git a/metrics/writer_test.go b/metrics/writer_test.go
index 1aacc2871..8376bf897 100644
--- a/metrics/writer_test.go
+++ b/metrics/writer_test.go
@@ -1,19 +1,20 @@
package metrics
import (
- "sort"
"testing"
+
+ "golang.org/x/exp/slices"
)
func TestMetricsSorting(t *testing.T) {
- var namedMetrics = namedMetricSlice{
+ var namedMetrics = []namedMetric{
{name: "zzz"},
{name: "bbb"},
{name: "fff"},
{name: "ggg"},
}
- sort.Sort(namedMetrics)
+ slices.SortFunc(namedMetrics, namedMetric.cmp)
for i, name := range []string{"bbb", "fff", "ggg", "zzz"} {
if namedMetrics[i].name != name {
t.Fail()
diff --git a/node/api.go b/node/api.go
index 23edbe2b3..bf84463a2 100644
--- a/node/api.go
+++ b/node/api.go
@@ -310,31 +310,31 @@ func (api *PublicDebugAPI) Metrics(raw bool) (map[string]interface{}, error) {
switch metric := metric.(type) {
case metrics.Counter:
root[name] = map[string]interface{}{
- "Overall": float64(metric.Count()),
+ "Overall": float64(metric.Snapshot().Count()),
}
case metrics.Meter:
root[name] = map[string]interface{}{
- "AvgRate01Min": metric.Rate1(),
- "AvgRate05Min": metric.Rate5(),
- "AvgRate15Min": metric.Rate15(),
- "MeanRate": metric.RateMean(),
- "Overall": float64(metric.Count()),
+ "AvgRate01Min": metric.Snapshot().Rate1(),
+ "AvgRate05Min": metric.Snapshot().Rate5(),
+ "AvgRate15Min": metric.Snapshot().Rate15(),
+ "MeanRate": metric.Snapshot().RateMean(),
+ "Overall": float64(metric.Snapshot().Count()),
}
case metrics.Timer:
root[name] = map[string]interface{}{
- "AvgRate01Min": metric.Rate1(),
- "AvgRate05Min": metric.Rate5(),
- "AvgRate15Min": metric.Rate15(),
- "MeanRate": metric.RateMean(),
- "Overall": float64(metric.Count()),
+ "AvgRate01Min": metric.Snapshot().Rate1(),
+ "AvgRate05Min": metric.Snapshot().Rate5(),
+ "AvgRate15Min": metric.Snapshot().Rate15(),
+ "MeanRate": metric.Snapshot().RateMean(),
+ "Overall": float64(metric.Snapshot().Count()),
"Percentiles": map[string]interface{}{
- "5": metric.Percentile(0.05),
- "20": metric.Percentile(0.2),
- "50": metric.Percentile(0.5),
- "80": metric.Percentile(0.8),
- "95": metric.Percentile(0.95),
+ "5": metric.Snapshot().Percentile(0.05),
+ "20": metric.Snapshot().Percentile(0.2),
+ "50": metric.Snapshot().Percentile(0.5),
+ "80": metric.Snapshot().Percentile(0.8),
+ "95": metric.Snapshot().Percentile(0.95),
},
}
@@ -345,31 +345,31 @@ func (api *PublicDebugAPI) Metrics(raw bool) (map[string]interface{}, error) {
switch metric := metric.(type) {
case metrics.Counter:
root[name] = map[string]interface{}{
- "Overall": float64(metric.Count()),
+ "Overall": float64(metric.Snapshot().Count()),
}
case metrics.Meter:
root[name] = map[string]interface{}{
- "Avg01Min": format(metric.Rate1()*60, metric.Rate1()),
- "Avg05Min": format(metric.Rate5()*300, metric.Rate5()),
- "Avg15Min": format(metric.Rate15()*900, metric.Rate15()),
- "Overall": format(float64(metric.Count()), metric.RateMean()),
+ "Avg01Min": format(metric.Snapshot().Rate1()*60, metric.Snapshot().Rate1()),
+ "Avg05Min": format(metric.Snapshot().Rate5()*300, metric.Snapshot().Rate5()),
+ "Avg15Min": format(metric.Snapshot().Rate15()*900, metric.Snapshot().Rate15()),
+ "Overall": format(float64(metric.Snapshot().Count()), metric.Snapshot().RateMean()),
}
case metrics.Timer:
root[name] = map[string]interface{}{
- "Avg01Min": format(metric.Rate1()*60, metric.Rate1()),
- "Avg05Min": format(metric.Rate5()*300, metric.Rate5()),
- "Avg15Min": format(metric.Rate15()*900, metric.Rate15()),
- "Overall": format(float64(metric.Count()), metric.RateMean()),
- "Maximum": time.Duration(metric.Max()).String(),
- "Minimum": time.Duration(metric.Min()).String(),
+ "Avg01Min": format(metric.Snapshot().Rate1()*60, metric.Snapshot().Rate1()),
+ "Avg05Min": format(metric.Snapshot().Rate5()*300, metric.Snapshot().Rate5()),
+ "Avg15Min": format(metric.Snapshot().Rate15()*900, metric.Snapshot().Rate15()),
+ "Overall": format(float64(metric.Snapshot().Count()), metric.Snapshot().RateMean()),
+ "Maximum": time.Duration(metric.Snapshot().Max()).String(),
+ "Minimum": time.Duration(metric.Snapshot().Min()).String(),
"Percentiles": map[string]interface{}{
- "5": time.Duration(metric.Percentile(0.05)).String(),
- "20": time.Duration(metric.Percentile(0.2)).String(),
- "50": time.Duration(metric.Percentile(0.5)).String(),
- "80": time.Duration(metric.Percentile(0.8)).String(),
- "95": time.Duration(metric.Percentile(0.95)).String(),
+ "5": time.Duration(metric.Snapshot().Percentile(0.05)).String(),
+ "20": time.Duration(metric.Snapshot().Percentile(0.2)).String(),
+ "50": time.Duration(metric.Snapshot().Percentile(0.5)).String(),
+ "80": time.Duration(metric.Snapshot().Percentile(0.8)).String(),
+ "95": time.Duration(metric.Snapshot().Percentile(0.95)).String(),
},
}