
Merge pull request #4 from jellyb0y/after-hard-fork
After hard fork rebase
jellyb0y authored Jun 29, 2024
2 parents c6ed32a + 9320ef6 commit f31b7e4
Showing 49 changed files with 895 additions and 588 deletions.
22 changes: 22 additions & 0 deletions CHANGELOG.md
@@ -1,4 +1,26 @@
# Changelog
## v1.4.9
### FEATURE
* [\#2463](https://github.com/bnb-chain/bsc/pull/2463) utils: add check_blobtx.js
* [\#2470](https://github.com/bnb-chain/bsc/pull/2470) jsutils: faucet successful requests within blocks
* [\#2467](https://github.com/bnb-chain/bsc/pull/2467) internal/ethapi: add optional parameter for blobSidecars

### IMPROVEMENT
* [\#2462](https://github.com/bnb-chain/bsc/pull/2462) cmd/utils: add a flag to change breathe block interval for testing
* [\#2497](https://github.com/bnb-chain/bsc/pull/2497) params/config: add Bohr hardfork
* [\#2479](https://github.com/bnb-chain/bsc/pull/2479) dev: ensure consistency in BPS bundle result

### BUGFIX
* [\#2461](https://github.com/bnb-chain/bsc/pull/2461) eth/handler: check lists in body before broadcast blocks
* [\#2455](https://github.com/bnb-chain/bsc/pull/2455) cmd: fix memory leak when big dataset
* [\#2466](https://github.com/bnb-chain/bsc/pull/2466) sync: fix some sync issues caused by prune-block.
* [\#2475](https://github.com/bnb-chain/bsc/pull/2475) fix: move mev op to MinerAPI & add command to console
* [\#2473](https://github.com/bnb-chain/bsc/pull/2473) fix: limit the gas price of the mev bid
* [\#2484](https://github.com/bnb-chain/bsc/pull/2484) fix: fix inspect database error
* [\#2481](https://github.com/bnb-chain/bsc/pull/2481) fix: keep 9W blocks in ancient db when prune block
* [\#2495](https://github.com/bnb-chain/bsc/pull/2495) fix: add an empty freeze db
* [\#2507](https://github.com/bnb-chain/bsc/pull/2507) fix: waiting for the last simulation before pick best bid

## v1.4.8
### FEATURE
* [\#2483](https://github.com/bnb-chain/bsc/pull/2483) core/vm: add secp256r1 into PrecompiledContractsHaber
4 changes: 2 additions & 2 deletions README.md
@@ -36,7 +36,7 @@ To combine DPoS and PoA for consensus, BNB Smart Chain implement a novel consens
2. Validators take turns to produce blocks in a PoA manner, similar to Ethereum's Clique consensus engine.
3. Validator set are elected in and out based on a staking based governance on BNB Beacon Chain.
4. The validator set change is relayed via a cross-chain communication mechanism.
5. Parlia consensus engine will interact with a set of [system contracts](https://docs.bnbchain.org/docs/learn/system-contract) to achieve liveness slash, revenue distributing and validator set renewing func.
5. Parlia consensus engine will interact with a set of [system contracts](https://docs.bnbchain.org/bnb-smart-chain/staking/overview/#system-contracts) to achieve liveness slash, revenue distributing and validator set renewing func.


### Light Client of BNB Beacon Chain
@@ -183,7 +183,7 @@ This tool is optional and if you leave it out you can always attach to an alread

#### 7. More

More details about [running a node](https://docs.bnbchain.org/docs/validator/fullnode) and [becoming a validator](https://docs.bnbchain.org/docs/validator/create-val)
More details about [running a node](https://docs.bnbchain.org/bnb-smart-chain/developers/node_operators/full_node/) and [becoming a validator](https://docs.bnbchain.org/bnb-smart-chain/validator/create-val/)

*Note: Although some internal protective measures prevent transactions from
crossing over between the main network and test network, you should always
5 changes: 5 additions & 0 deletions cmd/geth/chaincmd.go
@@ -64,6 +64,7 @@ var (
utils.CachePreimagesFlag,
utils.OverrideCancun,
utils.OverrideHaber,
utils.OverrideBohr,
utils.OverrideVerkle,
}, utils.DatabaseFlags),
Description: `
@@ -261,6 +262,10 @@ func initGenesis(ctx *cli.Context) error {
v := ctx.Uint64(utils.OverrideHaber.Name)
overrides.OverrideHaber = &v
}
if ctx.IsSet(utils.OverrideBohr.Name) {
v := ctx.Uint64(utils.OverrideBohr.Name)
overrides.OverrideBohr = &v
}
if ctx.IsSet(utils.OverrideVerkle.Name) {
v := ctx.Uint64(utils.OverrideVerkle.Name)
overrides.OverrideVerkle = &v
4 changes: 4 additions & 0 deletions cmd/geth/config.go
@@ -193,6 +193,10 @@ func makeFullNode(ctx *cli.Context) (*node.Node, ethapi.Backend) {
v := ctx.Uint64(utils.OverrideHaber.Name)
cfg.Eth.OverrideHaber = &v
}
if ctx.IsSet(utils.OverrideBohr.Name) {
v := ctx.Uint64(utils.OverrideBohr.Name)
cfg.Eth.OverrideBohr = &v
}
if ctx.IsSet(utils.OverrideVerkle.Name) {
v := ctx.Uint64(utils.OverrideVerkle.Name)
cfg.Eth.OverrideVerkle = &v
29 changes: 22 additions & 7 deletions cmd/geth/dbcmd.go
@@ -106,12 +106,12 @@ Remove blockchain and state databases`,
dbInspectTrieCmd = &cli.Command{
Action: inspectTrie,
Name: "inspect-trie",
ArgsUsage: "<blocknum> <jobnum>",
ArgsUsage: "<blocknum> <jobnum> <topn>",
Flags: []cli.Flag{
utils.DataDirFlag,
utils.SyncModeFlag,
},
Usage: "Inspect the MPT tree of the account and contract.",
Usage: "Inspect the MPT tree of the account and contract. 'blocknum' can be latest/snapshot/number. 'topn' means output the top N storage tries info ranked by the total number of TrieNodes",
Description: `This commands iterates the entrie WorldState.`,
}
dbCheckStateContentCmd = &cli.Command{
@@ -386,6 +386,7 @@ func inspectTrie(ctx *cli.Context) error {
blockNumber uint64
trieRootHash common.Hash
jobnum uint64
topN uint64
)

stack, _ := makeConfigNode(ctx)
@@ -405,24 +406,37 @@ func inspectTrie(ctx *cli.Context) error {
var err error
blockNumber, err = strconv.ParseUint(ctx.Args().Get(0), 10, 64)
if err != nil {
return fmt.Errorf("failed to Parse blocknum, Args[0]: %v, err: %v", ctx.Args().Get(0), err)
return fmt.Errorf("failed to parse blocknum, Args[0]: %v, err: %v", ctx.Args().Get(0), err)
}
}

if ctx.NArg() == 1 {
jobnum = 1000
topN = 10
} else if ctx.NArg() == 2 {
var err error
jobnum, err = strconv.ParseUint(ctx.Args().Get(1), 10, 64)
if err != nil {
return fmt.Errorf("failed to parse jobnum, Args[1]: %v, err: %v", ctx.Args().Get(1), err)
}
topN = 10
} else {
var err error
jobnum, err = strconv.ParseUint(ctx.Args().Get(1), 10, 64)
if err != nil {
return fmt.Errorf("failed to Parse jobnum, Args[1]: %v, err: %v", ctx.Args().Get(1), err)
return fmt.Errorf("failed to parse jobnum, Args[1]: %v, err: %v", ctx.Args().Get(1), err)
}

topN, err = strconv.ParseUint(ctx.Args().Get(2), 10, 64)
if err != nil {
return fmt.Errorf("failed to parse topn, Args[1]: %v, err: %v", ctx.Args().Get(1), err)
}
}

if blockNumber != math.MaxUint64 {
headerBlockHash = rawdb.ReadCanonicalHash(db, blockNumber)
if headerBlockHash == (common.Hash{}) {
return errors.New("ReadHeadBlockHash empry hash")
return errors.New("ReadHeadBlockHash empty hash")
}
blockHeader := rawdb.ReadHeader(db, headerBlockHash, blockNumber)
trieRootHash = blockHeader.Root
@@ -437,6 +451,7 @@ func inspectTrie(ctx *cli.Context) error {
if dbScheme == rawdb.PathScheme {
config = &triedb.Config{
PathDB: utils.PathDBConfigAddJournalFilePath(stack, pathdb.ReadOnly),
Cache: 0,
}
} else if dbScheme == rawdb.HashScheme {
config = triedb.HashDefaults
@@ -448,7 +463,7 @@
fmt.Printf("fail to new trie tree, err: %v, rootHash: %v\n", err, trieRootHash.String())
return err
}
theInspect, err := trie.NewInspector(theTrie, triedb, trieRootHash, blockNumber, jobnum)
theInspect, err := trie.NewInspector(theTrie, triedb, trieRootHash, blockNumber, jobnum, int(topN))
if err != nil {
return err
}
@@ -493,7 +508,7 @@ func ancientInspect(ctx *cli.Context) error {
stack, _ := makeConfigNode(ctx)
defer stack.Close()

db := utils.MakeChainDatabase(ctx, stack, true, true)
db := utils.MakeChainDatabase(ctx, stack, true, false)
defer db.Close()
return rawdb.AncientInspect(db)
}
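For context, the new third argument slots straight into the existing inspect-trie invocation. A possible call, assuming the subcommand is wired under "geth db" as dbcmd.go suggests (the data directory path here is illustrative):

    geth db inspect-trie latest 1000 10 --datadir ./node

Here "latest" selects the head state, 1000 is the jobnum (worker count), and 10 asks for the top 10 storage tries ranked by TrieNode count; per the parsing above, omitting the last one or two arguments falls back to the defaults of jobnum=1000 and topn=10.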
1 change: 1 addition & 0 deletions cmd/geth/main.go
@@ -74,6 +74,7 @@ var (
utils.RialtoHash,
utils.OverrideCancun,
utils.OverrideHaber,
utils.OverrideBohr,
utils.OverrideVerkle,
utils.OverrideFullImmutabilityThreshold,
utils.OverrideMinBlocksForBlobRequests,
7 changes: 3 additions & 4 deletions cmd/geth/pruneblock_test.go
@@ -75,7 +75,7 @@ func NewLevelDBDatabaseWithFreezer(file string, cache int, handles int, ancient
if err != nil {
return nil, err
}
frdb, err := rawdb.NewDatabaseWithFreezer(kvdb, ancient, namespace, readonly, disableFreeze, isLastOffset, pruneAncientData)
frdb, err := rawdb.NewDatabaseWithFreezer(kvdb, ancient, namespace, readonly, disableFreeze, isLastOffset, pruneAncientData, false)
if err != nil {
kvdb.Close()
return nil, err
@@ -178,11 +178,10 @@ func BlockchainCreator(t *testing.T, chaindbPath, AncientPath string, blockRemai

// Force run a freeze cycle
type freezer interface {
Freeze() error
Freeze(threshold uint64) error
Ancients() (uint64, error)
}
blockchain.SetFinalized(blocks[len(blocks)-1].Header())
db.(freezer).Freeze()
db.(freezer).Freeze(10)

frozen, err := db.Ancients()
//make sure there're frozen items
16 changes: 15 additions & 1 deletion cmd/geth/snapshot.go
@@ -43,9 +43,11 @@ import (
"github.com/ethereum/go-ethereum/log"
"github.com/ethereum/go-ethereum/metrics"
"github.com/ethereum/go-ethereum/node"
"github.com/ethereum/go-ethereum/params"
"github.com/ethereum/go-ethereum/rlp"
"github.com/ethereum/go-ethereum/trie"
"github.com/ethereum/go-ethereum/triedb"
"github.com/ethereum/go-ethereum/triedb/pathdb"
cli "github.com/urfave/cli/v2"
)

@@ -245,7 +247,16 @@ func accessDb(ctx *cli.Context, stack *node.Node) (ethdb.Database, error) {
NoBuild: true,
AsyncBuild: false,
}
snaptree, err := snapshot.New(snapconfig, chaindb, triedb.NewDatabase(chaindb, nil), headBlock.Root(), TriesInMemory, false)
dbScheme := rawdb.ReadStateScheme(chaindb)
var config *triedb.Config
if dbScheme == rawdb.PathScheme {
config = &triedb.Config{
PathDB: utils.PathDBConfigAddJournalFilePath(stack, pathdb.ReadOnly),
}
} else if dbScheme == rawdb.HashScheme {
config = triedb.HashDefaults
}
snaptree, err := snapshot.New(snapconfig, chaindb, triedb.NewDatabase(chaindb, config), headBlock.Root(), TriesInMemory, false)
if err != nil {
log.Error("snaptree error", "err", err)
return nil, err // The relevant snapshot(s) might not exist
@@ -333,6 +344,9 @@ func pruneBlock(ctx *cli.Context) error {
stack, config = makeConfigNode(ctx)
defer stack.Close()
blockAmountReserved = ctx.Uint64(utils.BlockAmountReserved.Name)
if blockAmountReserved < params.FullImmutabilityThreshold {
return fmt.Errorf("block-amount-reserved must be greater than or equal to %d", params.FullImmutabilityThreshold)
}
chaindb, err = accessDb(ctx, stack)
if err != nil {
return err
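The new guard means offline block pruning can no longer keep fewer blocks than params.FullImmutabilityThreshold (90,000 in upstream go-ethereum, the "9W blocks" mentioned in the changelog). A sketch of an invocation that satisfies the check, again with an illustrative data directory:

    geth snapshot prune-block --datadir ./node --block-amount-reserved 90000

Values below the threshold now return the error added above instead of pruning too deep, and leaving the flag unset picks up the new default added to block-amount-reserved in flags.go further down in this diff.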
51 changes: 51 additions & 0 deletions cmd/jsutils/check_blobtx.js
@@ -0,0 +1,51 @@
import { ethers } from "ethers";
import program from "commander";

// depends on ethjs v6.11.0+ for 4844, https://github.com/ethers-io/ethers.js/releases/tag/v6.11.0
// BSC testnet enabled 4844 on block: 39539137
// Usage:
// nvm use 20
// node check_blobtx.js --rpc https://data-seed-prebsc-1-s1.binance.org:8545 --startNum 39539137
// node check_blobtx.js --rpc https://data-seed-prebsc-1-s1.binance.org:8545 --startNum 39539137 --endNum 40345994
program.option("--rpc <Rpc>", "Rpc Server URL");
program.option("--startNum <Num>", "start block", 0);
program.option("--endNum <Num>", "end block", 0);
program.parse(process.argv);

const provider = new ethers.JsonRpcProvider(program.rpc);
const main = async () => {
var startBlock = parseInt(program.startNum)
var endBlock = parseInt(program.endNum)
if (isNaN(endBlock) || isNaN(startBlock) || startBlock == 0) {
console.error("invalid input, --startNum", program.startNum, "--end", program.endNum)
return
}
// if --endNum is not specified, set it to the latest block number.
if (endBlock == 0) {
endBlock = await provider.getBlockNumber();
}
if (startBlock > endBlock) {
console.error("invalid input, startBlock:",startBlock, " endBlock:", endBlock);
return
}

for (let i = startBlock; i <= endBlock; i++) {
let blockData = await provider.getBlock(i);
console.log("startBlock:",startBlock, "endBlock:", endBlock, "curBlock", i, "blobGasUsed", blockData.blobGasUsed);
if (blockData.blobGasUsed == 0) {
continue
}
for (let txIndex = 0; txIndex<= blockData.transactions.length - 1; txIndex++) {
let txHash = blockData.transactions[txIndex]
let txData = await provider.getTransaction(txHash);
if (txData.type == 3) {
console.log("BlobTx in block:",i, " txIndex:", txIndex, " txHash:", txHash);
}
}
}
};
main().then(() => process.exit(0))
.catch((error) => {
console.error(error);
process.exit(1);
});
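A note on the filter logic in this script: blobGasUsed is non-zero only when a block carries at least one EIP-4844 blob transaction, so blocks with blobGasUsed == 0 are skipped without fetching their transactions, and within the remaining blocks any transaction of type 3 is by definition a blob transaction.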
49 changes: 49 additions & 0 deletions cmd/jsutils/faucet_request.js
@@ -0,0 +1,49 @@
import { ethers } from "ethers";
import program from "commander";

// Usage:
// node faucet_request.js --rpc localhost:8545 --startNum 39539137
// node faucet_request.js --rpc localhost:8545 --startNum 39539137 --endNum 40345994

// node faucet_request.js --rpc https://data-seed-prebsc-1-s1.bnbchain.org:8545 --startNum 39539137 --endNum 40345994
program.option("--rpc <Rpc>", "Rpc Server URL");
program.option("--startNum <Num>", "start block", 0);
program.option("--endNum <Num>", "end block", 0);
program.parse(process.argv);

const provider = new ethers.JsonRpcProvider(program.rpc);
const main = async () => {
var startBlock = parseInt(program.startNum)
var endBlock = parseInt(program.endNum)
if (isNaN(endBlock) || isNaN(startBlock) || startBlock == 0) {
console.error("invalid input, --startNum", program.startNum, "--end", program.endNum)
return
}
// if --endNum is not specified, set it to the latest block number.
if (endBlock == 0) {
endBlock = await provider.getBlockNumber();
}
if (startBlock > endBlock) {
console.error("invalid input, startBlock:",startBlock, " endBlock:", endBlock);
return
}

let startBalance = await provider.getBalance("0xaa25Aa7a19f9c426E07dee59b12f944f4d9f1DD3", startBlock)
let endBalance = await provider.getBalance("0xaa25Aa7a19f9c426E07dee59b12f944f4d9f1DD3", endBlock)
const faucetAmount = BigInt(0.3 * 10**18); // Convert 0.3 ether to wei as a BigInt
const numFaucetRequest = (startBalance - endBalance) / faucetAmount;

// Convert BigInt to ether
const startBalanceEth = Number(startBalance) / 10**18;
const endBalanceEth = Number(endBalance) / 10**18;

console.log(`Start Balance: ${startBalanceEth} ETH`);
console.log(`End Balance: ${endBalanceEth} ETH`);

console.log("successful faucet request: ",numFaucetRequest);
};
main().then(() => process.exit(0))
.catch((error) => {
console.error(error);
process.exit(1);
});
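The request count here is plain balance arithmetic: each successful faucet drip sends 0.3 BNB, so the number of requests over the range is the faucet address's balance drop divided by 0.3 x 10^18 wei. With illustrative numbers, a drop from 100 BNB at startNum to 97 BNB at endNum gives (100 - 97) / 0.3 = 10 successful requests; since the division is done on BigInt values, any remainder is truncated.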
6 changes: 6 additions & 0 deletions cmd/utils/flags.go
@@ -315,6 +315,11 @@ var (
Usage: "Manually specify the Haber fork timestamp, overriding the bundled setting",
Category: flags.EthCategory,
}
OverrideBohr = &cli.Uint64Flag{
Name: "override.bohr",
Usage: "Manually specify the Bohr fork timestamp, overriding the bundled setting",
Category: flags.EthCategory,
}
OverrideVerkle = &cli.Uint64Flag{
Name: "override.verkle",
Usage: "Manually specify the Verkle fork timestamp, overriding the bundled setting",
@@ -1082,6 +1087,7 @@ Please note that --` + MetricsHTTPFlag.Name + ` must be set to start the server.
Name: "block-amount-reserved",
Usage: "Sets the expected remained amount of blocks for offline block prune",
Category: flags.BlockHistoryCategory,
Value: params.FullImmutabilityThreshold,
}

CheckSnapshotWithMPT = &cli.BoolFlag{
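The new flag mirrors the existing Cancun and Haber overrides and is consumed in chaincmd.go and config.go above, forcing the Bohr fork timestamp over whatever the bundled chain config ships. A hypothetical invocation, with an illustrative timestamp rather than an announced fork time:

    geth --override.bohr 1718863500 --datadir ./node

As with the other override flags, this is mainly useful on private or test networks that need the fork to activate at a custom time.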
2 changes: 1 addition & 1 deletion cmd/utils/history_test.go
@@ -163,7 +163,7 @@ func TestHistoryImportAndExport(t *testing.T) {

// Now import Era.
freezer := t.TempDir()
db2, err := rawdb.NewDatabaseWithFreezer(rawdb.NewMemoryDatabase(), freezer, "", false, false, false, false)
db2, err := rawdb.NewDatabaseWithFreezer(rawdb.NewMemoryDatabase(), freezer, "", false, false, false, false, false)
if err != nil {
panic(err)
}