From 0f622f47bbd117a096f836b7c7a038855d93daaf Mon Sep 17 00:00:00 2001
From: Maksim Ramanenkau
Date: Tue, 28 May 2024 10:45:00 +0200
Subject: [PATCH 01/18] Create tables

---
 .../20240527110422-token-utility-1.js | 53 +++++++++++++++++++
 .../20240527110422-token-utility-1-down.sql | 13 +++++
 .../20240527110422-token-utility-1-up.sql | 45 ++++++++++++++++
 3 files changed, 111 insertions(+)
 create mode 100644 db/migrations/20240527110422-token-utility-1.js
 create mode 100644 db/migrations/sqls/20240527110422-token-utility-1-down.sql
 create mode 100644 db/migrations/sqls/20240527110422-token-utility-1-up.sql

diff --git a/db/migrations/20240527110422-token-utility-1.js b/db/migrations/20240527110422-token-utility-1.js
new file mode 100644
index 00000000..920586a3
--- /dev/null
+++ b/db/migrations/20240527110422-token-utility-1.js
@@ -0,0 +1,53 @@
+'use strict';
+
+var dbm;
+var type;
+var seed;
+var fs = require('fs');
+var path = require('path');
+var Promise;
+
+/**
+  * We receive the dbmigrate dependency from dbmigrate initially.
+  * This enables us to not have to rely on NODE_PATH.
+  */
+exports.setup = function(options, seedLink) {
+  dbm = options.dbmigrate;
+  type = dbm.dataType;
+  seed = seedLink;
+  Promise = options.Promise;
+};
+
+exports.up = function(db) {
+  var filePath = path.join(__dirname, 'sqls', '20240527110422-token-utility-1-up.sql');
+  return new Promise( function( resolve, reject ) {
+    fs.readFile(filePath, {encoding: 'utf-8'}, function(err,data){
+      if (err) return reject(err);
+      console.log('received data: ' + data);
+
+      resolve(data);
+    });
+  })
+  .then(function(data) {
+    return db.runSql(data);
+  });
+};
+
+exports.down = function(db) {
+  var filePath = path.join(__dirname, 'sqls', '20240527110422-token-utility-1-down.sql');
+  return new Promise( function( resolve, reject ) {
+    fs.readFile(filePath, {encoding: 'utf-8'}, function(err,data){
+      if (err) return reject(err);
+      console.log('received data: ' + data);
+
+      resolve(data);
+    });
+  })
+  .then(function(data) {
+    return db.runSql(data);
+  });
+};
+
+exports._meta = {
+  "version": 1
+};
diff --git a/db/migrations/sqls/20240527110422-token-utility-1-down.sql b/db/migrations/sqls/20240527110422-token-utility-1-down.sql
new file mode 100644
index 00000000..3372eb44
--- /dev/null
+++ b/db/migrations/sqls/20240527110422-token-utility-1-down.sql
@@ -0,0 +1,13 @@
+DROP TABLE IF EXISTS ddc_metric_value;
+
+DROP TABLE IF EXISTS cluster;
+
+DROP TABLE IF EXISTS node;
+
+DROP TABLE IF EXISTS node_provider;
+
+DROP TABLE IF EXISTS node_provider_reward;
+
+DROP TABLE IF EXISTS node_to_cluster;
+
+DROP TABLE IF EXISTS node_to_node_provider;
\ No newline at end of file
diff --git a/db/migrations/sqls/20240527110422-token-utility-1-up.sql b/db/migrations/sqls/20240527110422-token-utility-1-up.sql
new file mode 100644
index 00000000..caf85217
--- /dev/null
+++ b/db/migrations/sqls/20240527110422-token-utility-1-up.sql
@@ -0,0 +1,45 @@
+CREATE TABLE IF NOT EXISTS cluster (
+    id BYTEA PRIMARY KEY
+);
+
+CREATE TABLE IF NOT EXISTS node_provider (
+    id TEXT PRIMARY KEY
+);
+
+CREATE TABLE IF NOT EXISTS node (
+    id BYTEA PRIMARY KEY,
+    total_capacity BIGINT,
+    uptime INTERVAL,
+    throughput BIGINT,
+    node_type TEXT,
+    tier SMALLINT
+);
+
+CREATE TABLE IF NOT EXISTS node_to_cluster (
+    id SERIAL PRIMARY KEY,
+    cluster_id BYTEA NOT NULL,
+    node_id BYTEA NOT NULL,
+    FOREIGN KEY (cluster_id) REFERENCES cluster(id),
+    FOREIGN KEY (node_id) REFERENCES node(id)
+);
+
+CREATE TABLE IF NOT EXISTS node_to_node_provider (
+    id SERIAL PRIMARY KEY,
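+    -- node_provider_id / node_id below link a storage node to its provider;
+    -- unlike node_to_cluster above, no FOREIGN KEY constraints are declared here.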
+    node_provider_id TEXT NOT NULL,
+    node_id BYTEA NOT NULL
+);
+
+CREATE TABLE IF NOT EXISTS node_provider_reward (
+    id SERIAL PRIMARY KEY,
+    node_provider_id TEXT NOT NULL,
+    rewarded NUMERIC(39, 0),
+    expected_to_reward NUMERIC(39, 0),
+    era INTEGER,
+    batch_index SMALLINT,
+    stored_bytes BIGINT,
+    transferred_bytes BIGINT,
+    puts NUMERIC(39, 0),
+    gets NUMERIC(39, 0),
+    explorer_link TEXT,
+    FOREIGN KEY (node_provider_id) REFERENCES node_provider(id)
+);

From 6a73ae6a4ecad8f931cb6b7e730231b007274576 Mon Sep 17 00:00:00 2001
From: Maksim Ramanenkau
Date: Wed, 29 May 2024 13:51:25 +0200
Subject: [PATCH 02/18] Implement DDC nodes table

---
 frontend/locales/en.json | 13 ++
 frontend/pages/ddc-nodes.vue | 379 +++++++++++++++++++++++++++++++++++
 2 files changed, 392 insertions(+)
 create mode 100644 frontend/pages/ddc-nodes.vue

diff --git a/frontend/locales/en.json b/frontend/locales/en.json
index 393b3fb1..1aae7fad 100644
--- a/frontend/locales/en.json
+++ b/frontend/locales/en.json
@@ -119,6 +119,19 @@
     "loading_data": "Loading data, it can take a long time. Be patient, please",
     "download_csv": "Download in CSV format"
   },
+  "ddc-nodes": {
+    "cluster_dashboard": "Cluster Dashboard",
+    "cluster_id": "Cluster ID",
+    "nodes": "Nodes",
+    "tier": "Tier",
+    "throughput": "Throughput",
+    "search_placeholder": "Search nodes by node ID",
+    "node_type": "Node Type",
+    "node_id": "Node ID",
+    "rewards": "Rewards (24H)",
+    "gets": "Successful Reads in the last Epoch",
+    "puts": "Successful Writes in the last Epoch"
+  },
   "account": {
     "head_title": "Cere Stats - Account {address}",
     "head_content": "{networkName} Account {address}",
diff --git a/frontend/pages/ddc-nodes.vue b/frontend/pages/ddc-nodes.vue
new file mode 100644
index 00000000..acbb4abc
--- /dev/null
+++ b/frontend/pages/ddc-nodes.vue
@@ -0,0 +1,379 @@
+
+

From 1567a4c5013aecf411833f99ac474f63a6d910e8 Mon Sep 17 00:00:00 2001
From: Maksim Ramanenkau
Date: Fri, 31 May 2024 17:02:32 +0200
Subject: [PATCH 03/18] Partially implement cluster dashboard

---
 backend/crawlers/ddc2.js | 224 +++++++++++
 frontend/frontend.config.js | 4 +-
 frontend/locales/en.json | 6 +-
 frontend/pages/accounts.vue | 5 +-
 frontend/pages/cluster.vue | 379 ++++++++++++++++++
 .../pages/{ddc-nodes.vue => cluster/_id.vue} | 43 +-
 frontend/store/ranking.js | 220 +++++-----
 7 files changed, 748 insertions(+), 133 deletions(-)
 create mode 100644 backend/crawlers/ddc2.js
 create mode 100644 frontend/pages/cluster.vue
 rename frontend/pages/{ddc-nodes.vue => cluster/_id.vue} (91%)

diff --git a/backend/crawlers/ddc2.js b/backend/crawlers/ddc2.js
new file mode 100644
index 00000000..e451c205
--- /dev/null
+++ b/backend/crawlers/ddc2.js
@@ -0,0 +1,224 @@
+// @ts-check
+const pino = require('pino');
+const {
+  wait,
+  getClient,
+  getPolkadotAPI,
+  isNodeSynced,
+  dbParamQuery,
+} = require('../lib/utils');
+const backendConfig = require('../backend.config');
+
+const crawlerName = 'ddc2';
+const logger = pino({
+  level: backendConfig.logLevel,
+});
+const loggerOptions = {
+  crawler: crawlerName,
+};
+const config = backendConfig.crawlers.find(
+  ({ name }) => name === crawlerName,
+);
+
+const actualizeDdcClusters = async (api, dbClient) => {
+  const response = await api.query.ddcClusters.clusters.entries();
+
+  const clusters = response.map((entity) => entity[1].toHuman());
+
+  const persistClusterQuery = `
+    INSERT INTO cluster (
+      id,
+      manager_id,
+      reserve_id,
+      node_provider_auth_contract,
+      erasure_coding_required,
+      erasure_coding_total,
+      replication_total
+    ) VALUES (
+      $1,
+      $2,
+      $3,
+      $4,
+      $5,
+      $6,
+      $7
+    )
+    ON CONFLICT (id)
+    DO UPDATE SET
+      id = EXCLUDED.id,
+      manager_id = EXCLUDED.manager_id,
+      reserve_id = EXCLUDED.reserve_id,
+      node_provider_auth_contract = EXCLUDED.node_provider_auth_contract,
+      erasure_coding_required = EXCLUDED.erasure_coding_required,
+      erasure_coding_total = EXCLUDED.erasure_coding_total,
+      replication_total = EXCLUDED.replication_total
+    ;`;
+
+  // eslint-disable-next-line no-restricted-syntax
+  for (const cluster of clusters) {
+    const clusterData = [
+      cluster.clusterId,
+      cluster.managerId,
+      cluster.reserveId,
+      cluster.props.nodeProviderAuthContract,
+      cluster.props.erasureCodingRequired,
+      cluster.props.erasureCodingTotal,
+      cluster.props.replicationTotal,
+    ];
+
+    // eslint-disable-next-line no-await-in-loop
+    await dbParamQuery(dbClient, persistClusterQuery, clusterData, loggerOptions);
+  }
+};
+
+const actualizeDdcNodes = async (api, dbClient) => {
+  const response = await api.query.ddcNodes.storageNodes.entries();
+
+  const nodes = response.map((entity) => entity[1].toHuman());
+
+  const persistNodeQuery = `
+    INSERT INTO node (
+      id,
+      host,
+      domain,
+      ssl,
+      http_port,
+      grpc_port,
+      p2p_port,
+      mode
+    ) VALUES (
+      $1,
+      $2,
+      $3,
+      $4,
+      $5,
+      $6,
+      $7,
+      $8
+    )
+    ON CONFLICT (id)
+    DO UPDATE SET
+      id = EXCLUDED.id,
+      host = EXCLUDED.host,
+      domain = EXCLUDED.domain,
+      ssl = EXCLUDED.ssl,
+      http_port = EXCLUDED.http_port,
+      grpc_port = EXCLUDED.grpc_port,
+      p2p_port = EXCLUDED.p2p_port,
+      mode = EXCLUDED.mode
+    ;`;
+
+  const persistClusterQuery = `
+    INSERT INTO node_to_cluster (
+      cluster_id,
+      node_id
+    ) VALUES (
+      $1,
+      $2
+    )
+    ON CONFLICT (cluster_id, node_id)
+    DO UPDATE SET
+      cluster_id = EXCLUDED.cluster_id,
+      node_id = EXCLUDED.node_id
+    ;`;
+
+  const persistProviderQuery = `
+    INSERT INTO node_provider (
+      id
+    ) VALUES (
+      $1
+    )
+    ON CONFLICT (id)
+    DO UPDATE SET
+      id = EXCLUDED.id
+    ;`;
+
+  const persistNodeToProviderQuery = `
+    INSERT INTO node_to_node_provider (
+      node_provider_id,
+      node_id
+    ) VALUES (
+      $1,
+      $2
+    )
+    ON CONFLICT (node_provider_id, node_id)
+    DO UPDATE SET
+      node_provider_id = EXCLUDED.node_provider_id,
+      node_id = EXCLUDED.node_id
+    ;`;
+
+  // eslint-disable-next-line no-restricted-syntax
+  for (const node of nodes) {
+    const nodeData = [
+      node.pubKey,
+      node.props.host,
+      node.props.domain,
+      node.props.ssl,
+      Number(node.props.httpPort.replace(/,/g, '')),
+      Number(node.props.grpcPort.replace(/,/g, '')),
+      Number(node.props.p2pPort.replace(/,/g, '')),
+      node.props.mode,
+    ];
+    const providerData = [node.providerId];
+    const nodeToProviderData = [node.providerId, node.pubKey];
+    const nodeClusterData = [node.clusterId, node.pubKey];
+
+    // eslint-disable-next-line no-await-in-loop
+    await dbParamQuery(dbClient, persistProviderQuery, providerData, loggerOptions);
+
+    // eslint-disable-next-line no-await-in-loop
+    await dbParamQuery(dbClient, persistNodeQuery, nodeData, loggerOptions);
+
+    // eslint-disable-next-line no-await-in-loop
+    await dbParamQuery(dbClient, persistClusterQuery, nodeClusterData, loggerOptions);
+
+    // eslint-disable-next-line no-await-in-loop
+    await dbParamQuery(dbClient, persistNodeToProviderQuery, nodeToProviderData, loggerOptions);
+  }
+};
+
+const crawler = async (delayedStart) => {
+  if (delayedStart) {
+    logger.debug(loggerOptions, `Delaying DDC crawler start for ${config.startDelay / 1000}s`);
+    await wait(config.startDelay);
+  }
+
+  logger.info(loggerOptions, 'Running DDC crawler...');
+
+  const dbClient = await getClient(loggerOptions);
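+  // Connect to the chain API, then wait (in 10s steps) until the node
+  // reports it is fully synced before crawling cluster and node state.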
+  const api = await getPolkadotAPI(loggerOptions, config.apiCustomTypes);
+
+  let synced = await isNodeSynced(api, loggerOptions);
+  while (!synced) {
+    // eslint-disable-next-line no-await-in-loop
+    await wait(10000);
+    // eslint-disable-next-line no-await-in-loop
+    synced = await isNodeSynced(api, loggerOptions);
+  }
+
+  const startTime = new Date().getTime();
+
+  await actualizeDdcClusters(api, dbClient);
+  await actualizeDdcNodes(api, dbClient);
+
+  logger.debug(loggerOptions, 'Disconnecting from API');
+  await api.disconnect().catch((error) => logger.error(loggerOptions, `API disconnect error: ${JSON.stringify(error)}`));
+
+  logger.debug(loggerOptions, 'Disconnecting from DB');
+  await dbClient.end().catch((error) => logger.error(loggerOptions, `DB disconnect error: ${JSON.stringify(error)}`));
+
+  const endTime = new Date().getTime();
+  logger.info(loggerOptions, `Processed in ${((endTime - startTime) / 1000).toFixed(0)}s`);
+
+  logger.info(loggerOptions, `Next execution in ${(config.pollingTime / 60000).toFixed(0)}m...`);
+  setTimeout(
+    () => crawler(false),
+    config.pollingTime,
+  );
+};
+
+crawler(true).catch((error) => {
+  // eslint-disable-next-line no-console
+  console.error(error);
+  process.exit(-1);
+});
diff --git a/frontend/frontend.config.js b/frontend/frontend.config.js
index 8e1ca0dd..5e5738f8 100644
--- a/frontend/frontend.config.js
+++ b/frontend/frontend.config.js
@@ -6,8 +6,8 @@ export const network = {
   ss58Format: 54,
   coinGeckoDenom: 'cere-network',
   nodeWs: 'wss://archive.mainnet.cere.network/ws',
-  backendWs: 'wss://hasura.stats.cere.network/v1/graphql',
-  backendHttp: 'https://api.stats.cere.network/api/v1',
+  backendWs: 'ws://localhost:8082/v1/graphql',
+  backendHttp: 'http://localhost:8080/api/v1',
   googleAnalytics: '',
   theme: '@/assets/scss/themes/polkastats.scss',
   // ranking
diff --git a/frontend/locales/en.json b/frontend/locales/en.json
index 1aae7fad..e87217a0 100644
--- a/frontend/locales/en.json
+++ b/frontend/locales/en.json
@@ -119,7 +119,7 @@
     "loading_data": "Loading data, it can take a long time. Be patient, please",
     "download_csv": "Download in CSV format"
   },
-  "ddc-nodes": {
+  "cluster": {
     "cluster_dashboard": "Cluster Dashboard",
     "cluster_id": "Cluster ID",
     "nodes": "Nodes",
@@ -129,8 +129,8 @@
     "node_type": "Node Type",
     "node_id": "Node ID",
     "rewards": "Rewards (24H)",
-    "gets": "Successful Reads in the last Epoch",
-    "puts": "Successful Writes in the last Epoch"
+    "gets": "Successful Reads in the last Era",
+    "puts": "Successful Writes in the last Era"
   },
   "account": {
     "head_title": "Cere Stats - Account {address}",
diff --git a/frontend/pages/accounts.vue b/frontend/pages/accounts.vue
index d43d287c..efcbe77c 100644
--- a/frontend/pages/accounts.vue
+++ b/frontend/pages/accounts.vue
@@ -89,10 +89,7 @@
-
-
+
+
+
+
diff --git a/frontend/locales/en.json b/frontend/locales/en.json
index 3684c142..6cdcca59 100644
--- a/frontend/locales/en.json
+++ b/frontend/locales/en.json
@@ -91,7 +91,8 @@
     "events": "Events",
     "validator": "Validator",
     "how_to_stake": "How to stake",
-    "economics": "Economics"
+    "economics": "Economics",
+    "clusters": "Clusters"
   },

From aed3d071bb12bed299975fc5937aae75167128c7 Mon Sep 17 00:00:00 2001
From: Maksim Ramanenkau
Date: Thu, 6 Jun 2024 10:34:52 +0200
Subject: [PATCH 07/18] Make Historical Rewards table adaptive

---
 frontend/assets/scss/themes/polkastats.scss | 34 ++++
 frontend/locales/en.json | 2 +-
 frontend/pages/cluster/_id.vue | 210 +++++++++++++++++---
 3 files changed, 221 insertions(+), 25 deletions(-)

diff --git a/frontend/assets/scss/themes/polkastats.scss b/frontend/assets/scss/themes/polkastats.scss
index 86c9adce..5a69f767 100644
--- a/frontend/assets/scss/themes/polkastats.scss
+++ b/frontend/assets/scss/themes/polkastats.scss
@@ -264,6 +264,40 @@ h6 {
   }
 }
+// cluster page
+#cluster-table th {
+  text-align: center;
+}
+#rewards-table th {
+  text-align: center;
+}
+.page-cluster .identicon {
+  display: inline-block;
+}
+.page-cluster td div {
+  padding: 0 !important;
+}
+.mw240 {
+  min-width: 240px;
+}
+@media (max-width: 767.98px) {
+  .page-cluster
+    .table.b-table.b-table-stacked-md
+    > tbody
+    > tr
+    > [data-label]::before {
+    display: none;
+  }
+  .page-cluster
+    .table.b-table.b-table-stacked-md
+    > tbody
+    > tr
+    > [data-label]
+    > div {
+    width: 100%;
+  }
+}
+
 // block page
 .block-table tr td:first-child {
   width: 30%;
diff --git a/frontend/locales/en.json b/frontend/locales/en.json
index 6cdcca59..2b2644d5 100644
--- a/frontend/locales/en.json
+++ b/frontend/locales/en.json
@@ -124,7 +124,7 @@
     "cluster_dashboard": "Cluster Dashboard",
     "cluster_id": "Cluster ID",
     "providers": "Providers",
-    "throughput": "Throughput",
+    "throughput": "Throughput (bytes)",
     "node_type": "Node Type",
     "node_provider_id": "Node Provider",
     "rewards": "Rewards for the last Era",
diff --git a/frontend/pages/cluster/_id.vue b/frontend/pages/cluster/_id.vue
index 0c4a97b3..2c6b3e0f 100644
--- a/frontend/pages/cluster/_id.vue
+++ b/frontend/pages/cluster/_id.vue
@@ -1,7 +1,7 @@