diff --git a/.github/workflows/dev.yaml b/.github/workflows/dev.yaml
index 20415b94..c7403b32 100644
--- a/.github/workflows/dev.yaml
+++ b/.github/workflows/dev.yaml
@@ -32,7 +32,7 @@ jobs:
     uses: Cerebellum-Network/reusable-workflows/.github/workflows/build-and-upload-static.yaml@1.0.0
     with:
       runs-on: '["self-hosted", "cere-network-large"]'
-      build_container: 'node:14-buster-slim'
+      build_container: 'node:16'
       deploy_container: 'ubuntu:20.04'
       install_packages_command: 'cp -rf frontend/frontend.config-cere-dev.js frontend/frontend.config.js; yarn install'
       build_command: 'yarn workspace frontend generate'
diff --git a/backend/backend.config.js b/backend/backend.config.js
index 8c02c235..353c9296 100644
--- a/backend/backend.config.js
+++ b/backend/backend.config.js
@@ -65,7 +65,6 @@ module.exports = {
       name: 'ddc',
       enabled: !process.env.DDC_DISABLE,
       crawler: './crawlers/ddc.js',
-      // TODO update default to Mainnet once DDC Mainnet deployed. Ticket: https://cerenetwork.atlassian.net/browse/CBI-2050
       contractRpc: process.env.DDC_CONTRACT_RPC || 'wss://rpc.testnet.cere.network/ws',
       contractName: process.env.DDC_CONTRACT_NAME || 'ddc_bucket',
       contractAddress: process.env.DDC_CONTRACT_ADDRESS || '5DTZfAcmZctJodfa4W88BW5QXVBxT4v7UEax91HZCArTih6U',
@@ -73,5 +72,14 @@ module.exports = {
         parseInt(process.env.DDC_POLLING_TIME_MS, 10)
         || 2 * 60 * 1000, // 2 minutes
     },
+    {
+      name: 'ddc2',
+      enabled: !process.env.DDC2_DISABLE,
+      crawler: './crawlers/ddc2.js',
+      startDelay: 0,
+      pollingTime:
+        parseInt(process.env.DDC2_POLLING_TIME_MS, 10)
+        || 2 * 60 * 1000, // 2 minutes
+    },
   ],
 };
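Note: the new `ddc2` entry follows the same convention as the existing crawlers, so it can be tuned per environment. A minimal sketch of the knobs it reads (variable names come from the config above; the values here are illustrative only, not defaults from this change):

// Illustrative overrides, set before the backend process starts.
process.env.DDC2_DISABLE = 'true'; // any non-empty value turns the crawler off
process.env.DDC2_POLLING_TIME_MS = '30000'; // 30s polling instead of the 2-minute fallback
// parseInt('30000', 10) || 2 * 60 * 1000  ->  30000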
diff --git a/backend/crawlers/ddc2.js b/backend/crawlers/ddc2.js
new file mode 100644
index 00000000..22d21b72
--- /dev/null
+++ b/backend/crawlers/ddc2.js
@@ -0,0 +1,224 @@
+// @ts-check
+const pino = require('pino');
+const {
+  wait,
+  getClient,
+  getPolkadotAPI,
+  isNodeSynced,
+  dbParamQuery,
+} = require('../lib/utils');
+const backendConfig = require('../backend.config');
+
+const crawlerName = 'ddc2';
+const logger = pino({
+  level: backendConfig.logLevel,
+});
+const loggerOptions = {
+  crawler: crawlerName,
+};
+const config = backendConfig.crawlers.find(
+  ({ name }) => name === crawlerName,
+);
+
+const actualizeDdcClusters = async (api, dbClient) => {
+  const response = await api.query.ddcClusters.clusters.entries();
+
+  const clusters = response.map((entity) => entity[1].toHuman());
+
+  const persistClusterQuery = `
+    INSERT INTO cluster (
+      id,
+      manager_id,
+      reserve_id,
+      node_provider_auth_contract,
+      erasure_coding_required,
+      erasure_coding_total,
+      replication_total
+    ) VALUES (
+      $1,
+      $2,
+      $3,
+      $4,
+      $5,
+      $6,
+      $7
+    )
+    ON CONFLICT (id)
+    DO UPDATE SET
+      id = EXCLUDED.id,
+      manager_id = EXCLUDED.manager_id,
+      reserve_id = EXCLUDED.reserve_id,
+      node_provider_auth_contract = EXCLUDED.node_provider_auth_contract,
+      erasure_coding_required = EXCLUDED.erasure_coding_required,
+      erasure_coding_total = EXCLUDED.erasure_coding_total,
+      replication_total = EXCLUDED.replication_total
+    ;`;
+
+  // eslint-disable-next-line no-restricted-syntax
+  for (const cluster of clusters) {
+    const clusterData = [
+      cluster.clusterId,
+      cluster.managerId,
+      cluster.reserveId,
+      cluster.props.nodeProviderAuthContract,
+      cluster.props.erasureCodingRequired,
+      cluster.props.erasureCodingTotal,
+      cluster.props.replicationTotal,
+    ];
+
+    // eslint-disable-next-line no-await-in-loop
+    await dbParamQuery(dbClient, persistClusterQuery, clusterData, loggerOptions);
+  }
+};
+
+const actualizeDdcNodes = async (api, dbClient) => {
+  const response = await api.query.ddcNodes.storageNodes.entries();
+
+  const nodes = response.map((entity) => entity[1].toHuman());
+
+  const persistNodeQuery = `
+    INSERT INTO node (
+      id,
+      host,
+      domain,
+      ssl,
+      http_port,
+      grpc_port,
+      p2p_port,
+      mode
+    ) VALUES (
+      $1,
+      $2,
+      $3,
+      $4,
+      $5,
+      $6,
+      $7,
+      $8
+    )
+    ON CONFLICT (id)
+    DO UPDATE SET
+      id = EXCLUDED.id,
+      host = EXCLUDED.host,
+      domain = EXCLUDED.domain,
+      ssl = EXCLUDED.ssl,
+      http_port = EXCLUDED.http_port,
+      grpc_port = EXCLUDED.grpc_port,
+      p2p_port = EXCLUDED.p2p_port,
+      mode = EXCLUDED.mode
+    ;`;
+
+  const persistNodeToClusterQuery = `
+    INSERT INTO node_to_cluster (
+      cluster_id,
+      node_id
+    ) VALUES (
+      $1,
+      $2
+    )
+    ON CONFLICT (cluster_id, node_id)
+    DO UPDATE SET
+      cluster_id = EXCLUDED.cluster_id,
+      node_id = EXCLUDED.node_id
+    ;`;
+
+  const persistProviderQuery = `
+    INSERT INTO node_provider (
+      id
+    ) VALUES (
+      $1
+    )
+    ON CONFLICT (id)
+    DO UPDATE SET
+      id = EXCLUDED.id
+    ;`;
+
+  const persistNodeToProviderQuery = `
+    INSERT INTO node_to_node_provider (
+      node_provider_id,
+      node_id
+    ) VALUES (
+      $1,
+      $2
+    )
+    ON CONFLICT (node_provider_id, node_id)
+    DO UPDATE SET
+      node_provider_id = EXCLUDED.node_provider_id,
+      node_id = EXCLUDED.node_id
+    ;`;
+
+  // eslint-disable-next-line no-restricted-syntax
+  for (const node of nodes) {
+    const nodeData = [
+      node.pubKey,
+      node.props.host,
+      node.props.domain,
+      node.props.ssl,
+      Number(node.props.httpPort.replace(/,/g, '')),
+      Number(node.props.grpcPort.replace(/,/g, '')),
+      Number(node.props.p2pPort.replace(/,/g, '')),
+      node.props.mode,
+    ];
+    const providerData = [node.providerId];
+    const nodeToProviderData = [node.providerId, node.pubKey];
+    const nodeClusterData = [node.clusterId, node.pubKey];
+
+    // eslint-disable-next-line no-await-in-loop
+    await dbParamQuery(dbClient, persistProviderQuery, providerData, loggerOptions);
+
+    // eslint-disable-next-line no-await-in-loop
+    await dbParamQuery(dbClient, persistNodeQuery, nodeData, loggerOptions);
+
+    // eslint-disable-next-line no-await-in-loop
+    await dbParamQuery(dbClient, persistNodeToClusterQuery, nodeClusterData, loggerOptions);
+
+    // eslint-disable-next-line no-await-in-loop
+    await dbParamQuery(dbClient, persistNodeToProviderQuery, nodeToProviderData, loggerOptions);
+  }
+};
+
+const crawler = async (delayedStart) => {
+  if (delayedStart) {
+    logger.debug(loggerOptions, `Delaying DDC crawler start for ${config.startDelay / 1000}s`);
+    await wait(config.startDelay);
+  }
+
+  logger.info(loggerOptions, 'Running DDC crawler...');
+
+  const dbClient = await getClient(loggerOptions);
+  const api = await getPolkadotAPI(loggerOptions, config.apiCustomTypes);
+
+  let synced = await isNodeSynced(api, loggerOptions);
+  while (!synced) {
+    // eslint-disable-next-line no-await-in-loop
+    await wait(10000);
+    // eslint-disable-next-line no-await-in-loop
+    synced = await isNodeSynced(api, loggerOptions);
+  }
+
+  const startTime = new Date().getTime();
+
+  await actualizeDdcClusters(api, dbClient);
+  await actualizeDdcNodes(api, dbClient);
+
+  logger.debug(loggerOptions, 'Disconnecting from API');
+  await api.disconnect().catch((error) => logger.error(loggerOptions, `API disconnect error: ${JSON.stringify(error)}`));
+
+  logger.debug(loggerOptions, 'Disconnecting from DB');
+  await dbClient.end().catch((error) => logger.error(loggerOptions, `DB disconnect error: ${JSON.stringify(error)}`));
+
+  const endTime = new Date().getTime();
+  logger.info(loggerOptions, `Processed in ${((endTime - startTime) / 1000).toFixed(0)}s`);
+
+  logger.info(loggerOptions, `Next execution in ${(config.pollingTime / 60000).toFixed(0)}m...`);
+  setTimeout(
+    () => crawler(false),
+    config.pollingTime,
+  );
+};
+
+crawler(true).catch((error) => {
+  // eslint-disable-next-line no-console
+  console.error(error);
+  process.exit(-1);
+});
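One subtlety in the crawler above: the `entries()` results are converted with `.toHuman()`, which renders numeric fields as comma-formatted strings, which is why the port fields go through `.replace(/,/g, '')` before `Number(...)`. A hedged sketch of the assumed entry shape (all values hypothetical, only the shape matters):

// Hypothetical shape of one storageNodes entry after .toHuman().
const exampleNode = {
  pubKey: '5Gw3s…', // truncated example key
  providerId: '5FHne…',
  clusterId: '0x825c…',
  props: {
    host: 'node-0.example.com',
    domain: 'example.com',
    ssl: true,
    httpPort: '8,080', // toHuman() inserts thousands separators
    grpcPort: '9,090',
    p2pPort: '30,333',
    mode: 'Full',
  },
};
Number(exampleNode.props.httpPort.replace(/,/g, '')); // -> 8080, safe to bind as a query parameter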
diff --git a/backend/docker/docker-compose-cere-mainnet.yml b/backend/docker/docker-compose-cere-mainnet.yml
index 60a89ba3..2d128ff0 100644
--- a/backend/docker/docker-compose-cere-mainnet.yml
+++ b/backend/docker/docker-compose-cere-mainnet.yml
@@ -73,7 +73,7 @@ services:
   # Hasura
   #
   graphql-engine:
-    image: hasura/graphql-engine:v1.3.3
+    image: hasura/graphql-engine:v2.16.0-ce
     ports:
       - '8082:8080'
     depends_on:
@@ -83,6 +83,7 @@
      HASURA_GRAPHQL_DATABASE_URL: 'postgres://polkastats:polkastats@postgres:5432/polkastats'
      HASURA_GRAPHQL_ENABLE_CONSOLE: 'true' # set to 'false' to disable console
      HASURA_GRAPHQL_ENABLED_LOG_TYPES: 'startup, http-log, webhook-log, websocket-log, query-log'
+      HASURA_GRAPHQL_V1_BOOLEAN_NULL_COLLAPSE: 'true'
      HASURA_GRAPHQL_CORS_DOMAIN: http://localhost:3000
      HASURA_GRAPHQL_UNAUTHORIZED_ROLE: public
      HASURA_GRAPHQL_ADMIN_SECRET: my_admin_secret_key
diff --git a/db/migrations/20240606112923-token-utility.js b/db/migrations/20240606112923-token-utility.js
new file mode 100644
index 00000000..124dfe67
--- /dev/null
+++ b/db/migrations/20240606112923-token-utility.js
@@ -0,0 +1,53 @@
+'use strict';
+
+var dbm;
+var type;
+var seed;
+var fs = require('fs');
+var path = require('path');
+var Promise;
+
+/**
+  * We receive the dbmigrate dependency from dbmigrate initially.
+  * This enables us to not have to rely on NODE_PATH.
+  */
+exports.setup = function(options, seedLink) {
+  dbm = options.dbmigrate;
+  type = dbm.dataType;
+  seed = seedLink;
+  Promise = options.Promise;
+};
+
+exports.up = function(db) {
+  var filePath = path.join(__dirname, 'sqls', '20240606112923-token-utility-up.sql');
+  return new Promise( function( resolve, reject ) {
+    fs.readFile(filePath, {encoding: 'utf-8'}, function(err,data){
+      if (err) return reject(err);
+      console.log('received data: ' + data);
+
+      resolve(data);
+    });
+  })
+  .then(function(data) {
+    return db.runSql(data);
+  });
+};
+
+exports.down = function(db) {
+  var filePath = path.join(__dirname, 'sqls', '20240606112923-token-utility-down.sql');
+  return new Promise( function( resolve, reject ) {
+    fs.readFile(filePath, {encoding: 'utf-8'}, function(err,data){
+      if (err) return reject(err);
+      console.log('received data: ' + data);
+
+      resolve(data);
+    });
+  })
+  .then(function(data) {
+    return db.runSql(data);
+  });
+};
+
+exports._meta = {
+  "version": 1
+};
diff --git a/db/migrations/sqls/20240606112923-token-utility-down.sql b/db/migrations/sqls/20240606112923-token-utility-down.sql
new file mode 100644
index 00000000..3372eb44
--- /dev/null
+++ b/db/migrations/sqls/20240606112923-token-utility-down.sql
@@ -0,0 +1,15 @@
+DROP VIEW IF EXISTS node_provider_stats;
+
+DROP TABLE IF EXISTS ddc_metric_value;
+
+DROP TABLE IF EXISTS node_provider_reward;
+
+DROP TABLE IF EXISTS node_to_cluster;
+
+DROP TABLE IF EXISTS node_to_node_provider;
+
+DROP TABLE IF EXISTS node;
+
+DROP TABLE IF EXISTS node_provider;
+
+DROP TABLE IF EXISTS cluster;
\ No newline at end of file
diff --git a/db/migrations/sqls/20240606112923-token-utility-up.sql b/db/migrations/sqls/20240606112923-token-utility-up.sql
new file mode 100644
index 00000000..12ff8685
--- /dev/null
+++ b/db/migrations/sqls/20240606112923-token-utility-up.sql
@@ -0,0 +1,75 @@
+CREATE TABLE cluster (
+  id TEXT PRIMARY KEY,
+  manager_id TEXT NOT NULL,
+  reserve_id TEXT NOT NULL,
+  node_provider_auth_contract TEXT,
+  erasure_coding_required INT,
+  erasure_coding_total INT,
+  replication_total INT
+);
+
+CREATE TABLE node_provider (
+  id TEXT PRIMARY KEY
+);
+
+CREATE TABLE node (
+  id TEXT PRIMARY KEY,
+  total_capacity BIGINT,
+  uptime INTERVAL,
+  throughput BIGINT,
+  node_type TEXT,
+  tier SMALLINT,
+  host TEXT,
+  domain TEXT,
+  ssl BOOLEAN,
+  http_port SMALLINT,
+  grpc_port SMALLINT,
+  p2p_port SMALLINT,
+  mode TEXT
+);
+
+CREATE TABLE node_to_node_provider (
+  id SERIAL PRIMARY KEY,
+  node_provider_id TEXT NOT NULL,
+  node_id TEXT NOT NULL,
+  FOREIGN KEY (node_provider_id) REFERENCES node_provider(id),
+  FOREIGN KEY (node_id) REFERENCES node(id),
+  CONSTRAINT unique_node_provider_node_id UNIQUE (node_provider_id, node_id)
+);
+
+CREATE TABLE node_provider_reward (
+  id SERIAL PRIMARY KEY,
+  node_provider_id TEXT NOT NULL,
+  rewarded NUMERIC(39, 0),
+  expected_to_reward NUMERIC(39, 0),
+  era INTEGER,
+  batch_index SMALLINT,
+  stored_bytes BIGINT,
+  transferred_bytes BIGINT,
+  puts NUMERIC(39, 0),
+  gets NUMERIC(39, 0),
+  explorer_link TEXT,
+  FOREIGN KEY (node_provider_id) REFERENCES node_provider(id)
+);
+
+CREATE TABLE node_to_cluster (
+  id SERIAL PRIMARY KEY,
+  cluster_id TEXT,
+  node_id TEXT NOT NULL,
+  FOREIGN KEY (cluster_id) REFERENCES cluster(id),
+  FOREIGN KEY (node_id) REFERENCES node(id),
+  UNIQUE (cluster_id, node_id)
+);
+
+CREATE OR REPLACE VIEW node_provider_stats AS
+SELECT
+  node.id AS node_id,
+  node.node_type,
+  node_to_cluster.cluster_id,
+  nnp.node_provider_id
+FROM
+  node
+  JOIN
+    node_to_cluster ON node.id = node_to_cluster.node_id
+  JOIN
+    node_to_node_provider nnp ON node.id = nnp.node_id;
\ No newline at end of file
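For orientation, a sketch of reading the new `node_provider_stats` view with the same db helper the crawler uses (this assumes `getClient` returns a node-postgres client, as the crawler's `dbClient.end()` call suggests; the require path and cluster id are placeholders):

// Hypothetical read-side usage; run inside an async function.
const { getClient } = require('../lib/utils'); // path depends on the caller's location
const dbClient = await getClient({ crawler: 'example' });
const { rows } = await dbClient.query(
  'SELECT node_provider_id, node_id, node_type FROM node_provider_stats WHERE cluster_id = $1',
  ['0x0000…'], // placeholder cluster id
);
console.log(rows);
await dbClient.end();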
diff --git a/frontend/assets/scss/themes/polkastats.scss b/frontend/assets/scss/themes/polkastats.scss
index 86c9adce..5a69f767 100644
--- a/frontend/assets/scss/themes/polkastats.scss
+++ b/frontend/assets/scss/themes/polkastats.scss
@@ -264,6 +264,40 @@ h6 {
   }
 }
 
+// cluster page
+#cluster-table th {
+  text-align: center;
+}
+#rewards-table th {
+  text-align: center;
+}
+.page-cluster .identicon {
+  display: inline-block;
+}
+.page-cluster td div {
+  padding: 0 !important;
+}
+.mw240 {
+  min-width: 240px;
+}
+@media (max-width: 767.98px) {
+  .page-cluster
+    .table.b-table.b-table-stacked-md
+    > tbody
+    > tr
+    > [data-label]::before {
+    display: none;
+  }
+  .page-cluster
+    .table.b-table.b-table-stacked-md
+    > tbody
+    > tr
+    > [data-label]
+    > div {
+    width: 100%;
+  }
+}
+
 // block page
 .block-table tr td:first-child {
   width: 30%;
diff --git a/frontend/components/Header.vue b/frontend/components/Header.vue
index 79b10e1d..b15c4d60 100644
--- a/frontend/components/Header.vue
+++ b/frontend/components/Header.vue
@@ -37,6 +37,11 @@
           {{ $t('layout.default.events') }}
+
+
+          {{ $t('layout.default.dragon_one') }}
+
+
           {{ $t('layout.default.accounts') }}
@@ -50,11 +55,14 @@
diff --git a/frontend/frontend.config.js b/frontend/frontend.config.js
index 8e1ca0dd..5e5738f8 100644
--- a/frontend/frontend.config.js
+++ b/frontend/frontend.config.js
@@ -6,8 +6,8 @@ export const network = {
   ss58Format: 54,
   coinGeckoDenom: 'cere-network',
   nodeWs: 'wss://archive.mainnet.cere.network/ws',
-  backendWs: 'wss://hasura.stats.cere.network/v1/graphql',
-  backendHttp: 'https://api.stats.cere.network/api/v1',
+  backendWs: 'ws://localhost:8082/v1/graphql',
+  backendHttp: 'http://localhost:8080/api/v1',
   googleAnalytics: '',
   theme: '@/assets/scss/themes/polkastats.scss',
   // ranking
diff --git a/frontend/locales/en.json b/frontend/locales/en.json
index 393b3fb1..e43b82cf 100644
--- a/frontend/locales/en.json
+++ b/frontend/locales/en.json
@@ -91,7 +91,8 @@
       "events": "Events",
       "validator": "Validator",
       "how_to_stake": "How to stake",
-      "economics": "Economics"
+      "economics": "Economics",
+      "dragon_one": "Dragon 1"
     },
     "error": {
       "page_not_found": "Page not found!",
@@ -119,6 +120,24 @@
     "loading_data": "Loading data, it can take a long time. Be patient, please",
     "download_csv": "Download in CSV format"
   },
+  "cluster": {
+    "cluster_dashboard": "Cluster Dashboard",
+    "cluster_id": "Cluster ID",
+    "providers": "Providers",
+    "throughput": "Throughput (bytes)",
+    "node_type": "Node Type",
+    "node_provider_id": "Node Provider",
+    "rewards": "Rewards for the last Era",
+    "gets": "Successful Reads in the last Era",
+    "puts": "Successful Writes in the last Era",
+    "historical_rewards": "Historical Rewards",
+    "block": "Block",
+    "event": "Event",
+    "event_index": "Event Index",
+    "rewarded_provider": "Rewarded Provider",
+    "reward_amount": "Rewards (CERE)",
+    "era": "Era"
+  },
   "account": {
     "head_title": "Cere Stats - Account {address}",
     "head_content": "{networkName} Account {address}",
diff --git a/frontend/mixins/commonMixin.js b/frontend/mixins/commonMixin.js
index a6e1c624..b8d48c98 100644
--- a/frontend/mixins/commonMixin.js
+++ b/frontend/mixins/commonMixin.js
@@ -122,5 +122,40 @@ export default {
       const date = moment.unix(timestamp)
       return moment(date).fromNow()
     },
+    filterByUniqueProviderLastEra: (data) => {
+      const seenProviders = {}
+      const result = []
+
+      for (let i = data.length - 1; i >= 0; i--) {
+        const element = data[i]
+        const providerId = element.node_provider_id
+
+        if (
+          !Object.hasOwn(seenProviders, providerId) ||
+          element.era > seenProviders[providerId].era
+        ) {
+          seenProviders[providerId] = element
+          result.push(element)
+        }
+      }
+      return result.reverse()
+    },
+    mergeArraysByProp: (arr1, arr2, prop) => {
+      const map = new Map()
+
+      arr1.forEach((obj) => {
+        map.set(obj[prop], { ...obj })
+      })
+
+      arr2.forEach((obj) => {
+        if (map.has(obj[prop])) {
+          map.set(obj[prop], { ...map.get(obj[prop]), ...obj })
+        } else {
+          map.set(obj[prop], { ...obj })
+        }
+      })
+
+      return Array.from(map.values())
+    },
   },
 }
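Both mixin helpers added above are pure functions, so a small worked example pins down their behaviour (the data is invented; fields follow the reward rows named in en.json above):

// Hypothetical reward rows, sorted by era ascending.
const rows = [
  { node_provider_id: 'A', era: 1, rewarded: '10' },
  { node_provider_id: 'B', era: 1, rewarded: '5' },
  { node_provider_id: 'A', era: 2, rewarded: '12' },
]
// Walking backwards keeps only the latest era per provider:
// filterByUniqueProviderLastEra(rows)
//   -> [{ node_provider_id: 'B', era: 1, ... }, { node_provider_id: 'A', era: 2, ... }]
// mergeArraysByProp shallow-merges two row sets on a shared key:
// mergeArraysByProp([{ id: 1, a: 1 }], [{ id: 1, b: 2 }], 'id')
//   -> [{ id: 1, a: 1, b: 2 }]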
diff --git a/frontend/pages/accounts.vue b/frontend/pages/accounts.vue
index d43d287c..efcbe77c 100644
--- a/frontend/pages/accounts.vue
+++ b/frontend/pages/accounts.vue
@@ -89,10 +89,7 @@