Create tables #189

Draft · wants to merge 19 commits into base: dev-cere (changes from all commits)
2 changes: 1 addition & 1 deletion .github/workflows/dev.yaml
@@ -32,7 +32,7 @@ jobs:
uses: Cerebellum-Network/reusable-workflows/.github/workflows/[email protected]
with:
runs-on: '["self-hosted", "cere-network-large"]'
build_container: 'node:14-buster-slim'
build_container: 'node:16'
deploy_container: 'ubuntu:20.04'
install_packages_command: 'cp -rf frontend/frontend.config-cere-dev.js frontend/frontend.config.js; yarn install'
build_command: 'yarn workspace frontend generate'
10 changes: 9 additions & 1 deletion backend/backend.config.js
@@ -65,13 +65,21 @@ module.exports = {
name: 'ddc',
enabled: !process.env.DDC_DISABLE,
crawler: './crawlers/ddc.js',
// TODO update default to Mainnet once DDC Mainnet deployed. Ticket: https://cerenetwork.atlassian.net/browse/CBI-2050
contractRpc: process.env.DDC_CONTRACT_RPC || 'wss://rpc.testnet.cere.network/ws',
contractName: process.env.DDC_CONTRACT_NAME || 'ddc_bucket',
contractAddress: process.env.DDC_CONTRACT_ADDRESS || '5DTZfAcmZctJodfa4W88BW5QXVBxT4v7UEax91HZCArTih6U',
pollingTime:
parseInt(process.env.DDC_POLLING_TIME_MS, 10)
|| 2 * 60 * 1000, // 2 minutes
},
{
name: 'ddc2',
enabled: !process.env.DDC2_DISABLE,
crawler: './crawlers/ddc2.js',
startDelay: 0,
pollingTime:
parseInt(process.env.DDC2_POLLING_TIME_MS, 10)
|| 2 * 60 * 1000, // 2 minutes
},
],
};
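
Aside (not part of the diff): the pollingTime fallback works because parseInt returns NaN when DDC2_POLLING_TIME_MS is unset or non-numeric, and NaN is falsy, so the || expression supplies the two-minute default; note that an explicit value of 0 also falls through to the default. A minimal sketch:

// Illustration of the fallback semantics used in the config above.
const parsed = parseInt(process.env.DDC2_POLLING_TIME_MS, 10); // NaN if unset or non-numeric
const pollingTime = parsed || 2 * 60 * 1000; // NaN or 0 -> 120000; '30000' -> 30000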
224 changes: 224 additions & 0 deletions backend/crawlers/ddc2.js
@@ -0,0 +1,224 @@
// @ts-check
const pino = require('pino');
const {
wait,
getClient,
getPolkadotAPI,
isNodeSynced,
dbParamQuery,
} = require('../lib/utils');
const backendConfig = require('../backend.config');

const crawlerName = 'ddc2';
const logger = pino({
level: backendConfig.logLevel,
});
const loggerOptions = {
crawler: crawlerName,
};
const config = backendConfig.crawlers.find(
({ name }) => name === crawlerName,
);

const actualizeDdcClusters = async (api, dbClient) => {
const response = await api.query.ddcClusters.clusters.entries();

const clusters = response.map((entity) => entity[1].toHuman());

const persistClusterQuery = `
INSERT INTO cluster (
id,
manager_id,
reserve_id,
node_provider_auth_contract,
erasure_coding_required,
erasure_coding_total,
replication_total
) VALUES (
$1,
$2,
$3,
$4,
$5,
$6,
$7
)
ON CONFLICT (id)
DO UPDATE SET
id = EXCLUDED.id,
manager_id = EXCLUDED.manager_id,
reserve_id = EXCLUDED.reserve_id,
node_provider_auth_contract = EXCLUDED.node_provider_auth_contract,
erasure_coding_required = EXCLUDED.erasure_coding_required,
erasure_coding_total = EXCLUDED.erasure_coding_total,
replication_total = EXCLUDED.replication_total
;`;

// eslint-disable-next-line no-restricted-syntax
for (const cluster of clusters) {
const clusterData = [
cluster.clusterId,
cluster.managerId,
cluster.reserveId,
cluster.props.nodeProviderAuthContract,
cluster.props.erasureCodingRequired,
cluster.props.erasureCodingTotal,
cluster.props.replicationTotal,
];

// eslint-disable-next-line no-await-in-loop
await dbParamQuery(dbClient, persistClusterQuery, clusterData, loggerOptions);
}
};

const actualizeDdcNodes = async (api, dbClient) => {
const response = await api.query.ddcNodes.storageNodes.entries();

const nodes = response.map((entity) => entity[1].toHuman());

const persistNodeQuery = `
INSERT INTO node (
id,
host,
domain,
ssl,
http_port,
grpc_port,
p2p_port,
mode
) VALUES (
$1,
$2,
$3,
$4,
$5,
$6,
$7,
$8
)
ON CONFLICT (id)
DO UPDATE SET
id = EXCLUDED.id,
host = EXCLUDED.host,
domain = EXCLUDED.domain,
ssl = EXCLUDED.ssl,
http_port = EXCLUDED.http_port,
grpc_port = EXCLUDED.grpc_port,
p2p_port = EXCLUDED.p2p_port,
mode = EXCLUDED.mode
;`;

const persistNodeToClusterQuery = `
INSERT INTO node_to_cluster (
cluster_id,
node_id
) VALUES (
$1,
$2
)
ON CONFLICT (cluster_id, node_id)
DO UPDATE SET
cluster_id = EXCLUDED.cluster_id,
node_id = EXCLUDED.node_id
;`;

const persistProviderQuery = `
INSERT INTO node_provider (
id
) VALUES (
$1
)
ON CONFLICT (id)
DO UPDATE SET
id = EXCLUDED.id
;`;

const persistNodeToProviderQuery = `
INSERT INTO node_to_node_provider (
node_provider_id,
node_id
) VALUES (
$1,
$2
)
ON CONFLICT (node_provider_id, node_id)
DO UPDATE SET
node_provider_id = EXCLUDED.node_provider_id,
node_id = EXCLUDED.node_id
;`;

// eslint-disable-next-line no-restricted-syntax
for (const node of nodes) {
const nodeData = [
node.pubKey,
node.props.host,
node.props.domain,
node.props.ssl,
Number(node.props.httpPort.replace(/,/g, '')),
Number(node.props.grpcPort.replace(/,/g, '')),
Number(node.props.p2pPort.replace(/,/g, '')),
node.props.mode,
];
const providerData = [node.providerId];
const nodeToProviderData = [node.providerId, node.pubKey];
const nodeClusterData = [node.clusterId, node.pubKey];

// eslint-disable-next-line no-await-in-loop
await dbParamQuery(dbClient, persistProviderQuery, providerData, loggerOptions);

// eslint-disable-next-line no-await-in-loop
await dbParamQuery(dbClient, persistNodeQuery, nodeData, loggerOptions);

// eslint-disable-next-line no-await-in-loop
await dbParamQuery(dbClient, persistNodeToClusterQuery, nodeClusterData, loggerOptions);

// eslint-disable-next-line no-await-in-loop
await dbParamQuery(dbClient, persistNodeToProviderQuery, nodeToProviderData, loggerOptions);
}
};

const crawler = async (delayedStart) => {
if (delayedStart) {
logger.debug(loggerOptions, `Delaying DDC crawler start for ${config.startDelay / 1000}s`);
await wait(config.startDelay);
}

logger.info(loggerOptions, 'Running DDC crawler...');

const dbClient = await getClient(loggerOptions);
const api = await getPolkadotAPI(loggerOptions, config.apiCustomTypes);

let synced = await isNodeSynced(api, loggerOptions);
while (!synced) {
// eslint-disable-next-line no-await-in-loop
await wait(10000);
// eslint-disable-next-line no-await-in-loop
synced = await isNodeSynced(api, loggerOptions);
}

const startTime = new Date().getTime();

await actualizeDdcClusters(api, dbClient);
await actualizeDdcNodes(api, dbClient);

logger.debug(loggerOptions, 'Disconnecting from API');
await api.disconnect().catch((error) => logger.error(loggerOptions, `API disconnect error: ${JSON.stringify(error)}`));

logger.debug(loggerOptions, 'Disconnecting from DB');
await dbClient.end().catch((error) => logger.error(loggerOptions, `DB disconnect error: ${JSON.stringify(error)}`));

const endTime = new Date().getTime();
logger.info(loggerOptions, `Processed in ${((endTime - startTime) / 1000).toFixed(0)}s`);

logger.info(loggerOptions, `Next execution in ${(config.pollingTime / 60000).toFixed(0)}m...`);
setTimeout(
() => crawler(false),
config.pollingTime,
);
};

crawler(true).catch((error) => {
// eslint-disable-next-line no-console
console.error(error);
process.exit(-1);
});
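
For context, a hedged sketch (field names inferred from the accesses in actualizeDdcNodes; all concrete values below are invented): api.query.ddcNodes.storageNodes.entries() returns key/value pairs whose decoded values look roughly like the object below. toHuman() renders numbers as comma-grouped strings, which is why the port fields go through Number(x.replace(/,/g, '')).

// Hypothetical shape of one decoded storage-node entry, i.e. one element of
// (await api.query.ddcNodes.storageNodes.entries()).map((entity) => entity[1].toHuman())
const exampleStorageNode = {
  pubKey: '5Gw3s7q9...',      // used as the node id in the node table
  providerId: '5FHneW46...',  // owner, persisted into node_provider
  clusterId: '0x825c4b23...', // membership, persisted into node_to_cluster
  props: {
    host: 'storage-1.example.com',
    domain: 'example.com',
    ssl: true,
    httpPort: '8,080',        // toHuman() adds thousands separators
    grpcPort: '9,090',
    p2pPort: '9,070',
    mode: 'Storage',
  },
};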
3 changes: 2 additions & 1 deletion backend/docker/docker-compose-cere-mainnet.yml
@@ -73,7 +73,7 @@ services:
# Hasura
#
graphql-engine:
image: hasura/graphql-engine:v1.3.3
image: hasura/graphql-engine:v2.16.0-ce
ports:
- '8082:8080'
depends_on:
@@ -83,6 +83,7 @@
HASURA_GRAPHQL_DATABASE_URL: 'postgres://polkastats:polkastats@postgres:5432/polkastats'
HASURA_GRAPHQL_ENABLE_CONSOLE: 'true' # set to 'false' to disable console
HASURA_GRAPHQL_ENABLED_LOG_TYPES: 'startup, http-log, webhook-log, websocket-log, query-log'
HASURA_GRAPHQL_V1_BOOLEAN_NULL_COLLAPSE: 'true'
HASURA_GRAPHQL_CORS_DOMAIN: http://localhost:3000
HASURA_GRAPHQL_UNAUTHORIZED_ROLE: public
HASURA_GRAPHQL_ADMIN_SECRET: my_admin_secret_key
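
Context for the new environment variable (hedged, based on my reading of the Hasura docs): upgrading graphql-engine from v1.3.3 to v2.16.0 changes how null inside boolean where expressions is evaluated, and HASURA_GRAPHQL_V1_BOOLEAN_NULL_COLLAPSE restores the v1 behaviour in which a null condition collapses to true (matches everything) rather than being rejected. Roughly:

// Hypothetical GraphQL query variables; the column name is invented.
const variables = {
  where: { manager_id: { _eq: null } },
};
// v1 semantics (flag enabled): the null condition is collapsed and ignored.
// v2 default semantics: the null comparison is no longer silently ignored.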
53 changes: 53 additions & 0 deletions db/migrations/20240606112923-token-utility.js
@@ -0,0 +1,53 @@
'use strict';

var dbm;
var type;
var seed;
var fs = require('fs');
var path = require('path');
var Promise;

/**
* We receive the dbmigrate dependency from dbmigrate initially.
* This enables us to not have to rely on NODE_PATH.
*/
exports.setup = function(options, seedLink) {
dbm = options.dbmigrate;
type = dbm.dataType;
seed = seedLink;
Promise = options.Promise;
};

exports.up = function(db) {
var filePath = path.join(__dirname, 'sqls', '20240606112923-token-utility-up.sql');
return new Promise( function( resolve, reject ) {
fs.readFile(filePath, {encoding: 'utf-8'}, function(err,data){
if (err) return reject(err);
console.log('received data: ' + data);

resolve(data);
});
})
.then(function(data) {
return db.runSql(data);
});
};

exports.down = function(db) {
var filePath = path.join(__dirname, 'sqls', '20240606112923-token-utility-down.sql');
return new Promise( function( resolve, reject ) {
fs.readFile(filePath, {encoding: 'utf-8'}, function(err,data){
if (err) return reject(err);
console.log('received data: ' + data);

resolve(data);
});
})
.then(function(data) {
return db.runSql(data);
});
};

exports._meta = {
"version": 1
};
13 changes: 13 additions & 0 deletions db/migrations/sqls/20240606112923-token-utility-down.sql
@@ -0,0 +1,13 @@
DROP TABLE IF EXISTS ddc_metric_value;

DROP TABLE IF EXISTS cluster;

DROP TABLE IF EXISTS node;

DROP TABLE IF EXISTS node_provider;

DROP TABLE IF EXISTS node_provider_reward;

DROP TABLE IF EXISTS node_to_cluster;

DROP TABLE IF EXISTS node_to_node_provider;
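
The matching up script (20240606112923-token-utility-up.sql) is not included in this diff. As a hedged reconstruction: the column set for the cluster table can be read off the crawler's INSERT statement above; the SQL types here are assumptions.

// Hypothetical fragment of the up migration, shown db-migrate style.
exports.up = function (db) {
  return db.runSql(`
    CREATE TABLE IF NOT EXISTS cluster (
      id TEXT PRIMARY KEY,
      manager_id TEXT,
      reserve_id TEXT,
      node_provider_auth_contract TEXT,
      erasure_coding_required INTEGER,
      erasure_coding_total INTEGER,
      replication_total INTEGER
    );
  `);
};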