feat(best): class-ify runners
zerious committed Sep 20, 2018
1 parent ac254c5 commit 212a168
Showing 5 changed files with 282 additions and 259 deletions.
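In short: each runner package's exported run() function becomes a method on a Runner class. A minimal before/after sketch of the consumer-side change, reconstructed from the test diff below (BENCHMARK_CONFIG, PROJECT_CONFIG, GLOBAL_CONFIG and MOCK_MESSAGER are fixtures defined in that test file, and the calls run inside an async test):

    // Before this commit: import and call the exported function directly
    import { run } from '../index';
    const results = await run(BENCHMARK_CONFIG, PROJECT_CONFIG, GLOBAL_CONFIG, MOCK_MESSAGER);

    // After this commit: instantiate the default-exported class and call its run() method
    import Runner from '../index';
    const runner = new Runner();
    const results = await runner.run(BENCHMARK_CONFIG, PROJECT_CONFIG, GLOBAL_CONFIG, MOCK_MESSAGER);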
@@ -1,5 +1,7 @@
import * as path from 'path';
import { run } from '../index';
import Runner from '../index';

const runner = new Runner();

const BENCHMARK_CONFIG = {
benchmarkName: 'test',
@@ -21,7 +23,7 @@ const MOCK_MESSAGER = {

describe('run', () => {
test('result', async () => {
const results = await run(BENCHMARK_CONFIG, PROJECT_CONFIG, GLOBAL_CONFIG, MOCK_MESSAGER);
const results = await runner.run(BENCHMARK_CONFIG, PROJECT_CONFIG, GLOBAL_CONFIG, MOCK_MESSAGER);
expect(results).toMatchObject({
environment: {
browser: expect.any(Object),
@@ -34,7 +36,7 @@ describe('run', () => {

test('benchmarkIterations config', async () => {
const iterations = 3;
const { results } = await run(
const { results } = await runner.run(
BENCHMARK_CONFIG,
{
benchmarkIterations: iterations,
@@ -48,7 +50,7 @@ describe('run', () => {

    // TODO: Find a proper way to test maxDuration without making the unit tests too slow
// test.skip('maxDuration', async () => {
// const results = await run(BENCHMARK_CONFIG, {
// const results = await runner.run(BENCHMARK_CONFIG, {
// benchmarkMaxDuration: 20,
// }, GLOBAL_CONFIG, MOCK_MESSAGER);

@@ -57,7 +59,7 @@ describe('run', () => {

test('benchmarkMinIterations config', async () => {
const minIterations = 3;
const { results } = await run(
const { results } = await runner.run(
BENCHMARK_CONFIG,
{
benchmarkMaxDuration: -1,
@@ -78,7 +80,7 @@ describe('errors', () => {
benchmarkEntry: path.resolve(__dirname, 'fixtures', 'syntax-error.html'),
};

return expect(run(benchmarkConfig, PROJECT_CONFIG, GLOBAL_CONFIG, MOCK_MESSAGER)).rejects.toThrow(
return expect(runner.run(benchmarkConfig, PROJECT_CONFIG, GLOBAL_CONFIG, MOCK_MESSAGER)).rejects.toThrow(
/Benchmark parse error/,
);
});
@@ -89,7 +91,7 @@ describe('errors', () => {
benchmarkEntry: path.resolve(__dirname, 'fixtures', 'runtime-error.html'),
};

return expect(run(benchmarkConfig, PROJECT_CONFIG, GLOBAL_CONFIG, MOCK_MESSAGER)).rejects.toThrow(
return expect(runner.run(benchmarkConfig, PROJECT_CONFIG, GLOBAL_CONFIG, MOCK_MESSAGER)).rejects.toThrow(
/I fail at runtime/,
);
});
@@ -110,7 +112,7 @@ describe('messager', () => {
},
};

await run(BENCHMARK_CONFIG, PROJECT_CONFIG, GLOBAL_CONFIG, messager);
await runner.run(BENCHMARK_CONFIG, PROJECT_CONFIG, GLOBAL_CONFIG, messager);

expect(calls).toEqual([
['start', BENCHMARK_CONFIG.benchmarkName],
268 changes: 135 additions & 133 deletions packages/best-runner-headless/src/index.js
@@ -17,154 +17,156 @@ const BROWSER_ARGS = [
const UPDATE_INTERVAL = 300;
const PUPPETEER_OPTIONS = { args: BROWSER_ARGS };

async function runIteration(page, state, opts) {
// eslint-disable-next-line no-undef
return page.evaluate(o => BEST.runBenchmark(o), opts);
}

async function runClientIterations(page, state, opts, messager) {
// Run an iteration to estimate the time it will take
const testResult = await runIteration(page, state, { iterations: 1 });
const estimatedIterationTime = testResult.executedTime;

const start = Date.now();
// eslint-disable-next-line lwc/no-set-interval
const intervalId = setInterval(() => {
const executing = Date.now() - start;
state.executedTime = executing;
state.executedIterations = Math.round(executing / estimatedIterationTime);
messager.updateBenchmarkProgress(state, opts);
}, UPDATE_INTERVAL);
export default class Runner {
async run({ benchmarkName, benchmarkEntry }, projectConfig, globalConfig, messager) {
const opts = this.normalizeRuntimeOptions(projectConfig);
const state = this.initializeBenchmarkState(opts);
const { projectName } = projectConfig;

let browser;
let parseError;
try {
browser = await puppeteer.launch(PUPPETEER_OPTIONS);
const environment = await this.normalizeEnvironment(browser, projectConfig, globalConfig);

messager.onBenchmarkStart(benchmarkName, projectName);

const page = await browser.newPage();
page.on('pageerror', (err) => (parseError = err));
await page.goto('file:///' + benchmarkEntry);

// page.goto() will wait for the onload
            // if we caught something that throws, there is a parsing error in the benchmark code
if (parseError) {
messager.onBenchmarkError(benchmarkName, projectName);
parseError.message = 'Benchmark parse error.\n' + parseError.message;
throw parseError;
}

const { results } = await this.runIterations(page, state, opts, messager);
return { results, environment };
} catch (e) {
messager.onBenchmarkError(benchmarkName, projectName);
throw e;
} finally {
messager.onBenchmarkEnd(benchmarkName, projectName);

await page.reload();
const clientRawResults = await runIteration(page, state, opts);
clearInterval(intervalId);
if (browser) {
await browser.close();
}
}
}

const results = clientRawResults.results;
state.results.push(...results);
async runIteration(page, state, opts) {
// eslint-disable-next-line no-undef
return page.evaluate(o => BEST.runBenchmark(o), opts);
}

return state;
}
async runClientIterations(page, state, opts, messager) {
// Run an iteration to estimate the time it will take
const testResult = await this.runIteration(page, state, { iterations: 1 });
const estimatedIterationTime = testResult.executedTime;

async function runServerIterations(page, state, opts, messager) {
if (state.executedTime < opts.maxDuration || state.executedIterations < opts.minSampleCount) {
const start = Date.now();
const results = await runIteration(page, state, opts);
await page.reload();
state.executedTime += Date.now() - start;
state.executedIterations += 1;
state.results.push(results.results[0]);
messager.updateBenchmarkProgress(state, opts);
return runIterations(page, state, opts, messager);
}
// eslint-disable-next-line lwc/no-set-interval
const intervalId = setInterval(() => {
const executing = Date.now() - start;
state.executedTime = executing;
state.executedIterations = Math.round(executing / estimatedIterationTime);
messager.updateBenchmarkProgress(state, opts);
}, UPDATE_INTERVAL);

return state;
}
await page.reload();
const clientRawResults = await this.runIteration(page, state, opts);
clearInterval(intervalId);

async function runIterations(page, state, opts, messager) {
// TODO: Throw on timeouts, current logic is
    // currently non-existent for client-side iteration mode
// if (state.executedTime > opts.maxDuration) {
    //     throw new Error('Benchmark timed out');
// }
const results = clientRawResults.results;
state.results.push(...results);

if (state.iterateOnClient) {
return runClientIterations(page, state, opts, messager);
return state;
}
return runServerIterations(page, state, opts, messager);
}

function normalizeRuntimeOptions(projectConfig) {
const { benchmarkIterations, benchmarkOnClient } = projectConfig;
const definedIterations = Number.isInteger(benchmarkIterations);
    // For benchmarking on the client or a defined number of iterations, duration is irrelevant
const maxDuration = definedIterations ? 1 : projectConfig.benchmarkMaxDuration;
const minSampleCount = definedIterations ? benchmarkIterations : projectConfig.benchmarkMinIterations;

return {
maxDuration,
minSampleCount,
iterations: benchmarkIterations,
iterateOnClient: benchmarkOnClient,
};
}

function initializeBenchmarkState(opts) {
return {
executedTime: 0,
executedIterations: 0,
results: [],
iterateOnClient: opts.iterateOnClient,
};
}

async function normalizeEnvironment(browser, projectConfig, globalConfig) {
const {
benchmarkOnClient,
benchmarkRunner,
benchmarkEnvironment,
benchmarkIterations,
projectName,
} = projectConfig;
const { system, cpu, os, load } = await getSystemInfo();
const version = await browser.version();
return {
hardware: { system, cpu, os },
runtime: { load },
browser: { version, options: BROWSER_ARGS },
configuration: {
project: {
projectName,
benchmarkOnClient,
benchmarkRunner,
benchmarkEnvironment,
benchmarkIterations,
},
global: {
gitCommitHash: globalConfig.gitCommit,
gitHasLocalChanges: globalConfig.gitLocalChanges,
gitBranch: globalConfig.gitBranch,
gitRepository: globalConfig.gitRepository,
},
},
};
}

export async function run({ benchmarkName, benchmarkEntry }, projectConfig, globalConfig, messager) {
const opts = normalizeRuntimeOptions(projectConfig);
const state = initializeBenchmarkState(opts);
const { projectName } = projectConfig;

let browser;
let parseError;
try {
browser = await puppeteer.launch(PUPPETEER_OPTIONS);
const environment = await normalizeEnvironment(browser, projectConfig, globalConfig);
async runServerIterations(page, state, opts, messager) {
if (state.executedTime < opts.maxDuration || state.executedIterations < opts.minSampleCount) {
const start = Date.now();
const results = await this.runIteration(page, state, opts);
await page.reload();
state.executedTime += Date.now() - start;
state.executedIterations += 1;
state.results.push(results.results[0]);
messager.updateBenchmarkProgress(state, opts);
return this.runIterations(page, state, opts, messager);
}

messager.onBenchmarkStart(benchmarkName, projectName);
return state;
}

const page = await browser.newPage();
page.on('pageerror', (err) => (parseError = err));
await page.goto('file:///' + benchmarkEntry);
async runIterations(page, state, opts, messager) {
// TODO: Throw on timeouts, current logic is
        // currently non-existent for client-side iteration mode
// if (state.executedTime > opts.maxDuration) {
        //     throw new Error('Benchmark timed out');
// }

// page.goto() will wait for the onload
        // if we caught something that throws, there is a parsing error in the benchmark code
if (parseError) {
messager.onBenchmarkError(benchmarkName, projectName);
parseError.message = 'Benchmark parse error.\n' + parseError.message;
throw parseError;
if (state.iterateOnClient) {
return this.runClientIterations(page, state, opts, messager);
}
return this.runServerIterations(page, state, opts, messager);
}

const { results } = await runIterations(page, state, opts, messager);
return { results, environment };
} catch (e) {
messager.onBenchmarkError(benchmarkName, projectName);
throw e;
} finally {
messager.onBenchmarkEnd(benchmarkName, projectName);
normalizeRuntimeOptions(projectConfig) {
const { benchmarkIterations, benchmarkOnClient } = projectConfig;
const definedIterations = Number.isInteger(benchmarkIterations);
        // For benchmarking on the client or a defined number of iterations, duration is irrelevant
const maxDuration = definedIterations ? 1 : projectConfig.benchmarkMaxDuration;
const minSampleCount = definedIterations ? benchmarkIterations : projectConfig.benchmarkMinIterations;

return {
maxDuration,
minSampleCount,
iterations: benchmarkIterations,
iterateOnClient: benchmarkOnClient,
};
}

if (browser) {
await browser.close();
}
initializeBenchmarkState(opts) {
return {
executedTime: 0,
executedIterations: 0,
results: [],
iterateOnClient: opts.iterateOnClient,
};
}

async normalizeEnvironment(browser, projectConfig, globalConfig) {
const {
benchmarkOnClient,
benchmarkRunner,
benchmarkEnvironment,
benchmarkIterations,
projectName,
} = projectConfig;
const { system, cpu, os, load } = await getSystemInfo();
const version = await browser.version();
return {
hardware: { system, cpu, os },
runtime: { load },
browser: { version, options: BROWSER_ARGS },
configuration: {
project: {
projectName,
benchmarkOnClient,
benchmarkRunner,
benchmarkEnvironment,
benchmarkIterations,
},
global: {
gitCommitHash: globalConfig.gitCommit,
gitHasLocalChanges: globalConfig.gitLocalChanges,
gitBranch: globalConfig.gitBranch,
gitRepository: globalConfig.gitRepository,
},
},
};
}
}
8 changes: 5 additions & 3 deletions packages/best-runner-remote/src/index.js
@@ -93,7 +93,9 @@ function proxifyRunner(benchmarkEntryBundle, runnerConfig, projectConfig, global
});
}

export function run(benchmarkEntryBundle, projectConfig, globalConfig, messager) {
const { benchmarkRunnerConfig } = projectConfig;
return proxifyRunner(benchmarkEntryBundle, benchmarkRunnerConfig, projectConfig, globalConfig, messager);
export class Runner {
run(benchmarkEntryBundle, projectConfig, globalConfig, messager) {
const { benchmarkRunnerConfig } = projectConfig;
return proxifyRunner(benchmarkEntryBundle, benchmarkRunnerConfig, projectConfig, globalConfig, messager);
}
}
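One difference worth noting between the two files above: best-runner-headless exposes the class as a default export (export default class Runner), while best-runner-remote uses a named export (export class Runner), so callers import them differently. A small sketch, with module specifiers based on the packages/ directory names (illustrative only, not confirmed package names) and placeholder argument variables:

    // Default export from the headless runner
    import HeadlessRunner from 'best-runner-headless';
    // Named export from the remote runner
    import { Runner as RemoteRunner } from 'best-runner-remote';

    // Both classes take a benchmark entry descriptor, projectConfig, globalConfig and a messager
    const runner = new HeadlessRunner();
    const results = await runner.run(benchmarkConfig, projectConfig, globalConfig, messager);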