diff --git a/Cargo.lock b/Cargo.lock index c02ee6338d93a..4ecd6e978094f 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -1764,6 +1764,7 @@ dependencies = [ "oxc_ast", "oxc_ast_visit", "oxc_codegen", + "oxc_estree_tokens", "oxc_formatter", "oxc_isolated_declarations", "oxc_linter", diff --git a/tasks/benchmark/Cargo.toml b/tasks/benchmark/Cargo.toml index da51f72234bd5..26779697c4f28 100644 --- a/tasks/benchmark/Cargo.toml +++ b/tasks/benchmark/Cargo.toml @@ -67,6 +67,7 @@ oxc_allocator = { workspace = true, optional = true } oxc_ast = { workspace = true, optional = true, features = ["serialize"] } oxc_ast_visit = { workspace = true, optional = true, features = ["serialize"] } oxc_codegen = { workspace = true, optional = true, features = ["sourcemap"] } +oxc_estree_tokens = { workspace = true, optional = true } oxc_formatter = { workspace = true, optional = true } oxc_isolated_declarations = { workspace = true, optional = true } oxc_linter = { workspace = true, optional = true } @@ -103,6 +104,7 @@ compiler = [ "dep:oxc_ast", "dep:oxc_ast_visit", "dep:oxc_codegen", + "dep:oxc_estree_tokens", "dep:oxc_formatter", "dep:oxc_isolated_declarations", "dep:oxc_mangler", diff --git a/tasks/benchmark/benches/parser.rs b/tasks/benchmark/benches/parser.rs index 09429d7e59b22..8a7027756850b 100644 --- a/tasks/benchmark/benches/parser.rs +++ b/tasks/benchmark/benches/parser.rs @@ -1,44 +1,89 @@ use oxc_allocator::Allocator; use oxc_ast_visit::utf8_to_utf16::Utf8ToUtf16; use oxc_benchmark::{BenchmarkId, Criterion, black_box, criterion_group, criterion_main}; -use oxc_parser::{ParseOptions, Parser}; +use oxc_estree_tokens::{EstreeTokenOptions, collect_token_context, to_estree_tokens_json}; +use oxc_parser::{ParseOptions, Parser, ParserReturn, config::RuntimeParserConfig}; use oxc_tasks_common::TestFiles; fn bench_parser(criterion: &mut Criterion) { let mut group = criterion.benchmark_group("parser"); + + for file in TestFiles::minimal().files() { + let id = 
BenchmarkId::from_parameter(&file.file_name); + let source_text = &file.source_text; + let source_type = file.source_type; + + group.bench_function(id, |b| { + // Do not include initializing allocator in benchmark. + // User code would likely reuse the same allocator over and over to parse multiple files, + // so we do the same here. + let mut allocator = Allocator::default(); + + b.iter(|| { + Parser::new(&allocator, source_text, source_type) + .with_options(ParseOptions { + parse_regular_expression: true, + ..ParseOptions::default() + }) + .parse(); + allocator.reset(); + }); + }); + } + + group.finish(); +} + +fn bench_parser_tokens(criterion: &mut Criterion) { + let mut group = criterion.benchmark_group("parser_tokens"); + for file in TestFiles::minimal().files() { let id = BenchmarkId::from_parameter(&file.file_name); let source_text = &file.source_text; let source_type = file.source_type; + group.bench_function(id, |b| { // Do not include initializing allocator in benchmark. // User code would likely reuse the same allocator over and over to parse multiple files, // so we do the same here. let mut allocator = Allocator::default(); + b.iter(|| { + // Use `RuntimeParserConfig` (runtime config), same as NAPI parser package will. + // `bench_parser` uses `NoTokensParserConfig` (implicitly as default). + // Usually it's inadvisable to use 2 different configs in the same application, + // but this is just a benchmark, and it's better if we don't entwine this benchmark with `bench_parser`. 
+ let config = RuntimeParserConfig::new(true); + Parser::new(&allocator, source_text, source_type) .with_options(ParseOptions { parse_regular_expression: true, ..ParseOptions::default() }) + .with_config(config) .parse(); + allocator.reset(); }); }); } + group.finish(); } fn bench_estree(criterion: &mut Criterion) { let mut group = criterion.benchmark_group("estree"); + for file in TestFiles::complicated().files().iter().take(1) { let id = BenchmarkId::from_parameter(&file.file_name); let source_text = &file.source_text; let source_type = file.source_type; let mut allocator = Allocator::default(); + group.bench_function(id, |b| { b.iter_with_setup_wrapper(|runner| { allocator.reset(); + let mut program = Parser::new(&allocator, source_text, source_type) .with_options(ParseOptions { parse_regular_expression: true, @@ -46,6 +91,7 @@ fn bench_estree(criterion: &mut Criterion) { }) .parse() .program; + runner.run(|| { let span_converter = Utf8ToUtf16::new(program.source_text); span_converter.convert_program(&mut program); @@ -57,8 +103,64 @@ fn bench_estree(criterion: &mut Criterion) { }); }); } + + group.finish(); +} + +fn bench_estree_tokens(criterion: &mut Criterion) { + let mut group = criterion.benchmark_group("estree_tokens"); + + for file in TestFiles::complicated().files().iter().take(1) { + let id = BenchmarkId::from_parameter(&file.file_name); + let source_text = &file.source_text; + let source_type = file.source_type; + let mut allocator = Allocator::default(); + + group.bench_function(id, |b| { + b.iter_with_setup_wrapper(|runner| { + allocator.reset(); + + // Use `RuntimeParserConfig` (runtime config), same as NAPI parser package will. + // `bench_estree` uses `NoTokensParserConfig` (implicitly as default). + // Usually it's inadvisable to use 2 different configs in the same application, + // but this is just a benchmark, and it's better if we don't entwine this benchmark with `bench_estree`. 
+            let config = RuntimeParserConfig::new(true); + + let ret = Parser::new(&allocator, source_text, source_type) + .with_options(ParseOptions { + parse_regular_expression: true, + ..ParseOptions::default() + }) + .with_config(config) + .parse(); + let ParserReturn { mut program, tokens, .. } = ret; + + // Span conversion of AST is not performed in measured section, as we only want to measure tokens + let span_converter = Utf8ToUtf16::new(program.source_text); + span_converter.convert_program(&mut program); + + runner.run(|| { + let token_options = EstreeTokenOptions::test262(); + let token_context = collect_token_context(&program, token_options); + let tokens_json = to_estree_tokens_json( + &allocator, + source_text, + &tokens, + &token_context, + token_options, + ); + let tokens_json = black_box(tokens_json); + // Allocate tokens JSON into arena, same as linter and NAPI parser package do + let _tokens_json = allocator.alloc_str(&tokens_json); + + program + }); + }); + }); + } + group.finish(); } -criterion_group!(parser, bench_parser, bench_estree); +criterion_group!(parser, bench_parser, bench_parser_tokens, bench_estree, bench_estree_tokens); criterion_main!(parser);