diff --git a/prisma-fmt/src/code_actions.rs b/prisma-fmt/src/code_actions.rs index 4f072f60b414..371e791e49cd 100644 --- a/prisma-fmt/src/code_actions.rs +++ b/prisma-fmt/src/code_actions.rs @@ -31,8 +31,13 @@ pub(crate) fn available_actions(schema: String, params: CodeActionParams) -> Vec let datasource = config.datasources.first(); - for source in validated_schema.db.ast().sources() { - relation_mode::edit_referential_integrity(&mut actions, ¶ms, validated_schema.db.source(), source) + for source in validated_schema.db.ast_assert_single().sources() { + relation_mode::edit_referential_integrity( + &mut actions, + ¶ms, + validated_schema.db.source_assert_single(), + source, + ) } // models AND views @@ -45,21 +50,27 @@ pub(crate) fn available_actions(schema: String, params: CodeActionParams) -> Vec multi_schema::add_schema_block_attribute_model( &mut actions, ¶ms, - validated_schema.db.source(), + validated_schema.db.source_assert_single(), config, model, ); - multi_schema::add_schema_to_schemas(&mut actions, ¶ms, validated_schema.db.source(), config, model); + multi_schema::add_schema_to_schemas( + &mut actions, + ¶ms, + validated_schema.db.source_assert_single(), + config, + model, + ); } if matches!(datasource, Some(ds) if ds.active_provider == "mongodb") { - mongodb::add_at_map_for_id(&mut actions, ¶ms, validated_schema.db.source(), model); + mongodb::add_at_map_for_id(&mut actions, ¶ms, validated_schema.db.source_assert_single(), model); mongodb::add_native_for_auto_id( &mut actions, ¶ms, - validated_schema.db.source(), + validated_schema.db.source_assert_single(), model, datasource.unwrap(), ); @@ -71,7 +82,7 @@ pub(crate) fn available_actions(schema: String, params: CodeActionParams) -> Vec multi_schema::add_schema_block_attribute_enum( &mut actions, ¶ms, - validated_schema.db.source(), + validated_schema.db.source_assert_single(), config, enumerator, ) @@ -88,7 +99,7 @@ pub(crate) fn available_actions(schema: String, params: CodeActionParams) -> Vec 
relations::add_referenced_side_unique( &mut actions, ¶ms, - validated_schema.db.source(), + validated_schema.db.source_assert_single(), complete_relation, ); @@ -96,7 +107,7 @@ pub(crate) fn available_actions(schema: String, params: CodeActionParams) -> Vec relations::add_referencing_side_unique( &mut actions, ¶ms, - validated_schema.db.source(), + validated_schema.db.source_assert_single(), complete_relation, ); } @@ -105,7 +116,7 @@ pub(crate) fn available_actions(schema: String, params: CodeActionParams) -> Vec relations::add_index_for_relation_fields( &mut actions, ¶ms, - validated_schema.db.source(), + validated_schema.db.source_assert_single(), complete_relation.referencing_field(), ); } @@ -114,7 +125,7 @@ pub(crate) fn available_actions(schema: String, params: CodeActionParams) -> Vec relation_mode::replace_set_default_mysql( &mut actions, ¶ms, - validated_schema.db.source(), + validated_schema.db.source_assert_single(), complete_relation, config, ) diff --git a/prisma-fmt/src/code_actions/multi_schema.rs b/prisma-fmt/src/code_actions/multi_schema.rs index 0e47a008a910..7e6aa9ceaf80 100644 --- a/prisma-fmt/src/code_actions/multi_schema.rs +++ b/prisma-fmt/src/code_actions/multi_schema.rs @@ -142,7 +142,7 @@ pub(super) fn add_schema_to_schemas( formatted_attribute, true, // todo: update spans so that we can just append to the end of the _inside_ of the array. 
Instead of needing to re-append the `]` or taking the span end -1 - Span::new(span.start, span.end - 1), + Span::new(span.start, span.end - 1, psl::parser_database::FileId::ZERO), params, ) } diff --git a/prisma-fmt/src/lib.rs b/prisma-fmt/src/lib.rs index 0449faf52665..ada79cd7290b 100644 --- a/prisma-fmt/src/lib.rs +++ b/prisma-fmt/src/lib.rs @@ -225,7 +225,7 @@ pub(crate) fn range_to_span(range: Range, document: &str) -> ast::Span { let start = position_to_offset(&range.start, document).unwrap(); let end = position_to_offset(&range.end, document).unwrap(); - ast::Span::new(start, end) + ast::Span::new(start, end, psl::parser_database::FileId::ZERO) } /// Gives the LSP position right after the given span. diff --git a/prisma-fmt/src/text_document_completion.rs b/prisma-fmt/src/text_document_completion.rs index 4df8f3e91471..caca887c6ac6 100644 --- a/prisma-fmt/src/text_document_completion.rs +++ b/prisma-fmt/src/text_document_completion.rs @@ -41,7 +41,7 @@ pub(crate) fn completion(schema: String, params: CompletionParams) -> Completion let db = { let mut diag = Diagnostics::new(); - ParserDatabase::new(source_file, &mut diag) + ParserDatabase::new_single_file(source_file, &mut diag) }; let ctx = CompletionContext { @@ -91,7 +91,7 @@ impl<'a> CompletionContext<'a> { } fn push_ast_completions(ctx: CompletionContext<'_>, completion_list: &mut CompletionList) { - match ctx.db.ast().find_at_position(ctx.position) { + match ctx.db.ast_assert_single().find_at_position(ctx.position) { ast::SchemaPosition::Model( _model_id, ast::ModelPosition::Field(_, ast::FieldPosition::Attribute("relation", _, Some(attr_name))), @@ -190,7 +190,7 @@ fn ds_has_prop(ctx: CompletionContext<'_>, prop: &str) -> bool { fn push_namespaces(ctx: CompletionContext<'_>, completion_list: &mut CompletionList) { for (namespace, _) in ctx.namespaces() { - let insert_text = if add_quotes(ctx.params, ctx.db.source()) { + let insert_text = if add_quotes(ctx.params, ctx.db.source_assert_single()) { 
format!(r#""{namespace}""#) } else { namespace.to_string() diff --git a/prisma-fmt/src/text_document_completion/datasource.rs b/prisma-fmt/src/text_document_completion/datasource.rs index 02b7d9f4377b..22da182868ae 100644 --- a/prisma-fmt/src/text_document_completion/datasource.rs +++ b/prisma-fmt/src/text_document_completion/datasource.rs @@ -144,7 +144,7 @@ pub(super) fn url_env_db_completion(completion_list: &mut CompletionList, kind: _ => unreachable!(), }; - let insert_text = if add_quotes(ctx.params, ctx.db.source()) { + let insert_text = if add_quotes(ctx.params, ctx.db.source_assert_single()) { format!(r#""{text}""#) } else { text.to_owned() diff --git a/prisma-fmt/tests/code_actions/test_api.rs b/prisma-fmt/tests/code_actions/test_api.rs index 2be0c978aa82..ff874cf86997 100644 --- a/prisma-fmt/tests/code_actions/test_api.rs +++ b/prisma-fmt/tests/code_actions/test_api.rs @@ -19,8 +19,8 @@ fn parse_schema_diagnostics(file: impl Into) -> Option) -> Option Span { - Span { start, end } + pub fn new(start: usize, end: usize, file_id: FileId) -> Span { + Span { start, end, file_id } } /// Creates a new empty span. pub fn empty() -> Span { - Span { start: 0, end: 0 } + Span { + start: 0, + end: 0, + file_id: FileId::ZERO, + } } /// Is the given position inside the span? 
(boundaries included) @@ -27,11 +42,12 @@ impl Span { } } -impl From> for Span { - fn from(s: pest::Span<'_>) -> Self { +impl From<(FileId, pest::Span<'_>)> for Span { + fn from((file_id, s): (FileId, pest::Span<'_>)) -> Self { Span { start: s.start(), end: s.end(), + file_id, } } } diff --git a/psl/parser-database/src/attributes.rs b/psl/parser-database/src/attributes.rs index e944b2fdc8ce..0d0bbfe786d3 100644 --- a/psl/parser-database/src/attributes.rs +++ b/psl/parser-database/src/attributes.rs @@ -23,12 +23,16 @@ pub(super) fn resolve_attributes(ctx: &mut Context<'_>) { visit_relation_field_attributes(rfid, ctx); } - for top in ctx.ast.iter_tops() { + for top in ctx.iter_tops() { match top { - (ast::TopId::Model(model_id), ast::Top::Model(_)) => resolve_model_attributes(model_id, ctx), - (ast::TopId::Enum(enum_id), ast::Top::Enum(ast_enum)) => resolve_enum_attributes(enum_id, ast_enum, ctx), - (ast::TopId::CompositeType(ctid), ast::Top::CompositeType(ct)) => { - resolve_composite_type_attributes(ctid, ct, ctx) + ((file_id, ast::TopId::Model(model_id)), ast::Top::Model(_)) => { + resolve_model_attributes((file_id, model_id), ctx) + } + ((file_id, ast::TopId::Enum(enum_id)), ast::Top::Enum(ast_enum)) => { + resolve_enum_attributes((file_id, enum_id), ast_enum, ctx) + } + ((file_id, ast::TopId::CompositeType(ctid)), ast::Top::CompositeType(ct)) => { + resolve_composite_type_attributes((file_id, ctid), ct, ctx) } _ => (), } @@ -36,14 +40,14 @@ pub(super) fn resolve_attributes(ctx: &mut Context<'_>) { } fn resolve_composite_type_attributes<'db>( - ctid: ast::CompositeTypeId, + ctid: crate::CompositeTypeId, ct: &'db ast::CompositeType, ctx: &mut Context<'db>, ) { for (field_id, field) in ct.iter_fields() { let CompositeTypeField { r#type, .. 
} = ctx.types.composite_type_fields[&(ctid, field_id)]; - ctx.visit_attributes((ctid, field_id).into()); + ctx.visit_attributes((ctid.0, (ctid.1, field_id))); if let ScalarFieldType::BuiltInScalar(_scalar_type) = r#type { // native type attributes @@ -52,7 +56,7 @@ fn resolve_composite_type_attributes<'db>( (ctid, field_id), datasource_name, type_name, - &ctx.ast[args], + &ctx.asts[args], ctx, ) } @@ -74,11 +78,11 @@ fn resolve_composite_type_attributes<'db>( } } -fn resolve_enum_attributes<'db>(enum_id: ast::EnumId, ast_enum: &'db ast::Enum, ctx: &mut Context<'db>) { +fn resolve_enum_attributes<'db>(enum_id: crate::EnumId, ast_enum: &'db ast::Enum, ctx: &mut Context<'db>) { let mut enum_attributes = EnumAttributes::default(); for value_idx in 0..ast_enum.values.len() { - ctx.visit_attributes((enum_id, value_idx as u32).into()); + ctx.visit_attributes((enum_id.0, (enum_id.1, value_idx as u32))); // @map if ctx.visit_optional_single_attr("map") { if let Some(mapped_name) = map::visit_map_attribute(ctx) { @@ -93,7 +97,7 @@ fn resolve_enum_attributes<'db>(enum_id: ast::EnumId, ast_enum: &'db ast::Enum, // Now validate the enum attributes. - ctx.visit_attributes(enum_id.into()); + ctx.visit_attributes(enum_id); // @@map if ctx.visit_optional_single_attr("map") { @@ -114,7 +118,7 @@ fn resolve_enum_attributes<'db>(enum_id: ast::EnumId, ast_enum: &'db ast::Enum, ctx.validate_visited_attributes(); } -fn resolve_model_attributes(model_id: ast::ModelId, ctx: &mut Context<'_>) { +fn resolve_model_attributes(model_id: crate::ModelId, ctx: &mut Context<'_>) { let mut model_attributes = ModelAttributes::default(); // First resolve all the attributes defined on fields **in isolation**. @@ -123,7 +127,7 @@ fn resolve_model_attributes(model_id: ast::ModelId, ctx: &mut Context<'_>) { } // Resolve all the attributes defined on the model itself **in isolation**. 
- ctx.visit_attributes(model_id.into()); + ctx.visit_attributes(model_id); // @@ignore if ctx.visit_optional_single_attr("ignore") { @@ -185,7 +189,7 @@ fn visit_scalar_field_attributes( r#type, .. } = ctx.types[scalar_field_id]; - let ast_model = &ctx.ast[model_id]; + let ast_model = &ctx.asts[model_id]; let ast_field = &ast_model[field_id]; ctx.visit_scalar_field_attributes(model_id, field_id); @@ -240,7 +244,7 @@ fn visit_scalar_field_attributes( if let ScalarFieldType::BuiltInScalar(_scalar_type) = r#type { // native type attributes if let Some((datasource_name, type_name, attribute_id)) = ctx.visit_datasource_scoped() { - let attribute = &ctx.ast[attribute_id]; + let attribute = &ctx.asts[attribute_id]; native_types::visit_model_field_native_type_attribute( scalar_field_id, datasource_name, @@ -297,7 +301,7 @@ fn visit_field_unique(scalar_field_id: ScalarFieldId, model_data: &mut ModelAttr let attribute_id = ctx.current_attribute_id(); model_data.ast_indexes.push(( - attribute_id, + attribute_id.1, IndexAttribute { r#type: IndexType::Unique, fields: vec![FieldWithArgs { @@ -316,8 +320,8 @@ fn visit_field_unique(scalar_field_id: ScalarFieldId, model_data: &mut ModelAttr fn visit_relation_field_attributes(rfid: RelationFieldId, ctx: &mut Context<'_>) { let RelationField { model_id, field_id, .. } = ctx.types[rfid]; - let ast_field = &ctx.ast[model_id][field_id]; - ctx.visit_attributes((model_id, field_id).into()); + let ast_field = &ctx.asts[model_id][field_id]; + ctx.visit_attributes((model_id.0, (model_id.1, field_id))); // @relation // Relation attributes are not required at this stage. @@ -364,7 +368,7 @@ fn visit_relation_field_attributes(rfid: RelationFieldId, ctx: &mut Context<'_>) for underlying_field in ctx.types[rfid].fields.iter().flatten() { let ScalarField { model_id, field_id, .. 
} = ctx.types[*underlying_field]; - suggested_fields.push(ctx.ast[model_id][field_id].name()); + suggested_fields.push(ctx.asts[model_id][field_id].name()); } let suggestion = match suggested_fields.len() { @@ -391,7 +395,7 @@ fn visit_relation_field_attributes(rfid: RelationFieldId, ctx: &mut Context<'_>) ctx.validate_visited_attributes(); } -fn visit_model_ignore(model_id: ast::ModelId, model_data: &mut ModelAttributes, ctx: &mut Context<'_>) { +fn visit_model_ignore(model_id: crate::ModelId, model_data: &mut ModelAttributes, ctx: &mut Context<'_>) { let ignored_field_errors: Vec<_> = ctx .types .range_model_scalar_fields(model_id) @@ -400,7 +404,7 @@ fn visit_model_ignore(model_id: ast::ModelId, model_data: &mut ModelAttributes, DatamodelError::new_attribute_validation_error( "Fields on an already ignored Model do not need an `@ignore` annotation.", "@ignore", - ctx.ast[sf.model_id][sf.field_id].span(), + ctx.asts[sf.model_id][sf.field_id].span(), ) }) .collect(); @@ -413,7 +417,7 @@ fn visit_model_ignore(model_id: ast::ModelId, model_data: &mut ModelAttributes, } /// Validate @@fulltext on models -fn model_fulltext(data: &mut ModelAttributes, model_id: ast::ModelId, ctx: &mut Context<'_>) { +fn model_fulltext(data: &mut ModelAttributes, model_id: crate::ModelId, ctx: &mut Context<'_>) { let mut index_attribute = IndexAttribute { r#type: IndexType::Fulltext, ..Default::default() @@ -440,11 +444,11 @@ fn model_fulltext(data: &mut ModelAttributes, model_id: ast::ModelId, ctx: &mut index_attribute.mapped_name = mapped_name; - data.ast_indexes.push((ctx.current_attribute_id(), index_attribute)); + data.ast_indexes.push((ctx.current_attribute_id().1, index_attribute)); } /// Validate @@index on models. 
-fn model_index(data: &mut ModelAttributes, model_id: ast::ModelId, ctx: &mut Context<'_>) { +fn model_index(data: &mut ModelAttributes, model_id: crate::ModelId, ctx: &mut Context<'_>) { let mut index_attribute = IndexAttribute { r#type: IndexType::Normal, ..Default::default() @@ -514,11 +518,11 @@ fn model_index(data: &mut ModelAttributes, model_id: ast::ModelId, ctx: &mut Con index_attribute.algorithm = algo; index_attribute.clustered = validate_clustering_setting(ctx); - data.ast_indexes.push((ctx.current_attribute_id(), index_attribute)); + data.ast_indexes.push((ctx.current_attribute_id().1, index_attribute)); } /// Validate @@unique on models. -fn model_unique(data: &mut ModelAttributes, model_id: ast::ModelId, ctx: &mut Context<'_>) { +fn model_unique(data: &mut ModelAttributes, model_id: crate::ModelId, ctx: &mut Context<'_>) { let mut index_attribute = IndexAttribute { r#type: IndexType::Unique, ..Default::default() @@ -533,7 +537,7 @@ fn model_unique(data: &mut ModelAttributes, model_id: ast::ModelId, ctx: &mut Co let current_attribute = ctx.current_attribute(); let current_attribute_id = ctx.current_attribute_id(); - let ast_model = &ctx.ast[model_id]; + let ast_model = &ctx.asts[model_id]; let name = get_name_argument(ctx); let mapped_name = { @@ -570,12 +574,12 @@ fn model_unique(data: &mut ModelAttributes, model_id: ast::ModelId, ctx: &mut Co index_attribute.mapped_name = mapped_name; index_attribute.clustered = validate_clustering_setting(ctx); - data.ast_indexes.push((current_attribute_id, index_attribute)); + data.ast_indexes.push((current_attribute_id.1, index_attribute)); } fn common_index_validations( index_data: &mut IndexAttribute, - model_id: ast::ModelId, + model_id: crate::ModelId, resolving: FieldResolvingSetup, ctx: &mut Context<'_>, ) { @@ -599,9 +603,9 @@ fn common_index_validations( if !unresolvable_fields.is_empty() { let fields = unresolvable_fields .iter() - .map(|(top_id, field_name)| match top_id { + .map(|((file_id, top_id), 
field_name)| match top_id { ast::TopId::CompositeType(ctid) => { - let composite_type = &ctx.ast[*ctid].name(); + let composite_type = &ctx.asts[(*file_id, *ctid)].name(); Cow::from(format!("{field_name} in type {composite_type}")) } @@ -616,7 +620,7 @@ fn common_index_validations( if index_data.is_unique() { "unique " } else { "" }, fields.join(", "), ); - let model_name = ctx.ast[model_id].name(); + let model_name = ctx.asts[model_id].name(); DatamodelError::new_model_validation_error(message, "model", model_name, current_attribute.span) }); } @@ -636,7 +640,7 @@ fn common_index_validations( .flatten(); for underlying_field in fields { let ScalarField { model_id, field_id, .. } = ctx.types[*underlying_field]; - suggested_fields.push(ctx.ast[model_id][field_id].name()); + suggested_fields.push(ctx.asts[model_id][field_id].name()); } } @@ -658,7 +662,7 @@ fn common_index_validations( suggestion = suggestion ), "model", - ctx.ast[model_id].name(), + ctx.asts[model_id].name(), current_attribute.span, )); } @@ -667,9 +671,9 @@ fn common_index_validations( } /// @relation validation for relation fields. 
-fn visit_relation(model_id: ast::ModelId, relation_field_id: RelationFieldId, ctx: &mut Context<'_>) { +fn visit_relation(model_id: crate::ModelId, relation_field_id: RelationFieldId, ctx: &mut Context<'_>) { let attr = ctx.current_attribute(); - ctx.types[relation_field_id].relation_attribute = Some(ctx.current_attribute_id()); + ctx.types[relation_field_id].relation_attribute = Some(ctx.current_attribute_id().1); if let Some(fields) = ctx.visit_optional_arg("fields") { let fields = match resolve_field_array_without_args(fields, attr.span, model_id, ctx) { @@ -724,7 +728,7 @@ fn visit_relation(model_id: ast::ModelId, relation_field_id: RelationFieldId, ct unknown_fields, }) => { if !unknown_fields.is_empty() { - let model_name = ctx.ast[ctx.types[relation_field_id].referenced_model].name(); + let model_name = ctx.asts[ctx.types[relation_field_id].referenced_model].name(); let field_names = unknown_fields .into_iter() @@ -742,7 +746,7 @@ fn visit_relation(model_id: ast::ModelId, relation_field_id: RelationFieldId, ct if !relation_fields.is_empty() { let msg = format!( "The argument `references` must refer only to scalar fields in the related model `{}`. But it is referencing the following relation fields: {}", - ctx.ast[ctx.types[relation_field_id].referenced_model].name(), + ctx.asts[ctx.types[relation_field_id].referenced_model].name(), relation_fields.iter().map(|(f, _)| f.name()).collect::>().join(", "), ); ctx.push_error(DatamodelError::new_validation_error(&msg, attr.span)); @@ -806,7 +810,7 @@ enum FieldResolutionError<'ast> { AlreadyDealtWith, ProblematicFields { /// Fields that do not exist on the model. - unknown_fields: Vec<(ast::TopId, &'ast str)>, + unknown_fields: Vec<(crate::TopId, &'ast str)>, /// Fields that exist on the model but are relation fields. 
relation_fields: Vec<(&'ast ast::Field, ast::FieldId)>, }, @@ -818,9 +822,10 @@ enum FieldResolutionError<'ast> { fn resolve_field_array_without_args<'db>( values: &'db ast::Expression, attribute_span: ast::Span, - model_id: ast::ModelId, + model_id: crate::ModelId, ctx: &mut Context<'db>, ) -> Result, FieldResolutionError<'db>> { + let file_id = model_id.0; let constant_array = match coerce_array(values, &coerce::constant, ctx.diagnostics) { Some(values) => values, None => { @@ -831,11 +836,11 @@ fn resolve_field_array_without_args<'db>( let mut field_ids: Vec = Vec::with_capacity(constant_array.len()); let mut unknown_fields = Vec::new(); let mut relation_fields = Vec::new(); - let ast_model = &ctx.ast[model_id]; + let ast_model = &ctx.asts[model_id]; for field_name in constant_array { if field_name.contains('.') { - unknown_fields.push((ast::TopId::Model(model_id), field_name)); + unknown_fields.push(((file_id, ast::TopId::Model(model_id.1)), field_name)); continue; } @@ -843,7 +848,7 @@ fn resolve_field_array_without_args<'db>( let field_id = if let Some(field_id) = ctx.find_model_field(model_id, field_name) { field_id } else { - unknown_fields.push((ast::TopId::Model(model_id), field_name)); + unknown_fields.push(((file_id, ast::TopId::Model(model_id.1)), field_name)); continue; }; @@ -851,7 +856,7 @@ fn resolve_field_array_without_args<'db>( let sfid = if let Some(sfid) = ctx.types.find_model_scalar_field(model_id, field_id) { sfid } else { - relation_fields.push((&ctx.ast[model_id][field_id], field_id)); + relation_fields.push((&ctx.asts[model_id][field_id], field_id)); continue; }; @@ -900,10 +905,11 @@ impl FieldResolvingSetup { fn resolve_field_array_with_args<'db>( values: &'db ast::Expression, attribute_span: ast::Span, - model_id: ast::ModelId, + model_id: crate::ModelId, resolving: FieldResolvingSetup, ctx: &mut Context<'db>, ) -> Result, FieldResolutionError<'db>> { + let file_id = model_id.0; let constant_array = match 
crate::types::index_fields::coerce_field_array_with_args(values, ctx.diagnostics) { Some(values) => values, None => return Err(FieldResolutionError::AlreadyDealtWith), @@ -913,12 +919,12 @@ fn resolve_field_array_with_args<'db>( let mut unknown_fields = Vec::new(); let mut relation_fields = Vec::new(); - let ast_model = &ctx.ast[model_id]; + let ast_model = &ctx.asts[model_id]; 'fields: for attrs in &constant_array { let path = if attrs.field_name.contains('.') { if !resolving.follow_composites() { - unknown_fields.push((ast::TopId::Model(model_id), attrs.field_name)); + unknown_fields.push(((file_id, ast::TopId::Model(model_id.1)), attrs.field_name)); continue 'fields; } @@ -930,7 +936,7 @@ fn resolve_field_array_with_args<'db>( let field_id = match ctx.find_model_field(model_id, field_shard) { Some(field_id) => field_id, None => { - unknown_fields.push((ast::TopId::Model(model_id), field_shard)); + unknown_fields.push(((file_id, ast::TopId::Model(model_id.1)), field_shard)); continue 'fields; } }; @@ -938,14 +944,14 @@ fn resolve_field_array_with_args<'db>( let sfid = if let Some(sfid) = ctx.types.find_model_scalar_field(model_id, field_id) { sfid } else { - relation_fields.push((&ctx.ast[model_id][field_id], field_id)); + relation_fields.push((&ctx.asts[model_id][field_id], field_id)); continue 'fields; }; match &ctx.types[sfid].r#type { ScalarFieldType::CompositeType(ctid) => (IndexFieldPath::new(sfid), ctid), _ => { - unknown_fields.push((ast::TopId::Model(model_id), attrs.field_name)); + unknown_fields.push(((file_id, ast::TopId::Model(model_id.1)), attrs.field_name)); continue 'fields; } } @@ -961,7 +967,7 @@ fn resolve_field_array_with_args<'db>( let field_id = match ctx.find_composite_type_field(*next_type, field_shard) { Some(field_id) => field_id, None => { - unknown_fields.push((ast::TopId::CompositeType(*next_type), field_shard)); + unknown_fields.push(((next_type.0, ast::TopId::CompositeType(next_type.1)), field_shard)); continue 'fields; } }; @@ 
-973,7 +979,7 @@ fn resolve_field_array_with_args<'db>( next_type = ctid; } _ if i < field_count - 1 => { - unknown_fields.push((ast::TopId::Model(model_id), attrs.field_name)); + unknown_fields.push(((model_id.0, ast::TopId::Model(model_id.1)), attrs.field_name)); continue 'fields; } _ => (), @@ -986,12 +992,12 @@ fn resolve_field_array_with_args<'db>( match ctx.types.find_model_scalar_field(model_id, field_id) { Some(sfid) => IndexFieldPath::new(sfid), None => { - relation_fields.push((&ctx.ast[model_id][field_id], field_id)); + relation_fields.push((&ctx.asts[model_id][field_id], field_id)); continue; } } } else { - unknown_fields.push((ast::TopId::Model(model_id), attrs.field_name)); + unknown_fields.push(((model_id.0, ast::TopId::Model(model_id.1)), attrs.field_name)); continue; }; @@ -1000,8 +1006,8 @@ fn resolve_field_array_with_args<'db>( let path_str = match path.field_in_index() { either::Either::Left(_) => Cow::from(attrs.field_name), either::Either::Right((ctid, field_id)) => { - let field_name = &ctx.ast[ctid][field_id].name(); - let composite_type = &ctx.ast[ctid].name(); + let field_name = &ctx.asts[ctid][field_id].name(); + let composite_type = &ctx.asts[ctid].name(); Cow::from(format!("{field_name} in type {composite_type}")) } @@ -1097,13 +1103,17 @@ fn validate_clustering_setting(ctx: &mut Context<'_>) -> Option { /// access their corresponding entries in the attributes map in the database even in the presence /// of name and type resolution errors. This is useful for the language tools. 
pub(super) fn create_default_attributes(ctx: &mut Context<'_>) { - for top in ctx.ast.iter_tops() { + for ((file_id, top), _) in ctx.iter_tops() { match top { - (ast::TopId::Model(model_id), ast::Top::Model(_)) => { - ctx.types.model_attributes.insert(model_id, ModelAttributes::default()); + ast::TopId::Model(model_id) => { + ctx.types + .model_attributes + .insert((file_id, model_id), ModelAttributes::default()); } - (ast::TopId::Enum(enum_id), ast::Top::Enum(_)) => { - ctx.types.enum_attributes.insert(enum_id, EnumAttributes::default()); + ast::TopId::Enum(enum_id) => { + ctx.types + .enum_attributes + .insert((file_id, enum_id), EnumAttributes::default()); } _ => (), } diff --git a/psl/parser-database/src/attributes/default.rs b/psl/parser-database/src/attributes/default.rs index dcd22d316361..e2be240f152c 100644 --- a/psl/parser-database/src/attributes/default.rs +++ b/psl/parser-database/src/attributes/default.rs @@ -9,7 +9,7 @@ use crate::{ /// @default on model scalar fields pub(super) fn visit_model_field_default( scalar_field_id: ScalarFieldId, - model_id: ast::ModelId, + model_id: crate::ModelId, field_id: ast::FieldId, r#type: ScalarFieldType, ctx: &mut Context<'_>, @@ -19,7 +19,7 @@ pub(super) fn visit_model_field_default( Err(err) => return ctx.push_error(err), }; - let ast_model = &ctx.ast[model_id]; + let ast_model = &ctx.asts[model_id]; let ast_field = &ast_model[field_id]; let mapped_name = default_attribute_mapped_name(ctx); @@ -74,7 +74,7 @@ pub(super) fn visit_model_field_default( /// @default on composite type fields pub(super) fn visit_composite_field_default( - ct_id: ast::CompositeTypeId, + ct_id: crate::CompositeTypeId, field_id: ast::FieldId, r#type: ScalarFieldType, ctx: &mut Context<'_>, @@ -84,7 +84,7 @@ pub(super) fn visit_composite_field_default( Err(err) => return ctx.push_error(err), }; - let ast_model = &ctx.ast[ct_id]; + let ast_model = &ctx.asts[ct_id]; let ast_field = &ast_model[field_id]; if 
ctx.visit_optional_arg("map").is_some() { @@ -181,10 +181,10 @@ fn validate_model_builtin_scalar_type_default( value: &ast::Expression, mapped_name: Option, accept: AcceptFn<'_>, - field_id: (ast::ModelId, ast::FieldId), + field_id: (crate::ModelId, ast::FieldId), ctx: &mut Context<'_>, ) { - let arity = ctx.ast[field_id.0][field_id.1].arity; + let arity = ctx.asts[field_id.0][field_id.1].arity; match (scalar_type, value) { // Functions (_, ast::Expression::Function(funcname, _, _)) if funcname == FN_AUTOINCREMENT && mapped_name.is_some() => { @@ -324,9 +324,13 @@ fn validate_invalid_function_default(fn_name: &str, scalar_type: ScalarType, ctx )); } -fn validate_default_value_on_composite_type(ctid: ast::CompositeTypeId, ast_field: &ast::Field, ctx: &mut Context<'_>) { +fn validate_default_value_on_composite_type( + ctid: crate::CompositeTypeId, + ast_field: &ast::Field, + ctx: &mut Context<'_>, +) { let attr = ctx.current_attribute(); - let ct_name = ctx.ast[ctid].name(); + let ct_name = ctx.asts[ctid].name(); ctx.push_error(DatamodelError::new_composite_type_field_validation_error( "Defaults on fields of type composite are not supported. 
Please remove the `@default` attribute.", @@ -395,13 +399,13 @@ fn validate_nanoid_args(args: &[ast::Argument], accept: AcceptFn<'_>, ctx: &mut fn validate_enum_default( found_value: &ast::Expression, - enum_id: ast::EnumId, + enum_id: crate::EnumId, accept: AcceptFn<'_>, ctx: &mut Context<'_>, ) { match found_value { ast::Expression::ConstantValue(enum_value, _) => { - if ctx.ast[enum_id].values.iter().any(|v| v.name() == enum_value) { + if ctx.asts[enum_id].values.iter().any(|v| v.name() == enum_value) { accept(ctx) } else { validate_invalid_default_enum_value(enum_value, ctx); @@ -413,7 +417,7 @@ fn validate_enum_default( fn validate_enum_list_default( found_value: &ast::Expression, - enum_id: ast::EnumId, + enum_id: crate::EnumId, accept: AcceptFn<'_>, ctx: &mut Context<'_>, ) { diff --git a/psl/parser-database/src/attributes/id.rs b/psl/parser-database/src/attributes/id.rs index 96892587c862..13618bbea737 100644 --- a/psl/parser-database/src/attributes/id.rs +++ b/psl/parser-database/src/attributes/id.rs @@ -10,7 +10,7 @@ use crate::{ use std::borrow::Cow; /// @@id on models -pub(super) fn model(model_data: &mut ModelAttributes, model_id: ast::ModelId, ctx: &mut Context<'_>) { +pub(super) fn model(model_data: &mut ModelAttributes, model_id: crate::ModelId, ctx: &mut Context<'_>) { let attr = ctx.current_attribute(); let fields = match ctx.visit_default_arg("fields") { Ok(value) => value, @@ -29,9 +29,9 @@ pub(super) fn model(model_data: &mut ModelAttributes, model_id: ast::ModelId, ct if !unresolvable_fields.is_empty() { let fields_str = unresolvable_fields .into_iter() - .map(|(top_id, field_name)| match top_id { + .map(|((file_id, top_id), field_name)| match top_id { ast::TopId::CompositeType(ctid) => { - let ct_name = &ctx.ast[ctid].name(); + let ct_name = ctx.asts[(file_id, ctid)].name(); Cow::from(format!("{field_name} in type {ct_name}")) } @@ -43,7 +43,7 @@ pub(super) fn model(model_data: &mut ModelAttributes, model_id: ast::ModelId, ct let msg = 
format!("The multi field id declaration refers to the unknown fields {fields_str}."); let error = - DatamodelError::new_model_validation_error(&msg, "model", ctx.ast[model_id].name(), fields.span()); + DatamodelError::new_model_validation_error(&msg, "model", ctx.asts[model_id].name(), fields.span()); ctx.push_error(error); } @@ -60,7 +60,7 @@ pub(super) fn model(model_data: &mut ModelAttributes, model_id: ast::ModelId, ct ctx.push_error(DatamodelError::new_model_validation_error( &msg, "model", - ctx.ast[model_id].name(), + ctx.asts[model_id].name(), attr.span, )); } @@ -69,7 +69,7 @@ pub(super) fn model(model_data: &mut ModelAttributes, model_id: ast::ModelId, ct } }; - let ast_model = &ctx.ast[model_id]; + let ast_model = &ctx.asts[model_id]; // ID attribute fields must reference only required fields. let fields_that_are_not_required: Vec<&str> = resolved_fields @@ -77,7 +77,7 @@ pub(super) fn model(model_data: &mut ModelAttributes, model_id: ast::ModelId, ct .filter_map(|field| match field.path.field_in_index() { either::Either::Left(id) => { let ScalarField { model_id, field_id, .. 
} = ctx.types[id]; - let field = &ctx.ast[model_id][field_id]; + let field = &ctx.asts[model_id][field_id]; if field.arity.is_required() { None @@ -86,7 +86,7 @@ pub(super) fn model(model_data: &mut ModelAttributes, model_id: ast::ModelId, ct } } either::Either::Right((ctid, field_id)) => { - let field = &ctx.ast[ctid][field_id]; + let field = &ctx.asts[ctid][field_id]; if field.arity.is_required() { None @@ -198,7 +198,7 @@ pub(super) fn field<'db>( } pub(super) fn validate_id_field_arities( - model_id: ast::ModelId, + model_id: crate::ModelId, model_attributes: &ModelAttributes, ctx: &mut Context<'_>, ) { @@ -213,7 +213,7 @@ pub(super) fn validate_id_field_arities( }; let ast_field = if let Some(field_id) = pk.source_field { - &ctx.ast[model_id][field_id] + &ctx.asts[model_id][field_id] } else { return; }; @@ -222,7 +222,7 @@ pub(super) fn validate_id_field_arities( ctx.push_error(DatamodelError::new_attribute_validation_error( "Fields that are marked as id must be required.", "@id", - ctx.ast[pk.source_attribute].span, + ctx.asts[pk.source_attribute].span, )) } } diff --git a/psl/parser-database/src/attributes/map.rs b/psl/parser-database/src/attributes/map.rs index b4bf82835eb2..d910447f96cf 100644 --- a/psl/parser-database/src/attributes/map.rs +++ b/psl/parser-database/src/attributes/map.rs @@ -19,7 +19,7 @@ pub(super) fn scalar_field( sfid: ScalarFieldId, ast_model: &ast::Model, ast_field: &ast::Field, - model_id: ast::ModelId, + model_id: crate::ModelId, field_id: ast::FieldId, ctx: &mut Context<'_>, ) { @@ -71,7 +71,7 @@ pub(super) fn scalar_field( pub(super) fn composite_type_field( ct: &ast::CompositeType, ast_field: &ast::Field, - ctid: ast::CompositeTypeId, + ctid: crate::CompositeTypeId, field_id: ast::FieldId, ctx: &mut Context<'_>, ) { diff --git a/psl/parser-database/src/attributes/native_types.rs b/psl/parser-database/src/attributes/native_types.rs index d9deccb99eb9..704df89e23ac 100644 --- a/psl/parser-database/src/attributes/native_types.rs +++ 
b/psl/parser-database/src/attributes/native_types.rs @@ -14,7 +14,7 @@ pub(super) fn visit_model_field_native_type_attribute( } pub(super) fn visit_composite_type_field_native_type_attribute( - id: (ast::CompositeTypeId, ast::FieldId), + id: (crate::CompositeTypeId, ast::FieldId), datasource_name: StringId, type_name: StringId, attr: &ast::Attribute, diff --git a/psl/parser-database/src/context.rs b/psl/parser-database/src/context.rs index 450146953024..6d4d72239824 100644 --- a/psl/parser-database/src/context.rs +++ b/psl/parser-database/src/context.rs @@ -3,7 +3,7 @@ mod attributes; use self::attributes::AttributesValidationState; use crate::{ ast, interner::StringInterner, names::Names, relations::Relations, types::Types, DatamodelError, Diagnostics, - StringId, + InFile, StringId, }; use schema_ast::ast::{Expression, WithName}; use std::collections::{HashMap, HashSet}; @@ -21,7 +21,7 @@ use std::collections::{HashMap, HashSet}; /// /// See `visit_attributes()`. pub(crate) struct Context<'db> { - pub(crate) ast: &'db ast::SchemaAst, + pub(crate) asts: &'db crate::Files, pub(crate) interner: &'db mut StringInterner, pub(crate) names: &'db mut Names, pub(crate) types: &'db mut Types, @@ -30,15 +30,15 @@ pub(crate) struct Context<'db> { attributes: AttributesValidationState, // state machine for attribute validation // @map'ed names indexes. These are not in the db because they are only used for validation. 
- pub(super) mapped_model_scalar_field_names: HashMap<(ast::ModelId, StringId), ast::FieldId>, - pub(super) mapped_composite_type_names: HashMap<(ast::CompositeTypeId, StringId), ast::FieldId>, - pub(super) mapped_enum_names: HashMap, - pub(super) mapped_enum_value_names: HashMap<(ast::EnumId, StringId), u32>, + pub(super) mapped_model_scalar_field_names: HashMap<(crate::ModelId, StringId), ast::FieldId>, + pub(super) mapped_composite_type_names: HashMap<(crate::CompositeTypeId, StringId), ast::FieldId>, + pub(super) mapped_enum_names: HashMap, + pub(super) mapped_enum_value_names: HashMap<(crate::EnumId, StringId), u32>, } impl<'db> Context<'db> { pub(super) fn new( - ast: &'db ast::SchemaAst, + asts: &'db crate::Files, interner: &'db mut StringInterner, names: &'db mut Names, types: &'db mut Types, @@ -46,7 +46,7 @@ impl<'db> Context<'db> { diagnostics: &'db mut Diagnostics, ) -> Self { Context { - ast, + asts, interner, names, types, @@ -68,7 +68,7 @@ impl<'db> Context<'db> { /// Return the attribute currently being validated. Panics if the context is not in the right /// state. #[track_caller] - pub(crate) fn current_attribute_id(&self) -> ast::AttributeId { + pub(crate) fn current_attribute_id(&self) -> crate::AttributeId { self.attributes.attribute.unwrap() } @@ -76,8 +76,7 @@ impl<'db> Context<'db> { /// state. #[track_caller] pub(crate) fn current_attribute(&self) -> &'db ast::Attribute { - let id = self.attributes.attribute.unwrap(); - &self.ast[id] + &self.asts[self.attributes.attribute.unwrap()] } /// Discard arguments without validation. @@ -102,8 +101,8 @@ impl<'db> Context<'db> { /// /// Other than for this peculiarity, this method is identical to /// `visit_attributes()`. 
- pub(super) fn visit_scalar_field_attributes(&mut self, model_id: ast::ModelId, field_id: ast::FieldId) { - self.visit_attributes((model_id, field_id).into()); + pub(super) fn visit_scalar_field_attributes(&mut self, model_id: crate::ModelId, field_id: ast::FieldId) { + self.visit_attributes((model_id.0, (model_id.1, field_id))); } /// All attribute validation should go through `visit_attributes()`. It lets @@ -116,7 +115,11 @@ impl<'db> Context<'db> { /// `validate_visited_arguments()`. Otherwise, Context will helpfully panic. /// - When you are done validating an attribute set, you must call /// `validate_visited_attributes()`. Otherwise, Context will helpfully panic. - pub(super) fn visit_attributes(&mut self, ast_attributes: ast::AttributeContainer) { + pub(super) fn visit_attributes(&mut self, ast_attributes: InFile) + where + T: Into, + { + let ast_attributes: crate::AttributeContainer = (ast_attributes.0, ast_attributes.1.into()); if self.attributes.attributes.is_some() || !self.attributes.unused_attributes.is_empty() { panic!( "`ctx.visit_attributes() called with {:?} while the Context is still validating previous attribute set on {:?}`", @@ -125,7 +128,8 @@ impl<'db> Context<'db> { ); } - self.attributes.set_attributes(ast_attributes, self.ast); + self.attributes + .set_attributes(ast_attributes, &self.asts[ast_attributes.0].2); } /// Look for an optional attribute with a name of the form @@ -136,8 +140,8 @@ impl<'db> Context<'db> { /// arguments to other attributes: everywhere else, attributes are named, /// with a default that can be first, but with native types, arguments are /// purely positional. 
- pub(crate) fn visit_datasource_scoped(&mut self) -> Option<(StringId, StringId, ast::AttributeId)> { - let attrs = iter_attributes(self.attributes.attributes.as_ref(), self.ast) + pub(crate) fn visit_datasource_scoped(&mut self) -> Option<(StringId, StringId, crate::AttributeId)> { + let attrs = iter_attributes(self.attributes.attributes.as_ref(), self.asts) .filter(|(_, attr)| attr.name.name.contains('.')); let mut native_type_attr = None; let diagnostics = &mut self.diagnostics; @@ -172,7 +176,7 @@ impl<'db> Context<'db> { #[must_use] pub(crate) fn visit_optional_single_attr(&mut self, name: &'static str) -> bool { let mut attrs = - iter_attributes(self.attributes.attributes.as_ref(), self.ast).filter(|(_, a)| a.name.name == name); + iter_attributes(self.attributes.attributes.as_ref(), self.asts).filter(|(_, a)| a.name.name == name); let (first_idx, first) = match attrs.next() { Some(first) => first, None => return false, @@ -181,7 +185,7 @@ impl<'db> Context<'db> { if attrs.next().is_some() { for (idx, attr) in - iter_attributes(self.attributes.attributes.as_ref(), self.ast).filter(|(_, a)| a.name.name == name) + iter_attributes(self.attributes.attributes.as_ref(), self.asts).filter(|(_, a)| a.name.name == name) { diagnostics.push_error(DatamodelError::new_duplicate_attribute_error( &attr.name.name, @@ -205,7 +209,7 @@ impl<'db> Context<'db> { let mut has_valid_attribute = false; while !has_valid_attribute { - let first_attr = iter_attributes(self.attributes.attributes.as_ref(), self.ast) + let first_attr = iter_attributes(self.attributes.attributes.as_ref(), self.asts) .filter(|(_, attr)| attr.name.name == name) .find(|(attr_id, _)| self.attributes.unused_attributes.contains(attr_id)); let (attr_id, attr) = if let Some(first_attr) = first_attr { @@ -267,7 +271,7 @@ impl<'db> Context<'db> { /// otherwise. 
pub(crate) fn validate_visited_arguments(&mut self) { let attr = if let Some(attrid) = self.attributes.attribute { - &self.ast[attrid] + &self.asts[attrid] } else { panic!("State error: missing attribute in validate_visited_arguments.") }; @@ -290,7 +294,7 @@ impl<'db> Context<'db> { let diagnostics = &mut self.diagnostics; for attribute_id in &self.attributes.unused_attributes { - let attribute = &self.ast[*attribute_id]; + let attribute = &self.asts[*attribute_id]; diagnostics.push_error(DatamodelError::new_attribute_not_known_error( &attribute.name.name, attribute.span, @@ -308,7 +312,7 @@ impl<'db> Context<'db> { } /// Find a specific field in a specific model. - pub(crate) fn find_model_field(&self, model_id: ast::ModelId, field_name: &str) -> Option { + pub(crate) fn find_model_field(&self, model_id: crate::ModelId, field_name: &str) -> Option { let name = self.interner.lookup(field_name)?; self.names.model_fields.get(&(model_id, name)).cloned() } @@ -316,7 +320,7 @@ impl<'db> Context<'db> { /// Find a specific field in a specific composite type. pub(crate) fn find_composite_type_field( &self, - composite_type_id: ast::CompositeTypeId, + composite_type_id: crate::CompositeTypeId, field_name: &str, ) -> Option { let name = self.interner.lookup(field_name)?; @@ -327,9 +331,15 @@ impl<'db> Context<'db> { .cloned() } + pub(crate) fn iter_tops(&self) -> impl Iterator + 'db { + self.asts + .iter() + .flat_map(|(file_id, _, _, ast)| ast.iter_tops().map(move |(top_id, top)| ((file_id, top_id), top))) + } + /// Starts validating the arguments for an attribute, checking for duplicate arguments in the /// process. Returns whether the attribute is valid enough to be usable. 
- fn set_attribute(&mut self, attribute_id: ast::AttributeId, attribute: &'db ast::Attribute) -> bool { + fn set_attribute(&mut self, attribute_id: crate::AttributeId, attribute: &'db ast::Attribute) -> bool { if self.attributes.attribute.is_some() || !self.attributes.args.is_empty() { panic!("State error: we cannot start validating new arguments before `validate_visited_arguments()` or `discard_arguments()` has been called.\n{:#?}", self.attributes); } @@ -430,13 +440,15 @@ impl<'db> Context<'db> { // Implementation detail. Used for arguments validation. fn iter_attributes<'a, 'ast: 'a>( - attrs: Option<&'a ast::AttributeContainer>, - ast: &'ast ast::SchemaAst, -) -> impl Iterator + 'a { + attrs: Option<&'a crate::AttributeContainer>, + asts: &'ast crate::Files, +) -> impl Iterator + 'a { attrs .into_iter() - .flat_map(move |container| ast[*container].iter().enumerate().map(|a| (a, *container))) - .map(|((idx, attr), container)| (ast::AttributeId::new_in_container(container, idx), attr)) + .flat_map(move |container| asts[*container].iter().enumerate().map(|a| (a, *container))) + .map(|((idx, attr), (file_id, container))| { + ((file_id, ast::AttributeId::new_in_container(container, idx)), attr) + }) } impl std::ops::Index for Context<'_> { diff --git a/psl/parser-database/src/context/attributes.rs b/psl/parser-database/src/context/attributes.rs index 9f35f5cc3644..48b75756004b 100644 --- a/psl/parser-database/src/context/attributes.rs +++ b/psl/parser-database/src/context/attributes.rs @@ -4,17 +4,19 @@ use crate::interner::StringId; #[derive(Default, Debug)] pub(super) struct AttributesValidationState { /// The attributes list being validated. - pub(super) attributes: Option, - pub(super) unused_attributes: HashSet, // the _remaining_ attributes + pub(super) attributes: Option, + pub(super) unused_attributes: HashSet, // the _remaining_ attributes /// The attribute being validated. 
- pub(super) attribute: Option, + pub(super) attribute: Option, pub(super) args: HashMap, usize>, // the _remaining_ arguments of `attribute` } impl AttributesValidationState { - pub(super) fn set_attributes(&mut self, attributes: ast::AttributeContainer, ast: &ast::SchemaAst) { - let attribute_ids = (0..ast[attributes].len()).map(|idx| ast::AttributeId::new_in_container(attributes, idx)); + pub(super) fn set_attributes(&mut self, attributes: crate::AttributeContainer, ast: &ast::SchemaAst) { + let file_id = attributes.0; + let attribute_ids = + (0..ast[attributes.1].len()).map(|idx| (file_id, ast::AttributeId::new_in_container(attributes.1, idx))); self.unused_attributes.clear(); self.unused_attributes.extend(attribute_ids); diff --git a/psl/parser-database/src/files.rs b/psl/parser-database/src/files.rs new file mode 100644 index 000000000000..f201c839eea0 --- /dev/null +++ b/psl/parser-database/src/files.rs @@ -0,0 +1,37 @@ +use crate::FileId; +use schema_ast::ast; +use std::ops::Index; + +/// The content is a list of (file path, file source text, file AST). +/// +/// The file path can be anything, the PSL implementation will only use it to display the file name +/// in errors. For example, files can come from nested directories. 
+pub(crate) struct Files(pub(super) Vec<(String, schema_ast::SourceFile, ast::SchemaAst)>);
+
+impl Files {
+    pub(crate) fn iter(&self) -> impl Iterator {
+        self.0
+            .iter()
+            .enumerate()
+            .map(|(idx, (path, contents, ast))| (FileId(idx as u32), path, contents, ast))
+    }
+}
+
+impl Index for Files {
+    type Output = (String, schema_ast::SourceFile, ast::SchemaAst);
+
+    fn index(&self, index: crate::FileId) -> &Self::Output {
+        &self.0[index.0 as usize]
+    }
+}
+
+impl Index> for Files
+where
+    ast::SchemaAst: Index,
+{
+    type Output = >::Output;
+
+    fn index(&self, index: crate::InFile) -> &Self::Output {
+        &self[index.0].2[index.1]
+    }
+}
diff --git a/psl/parser-database/src/ids.rs b/psl/parser-database/src/ids.rs
new file mode 100644
index 000000000000..55e5836f17fe
--- /dev/null
+++ b/psl/parser-database/src/ids.rs
@@ -0,0 +1,23 @@
+use diagnostics::FileId;
+use schema_ast::ast;
+
+/// An AST identifier with the accompanying file ID.
+pub type InFile = (FileId, Id);
+
+/// See [ast::ModelId]
+pub type ModelId = InFile;
+
+/// See [ast::EnumId]
+pub type EnumId = InFile;
+
+/// See [ast::CompositeTypeId]
+pub type CompositeTypeId = InFile;
+
+/// See [ast::TopId]
+pub type TopId = InFile;
+
+/// See [ast::AttributeId]
+pub type AttributeId = InFile;
+
+/// See [ast::AttributeContainer]
+pub type AttributeContainer = InFile;
diff --git a/psl/parser-database/src/lib.rs b/psl/parser-database/src/lib.rs
index d57ff8c98ddd..e1dd7b72b259 100644
--- a/psl/parser-database/src/lib.rs
+++ b/psl/parser-database/src/lib.rs
@@ -31,12 +31,16 @@ pub mod walkers;
 mod attributes;
 mod coerce_expression;
 mod context;
+mod files;
+mod ids;
 mod interner;
 mod names;
 mod relations;
 mod types;
 
 pub use coerce_expression::{coerce, coerce_array, coerce_opt};
+pub use diagnostics::FileId;
+pub use ids::*;
 pub use names::is_reserved_type_name;
 pub use relations::{ManyToManyRelationId, ReferentialAction, RelationId};
 pub use schema_ast::{ast, SourceFile};
@@ -45,7 +49,7 @@ pub use types::{
ScalarType, SortOrder, }; -use self::{context::Context, interner::StringId, relations::Relations, types::Types}; +use self::{context::Context, files::Files, interner::StringId, relations::Relations, types::Types}; use diagnostics::{DatamodelError, Diagnostics}; use names::Names; @@ -69,8 +73,7 @@ use names::Names; /// - Global validations are then performed on the mostly validated schema. /// Currently only index name collisions. pub struct ParserDatabase { - ast: ast::SchemaAst, - file: schema_ast::SourceFile, + asts: Files, interner: interner::StringInterner, names: Names, types: Types, @@ -79,14 +82,35 @@ pub struct ParserDatabase { impl ParserDatabase { /// See the docs on [ParserDatabase](/struct.ParserDatabase.html). - pub fn new(file: schema_ast::SourceFile, diagnostics: &mut Diagnostics) -> Self { - let ast = schema_ast::parse_schema(file.as_str(), diagnostics); + pub fn new_single_file(file: SourceFile, diagnostics: &mut Diagnostics) -> Self { + Self::new(vec![("schema.prisma".to_owned(), file)], diagnostics) + } + + /// See the docs on [ParserDatabase](/struct.ParserDatabase.html). + pub fn new(schemas: Vec<(String, schema_ast::SourceFile)>, diagnostics: &mut Diagnostics) -> Self { + let asts = schemas + .into_iter() + .enumerate() + .map(|(file_idx, (path, source))| { + let id = FileId(file_idx as u32); + let ast = schema_ast::parse_schema(source.as_str(), diagnostics, id); + (path, source, ast) + }) + .collect(); + let asts = Files(asts); let mut interner = Default::default(); let mut names = Default::default(); let mut types = Default::default(); let mut relations = Default::default(); - let mut ctx = Context::new(&ast, &mut interner, &mut names, &mut types, &mut relations, diagnostics); + let mut ctx = Context::new( + &asts, + &mut interner, + &mut names, + &mut types, + &mut relations, + diagnostics, + ); // First pass: resolve names. 
         names::resolve_names(&mut ctx);
@@ -96,8 +120,7 @@ impl ParserDatabase {
             attributes::create_default_attributes(&mut ctx);
 
             return ParserDatabase {
-                ast,
-                file,
+                asts,
                 interner,
                 names,
                 types,
@@ -113,8 +136,7 @@ impl ParserDatabase {
             attributes::create_default_attributes(&mut ctx);
 
             return ParserDatabase {
-                ast,
-                file,
+                asts,
                 interner,
                 names,
                 types,
@@ -131,8 +153,7 @@ impl ParserDatabase {
         relations::infer_relations(&mut ctx);
 
         ParserDatabase {
-            ast,
-            file,
+            asts,
             interner,
             names,
             types,
@@ -140,9 +161,23 @@ impl ParserDatabase {
         }
     }
 
-    /// The parsed AST.
-    pub fn ast(&self) -> &ast::SchemaAst {
-        &self.ast
+    /// The parsed AST. This method asserts that there is a single prisma schema file. As
+    /// multi-file schemas are implemented, calls to this method should be replaced with
+    /// `ParserDatabase::ast()` and `ParserDatabase::iter_asts()`.
+    /// TODO: consider removing once the `multiFileSchema` preview feature goes GA.
+    pub fn ast_assert_single(&self) -> &ast::SchemaAst {
+        assert_eq!(self.asts.0.len(), 1);
+        &self.asts.0.first().unwrap().2
+    }
+
+    /// Iterate all parsed ASTs.
+    pub fn iter_asts(&self) -> impl Iterator {
+        self.asts.iter().map(|(_, _, _, ast)| ast)
+    }
+
+    /// A parsed AST.
+    pub fn ast(&self, file_id: FileId) -> &ast::SchemaAst {
+        &self.asts[file_id].2
     }
 
     /// The total number of enums in the schema. This is O(1).
@@ -155,9 +190,25 @@ impl ParserDatabase {
         self.types.model_attributes.len()
     }
 
+    /// The source file contents. This method asserts that there is a single prisma schema file.
+    /// As multi-file schemas are implemented, calls to this method should be replaced with
+    /// `ParserDatabase::source()` and `ParserDatabase::iter_sources()`.
+    pub fn source_assert_single(&self) -> &str {
+        assert_eq!(self.asts.0.len(), 1);
+        self.asts.0[0].1.as_str()
+    }
+
     /// The source file contents.
- pub fn source(&self) -> &str { - self.file.as_str() + pub(crate) fn source(&self, file_id: FileId) -> &str { + self.asts[file_id].1.as_str() + } +} + +impl std::ops::Index for ParserDatabase { + type Output = (String, SourceFile, ast::SchemaAst); + + fn index(&self, index: FileId) -> &Self::Output { + &self.asts[index] } } diff --git a/psl/parser-database/src/names.rs b/psl/parser-database/src/names.rs index 3208c1c3bdb2..dff646ca5101 100644 --- a/psl/parser-database/src/names.rs +++ b/psl/parser-database/src/names.rs @@ -5,7 +5,7 @@ pub use reserved_model_names::is_reserved_type_name; use crate::{ ast::{self, ConfigBlockProperty, TopId, WithAttributes, WithIdentifier, WithName, WithSpan}, types::ScalarType, - Context, DatamodelError, StringId, + Context, DatamodelError, FileId, StringId, }; use reserved_model_names::{validate_enum_name, validate_model_name}; use rustc_hash::{FxHashMap as HashMap, FxHashSet as HashSet}; @@ -14,13 +14,13 @@ use rustc_hash::{FxHashMap as HashMap, FxHashSet as HashSet}; #[derive(Default)] pub(super) struct Names { /// Models, enums, composite types and type aliases - pub(super) tops: HashMap, + pub(super) tops: HashMap, /// Generators have their own namespace. - pub(super) generators: HashMap, + pub(super) generators: HashMap, /// Datasources have their own namespace. 
- pub(super) datasources: HashMap, - pub(super) model_fields: HashMap<(ast::ModelId, StringId), ast::FieldId>, - pub(super) composite_type_fields: HashMap<(ast::CompositeTypeId, StringId), ast::FieldId>, + pub(super) datasources: HashMap, + pub(super) model_fields: HashMap<(crate::ModelId, StringId), ast::FieldId>, + pub(super) composite_type_fields: HashMap<(crate::CompositeTypeId, StringId), ast::FieldId>, } /// `resolve_names()` is responsible for populating `ParserDatabase.names` and @@ -35,7 +35,7 @@ pub(super) fn resolve_names(ctx: &mut Context<'_>) { let mut tmp_names: HashSet<&str> = HashSet::default(); // throwaway container for duplicate checking let mut names = Names::default(); - for (top_id, top) in ctx.ast.iter_tops() { + for ((file_id, top_id), top) in ctx.iter_tops() { assert_is_not_a_reserved_scalar_type(top.identifier(), ctx); let namespace = match (top_id, top) { @@ -70,7 +70,11 @@ pub(super) fn resolve_names(ctx: &mut Context<'_>) { validate_attribute_identifiers(field, ctx); let field_name_id = ctx.interner.intern(field.name()); - if names.model_fields.insert((model_id, field_name_id), field_id).is_some() { + if names + .model_fields + .insert(((file_id, model_id), field_name_id), field_id) + .is_some() + { ctx.push_error(DatamodelError::new_duplicate_field_error( model.name(), field.name(), @@ -92,7 +96,11 @@ pub(super) fn resolve_names(ctx: &mut Context<'_>) { validate_attribute_identifiers(field, ctx); let field_name_id = ctx.interner.intern(field.name()); - if names.model_fields.insert((model_id, field_name_id), field_id).is_some() { + if names + .model_fields + .insert(((file_id, model_id), field_name_id), field_id) + .is_some() + { ctx.push_error(DatamodelError::new_duplicate_field_error( model.name(), field.name(), @@ -112,7 +120,7 @@ pub(super) fn resolve_names(ctx: &mut Context<'_>) { // Check that there is no duplicate field on the composite type if names .composite_type_fields - .insert((ctid, field_name_id), field_id) + 
.insert(((file_id, ctid), field_name_id), field_id) .is_some() { ctx.push_error(DatamodelError::new_composite_type_duplicate_field_error( @@ -136,16 +144,22 @@ pub(super) fn resolve_names(ctx: &mut Context<'_>) { _ => unreachable!(), }; - insert_name(top_id, top, namespace, ctx) + insert_name(file_id, top_id, top, namespace, ctx) } let _ = std::mem::replace(ctx.names, names); } -fn insert_name(top_id: TopId, top: &ast::Top, namespace: &mut HashMap, ctx: &mut Context<'_>) { +fn insert_name( + file_id: FileId, + top_id: TopId, + top: &ast::Top, + namespace: &mut HashMap, + ctx: &mut Context<'_>, +) { let name = ctx.interner.intern(top.name()); - if let Some(existing) = namespace.insert(name, top_id) { - ctx.push_error(duplicate_top_error(&ctx.ast[existing], top)); + if let Some(existing_top) = namespace.insert(name, (file_id, top_id)) { + ctx.push_error(duplicate_top_error(&ctx.asts[existing_top], top)); } } diff --git a/psl/parser-database/src/relations.rs b/psl/parser-database/src/relations.rs index 33bc8236cffa..0c1e0a454c69 100644 --- a/psl/parser-database/src/relations.rs +++ b/psl/parser-database/src/relations.rs @@ -2,7 +2,7 @@ use crate::{ ast::{self, WithName}, interner::StringId, walkers::RelationFieldId, - DatamodelError, Diagnostics, + DatamodelError, Diagnostics, FileId, {context::Context, types::RelationField}, }; use enumflags2::bitflags; @@ -75,11 +75,11 @@ pub(crate) struct Relations { /// (model_a, model_b, relation_idx) /// /// This can be interpreted as the relations _from_ a model. - forward: BTreeSet<(ast::ModelId, ast::ModelId, RelationId)>, + forward: BTreeSet<(crate::ModelId, crate::ModelId, RelationId)>, /// (model_b, model_a, relation_idx) /// /// This can be interpreted as the relations _to_ a model. 
- back: BTreeSet<(ast::ModelId, ast::ModelId, RelationId)>, + back: BTreeSet<(crate::ModelId, crate::ModelId, RelationId)>, } impl std::ops::Index for Relations { @@ -117,17 +117,23 @@ impl Relations { /// Iterator over relations where the provided model is model A, or the forward side of the /// relation. #[allow(clippy::wrong_self_convention)] // this is the name we want - pub(crate) fn from_model(&self, model_a_id: ast::ModelId) -> impl Iterator + '_ { + pub(crate) fn from_model(&self, model_a_id: crate::ModelId) -> impl Iterator + '_ { self.forward - .range((model_a_id, ast::ModelId::ZERO, RelationId::MIN)..(model_a_id, ast::ModelId::MAX, RelationId::MAX)) + .range( + (model_a_id, (FileId::ZERO, ast::ModelId::ZERO), RelationId::MIN) + ..(model_a_id, (FileId::MAX, ast::ModelId::MAX), RelationId::MAX), + ) .map(move |(_, _, relation_id)| *relation_id) } /// Iterator over relationss where the provided model is model B, or the backrelation side of /// the relation. - pub(crate) fn to_model(&self, model_a_id: ast::ModelId) -> impl Iterator + '_ { + pub(crate) fn to_model(&self, model_a_id: crate::ModelId) -> impl Iterator + '_ { self.back - .range((model_a_id, ast::ModelId::ZERO, RelationId::MIN)..(model_a_id, ast::ModelId::MAX, RelationId::MAX)) + .range( + (model_a_id, (FileId::ZERO, ast::ModelId::ZERO), RelationId::MIN) + ..(model_a_id, (FileId::MAX, ast::ModelId::MAX), RelationId::MAX), + ) .map(move |(_, _, relation_id)| *relation_id) } } @@ -180,8 +186,8 @@ pub(crate) struct Relation { /// The `name` argument in `@relation`. pub(super) relation_name: Option, pub(super) attributes: RelationAttributes, - pub(super) model_a: ast::ModelId, - pub(super) model_b: ast::ModelId, + pub(super) model_a: crate::ModelId, + pub(super) model_b: crate::ModelId, } impl Relation { @@ -209,7 +215,6 @@ impl Relation { // Implementation detail for this module. Should stay private. 
pub(super) struct RelationEvidence<'db> { pub(super) ast_model: &'db ast::Model, - pub(super) model_id: ast::ModelId, pub(super) ast_field: &'db ast::Field, pub(super) field_id: RelationFieldId, pub(super) is_self_relation: bool, @@ -219,14 +224,26 @@ pub(super) struct RelationEvidence<'db> { pub(super) opposite_relation_field: Option<(RelationFieldId, &'db ast::Field, &'db RelationField)>, } +impl RelationEvidence<'_> { + fn model_id(&self) -> crate::ModelId { + self.relation_field.model_id + } + + fn referenced_model_id(&self) -> crate::ModelId { + self.relation_field.referenced_model + } +} + pub(super) fn relation_evidence<'db>( (relation_field_id, relation_field): (RelationFieldId, &'db RelationField), ctx: &'db Context<'db>, ) -> RelationEvidence<'db> { - let ast = ctx.ast; - let ast_model = &ast[relation_field.model_id]; + let rf = &ctx.types[relation_field_id]; + let referencing_ast = &ctx.asts[rf.model_id.0].2; + let referenced_ast = &ctx.asts[rf.referenced_model.0].2; + let ast_model = &referencing_ast[relation_field.model_id.1]; let ast_field = &ast_model[relation_field.field_id]; - let opposite_model = &ast[relation_field.referenced_model]; + let opposite_model = &referenced_ast[relation_field.referenced_model.1]; let is_self_relation = relation_field.model_id == relation_field.referenced_model; let opposite_relation_field: Option<(RelationFieldId, &ast::Field, &'db RelationField)> = ctx .types @@ -238,7 +255,13 @@ pub(super) fn relation_evidence<'db>( !is_self_relation || opposite_relation_field.field_id != relation_field.field_id }) .find(|(_, opposite_relation_field)| opposite_relation_field.name == relation_field.name) - .map(|(opp_field_id, opp_rf)| (opp_field_id, &ast[opp_rf.model_id][opp_rf.field_id], opp_rf)); + .map(|(opp_field_id, opp_rf)| { + ( + opp_field_id, + &referenced_ast[opp_rf.model_id.1][opp_rf.field_id], + opp_rf, + ) + }); let is_two_way_embedded_many_to_many_relation = match (relation_field, opposite_relation_field) { (left, 
Some((_, _, right))) => left.fields.is_some() || right.fields.is_some(), @@ -247,7 +270,6 @@ pub(super) fn relation_evidence<'db>( RelationEvidence { ast_model, - model_id: relation_field.model_id, ast_field, field_id: relation_field_id, relation_field, @@ -359,7 +381,7 @@ pub(super) fn ingest_relation<'db>(evidence: RelationEvidence<'db>, relations: & match &evidence.relation_field.fields { Some(fields) => { let fields_are_unique = - ctx.types.model_attributes[&evidence.model_id] + ctx.types.model_attributes[&evidence.model_id()] .ast_indexes .iter() .any(|(_, idx)| { @@ -387,14 +409,14 @@ pub(super) fn ingest_relation<'db>(evidence: RelationEvidence<'db>, relations: & RelationAttributes::OneToMany(OneToManyRelationFields::Back(_)) => Relation { attributes: relation_type, relation_name: evidence.relation_field.name, - model_a: evidence.relation_field.referenced_model, - model_b: evidence.model_id, + model_a: evidence.referenced_model_id(), + model_b: evidence.model_id(), }, _ => Relation { attributes: relation_type, relation_name: evidence.relation_field.name, - model_a: evidence.model_id, - model_b: evidence.relation_field.referenced_model, + model_a: evidence.model_id(), + model_b: evidence.referenced_model_id(), }, }; @@ -408,11 +430,11 @@ pub(super) fn ingest_relation<'db>(evidence: RelationEvidence<'db>, relations: & relations .forward - .insert((evidence.model_id, evidence.relation_field.referenced_model, relation_id)); + .insert((evidence.model_id(), evidence.referenced_model_id(), relation_id)); relations .back - .insert((evidence.relation_field.referenced_model, evidence.model_id, relation_id)); + .insert((evidence.referenced_model_id(), evidence.model_id(), relation_id)); } /// An action describing the way referential integrity is managed in the system. 
diff --git a/psl/parser-database/src/types.rs b/psl/parser-database/src/types.rs index c5f2d222ce1e..c7626e08649d 100644 --- a/psl/parser-database/src/types.rs +++ b/psl/parser-database/src/types.rs @@ -8,11 +8,13 @@ use schema_ast::ast::{self, WithName}; use std::{collections::BTreeMap, fmt}; pub(super) fn resolve_types(ctx: &mut Context<'_>) { - for (top_id, top) in ctx.ast.iter_tops() { + for ((file_id, top_id), top) in ctx.iter_tops() { match (top_id, top) { - (ast::TopId::Model(model_id), ast::Top::Model(model)) => visit_model(model_id, model, ctx), + (ast::TopId::Model(model_id), ast::Top::Model(model)) => visit_model((file_id, model_id), model, ctx), (ast::TopId::Enum(_), ast::Top::Enum(enm)) => visit_enum(enm, ctx), - (ast::TopId::CompositeType(ct_id), ast::Top::CompositeType(ct)) => visit_composite_type(ct_id, ct, ctx), + (ast::TopId::CompositeType(ct_id), ast::Top::CompositeType(ct)) => { + visit_composite_type((file_id, ct_id), ct, ctx) + } (_, ast::Top::Source(_)) | (_, ast::Top::Generator(_)) => (), _ => unreachable!(), } @@ -21,13 +23,13 @@ pub(super) fn resolve_types(ctx: &mut Context<'_>) { #[derive(Debug, Default)] pub(super) struct Types { - pub(super) composite_type_fields: BTreeMap<(ast::CompositeTypeId, ast::FieldId), CompositeTypeField>, + pub(super) composite_type_fields: BTreeMap<(crate::CompositeTypeId, ast::FieldId), CompositeTypeField>, scalar_fields: Vec, /// This contains only the relation fields actually present in the schema /// source text. relation_fields: Vec, - pub(super) enum_attributes: HashMap, - pub(super) model_attributes: HashMap, + pub(super) enum_attributes: HashMap, + pub(super) model_attributes: HashMap, /// Sorted array of scalar fields that have an `@default()` attribute with a function that is /// not part of the base Prisma ones. This is meant for later validation in the datamodel /// connector. 
@@ -37,7 +39,7 @@ pub(super) struct Types { impl Types { pub(super) fn find_model_scalar_field( &self, - model_id: ast::ModelId, + model_id: crate::ModelId, field_id: ast::FieldId, ) -> Option { self.scalar_fields @@ -48,7 +50,7 @@ impl Types { pub(super) fn range_model_scalar_fields( &self, - model_id: ast::ModelId, + model_id: crate::ModelId, ) -> impl Iterator + Clone { let start = self.scalar_fields.partition_point(|sf| sf.model_id < model_id); self.scalar_fields[start..] @@ -71,7 +73,7 @@ impl Types { pub(super) fn range_model_scalar_field_ids( &self, - model_id: ast::ModelId, + model_id: crate::ModelId, ) -> impl Iterator + Clone { let end = self.scalar_fields.partition_point(|sf| sf.model_id <= model_id); let start = self.scalar_fields[..end].partition_point(|sf| sf.model_id < model_id); @@ -80,7 +82,7 @@ impl Types { pub(super) fn range_model_relation_fields( &self, - model_id: ast::ModelId, + model_id: crate::ModelId, ) -> impl Iterator + Clone { let first_relation_field_idx = self.relation_fields.partition_point(|rf| rf.model_id < model_id); self.relation_fields[first_relation_field_idx..] 
@@ -90,7 +92,7 @@ impl Types { .map(move |(idx, rf)| (RelationFieldId((first_relation_field_idx + idx) as u32), rf)) } - pub(super) fn refine_field(&self, id: (ast::ModelId, ast::FieldId)) -> Either { + pub(super) fn refine_field(&self, id: (crate::ModelId, ast::FieldId)) -> Either { self.relation_fields .binary_search_by_key(&id, |rf| (rf.model_id, rf.field_id)) .map(|idx| Either::Left(RelationFieldId(idx as u32))) @@ -158,7 +160,7 @@ pub(super) struct CompositeTypeField { #[derive(Debug)] enum FieldType { - Model(ast::ModelId), + Model(crate::ModelId), Scalar(ScalarFieldType), } @@ -177,9 +179,9 @@ impl UnsupportedType { #[derive(Debug, Clone, Copy, PartialEq)] pub enum ScalarFieldType { /// A composite type - CompositeType(ast::CompositeTypeId), + CompositeType(crate::CompositeTypeId), /// An enum - Enum(ast::EnumId), + Enum(crate::EnumId), /// A Prisma scalar type BuiltInScalar(ScalarType), /// An `Unsupported("...")` type @@ -196,7 +198,7 @@ impl ScalarFieldType { } /// Try to interpret this field type as a Composite Type. - pub fn as_composite_type(self) -> Option { + pub fn as_composite_type(self) -> Option { match self { ScalarFieldType::CompositeType(id) => Some(id), _ => None, @@ -204,7 +206,7 @@ impl ScalarFieldType { } /// Try to interpret this field type as an enum. 
- pub fn as_enum(self) -> Option { + pub fn as_enum(self) -> Option { match self { ScalarFieldType::Enum(id) => Some(id), _ => None, @@ -261,12 +263,12 @@ impl ScalarFieldType { pub(crate) struct DefaultAttribute { pub(crate) mapped_name: Option, pub(crate) argument_idx: usize, - pub(crate) default_attribute: ast::AttributeId, + pub(crate) default_attribute: crate::AttributeId, } #[derive(Debug)] pub(crate) struct ScalarField { - pub(crate) model_id: ast::ModelId, + pub(crate) model_id: crate::ModelId, pub(crate) field_id: ast::FieldId, pub(crate) r#type: ScalarFieldType, pub(crate) is_ignored: bool, @@ -284,9 +286,9 @@ pub(crate) struct ScalarField { #[derive(Debug)] pub(crate) struct RelationField { - pub(crate) model_id: ast::ModelId, + pub(crate) model_id: crate::ModelId, pub(crate) field_id: ast::FieldId, - pub(crate) referenced_model: ast::ModelId, + pub(crate) referenced_model: crate::ModelId, pub(crate) on_delete: Option<(crate::ReferentialAction, ast::Span)>, pub(crate) on_update: Option<(crate::ReferentialAction, ast::Span)>, /// The fields _explicitly present_ in the AST. @@ -302,7 +304,7 @@ pub(crate) struct RelationField { } impl RelationField { - fn new(model_id: ast::ModelId, field_id: ast::FieldId, referenced_model: ast::ModelId) -> Self { + fn new(model_id: crate::ModelId, field_id: ast::FieldId, referenced_model: crate::ModelId) -> Self { RelationField { model_id, field_id, @@ -491,7 +493,7 @@ impl IndexAttribute { pub(crate) struct IdAttribute { pub(crate) fields: Vec, pub(super) source_field: Option, - pub(super) source_attribute: ast::AttributeId, + pub(super) source_attribute: crate::AttributeId, pub(super) name: Option, pub(super) mapped_name: Option, pub(super) clustered: Option, @@ -545,7 +547,7 @@ pub struct IndexFieldPath { /// // ^this one is the path. 
in this case a vector of one element /// } /// ``` - path: Vec<(ast::CompositeTypeId, ast::FieldId)>, + path: Vec<(crate::CompositeTypeId, ast::FieldId)>, } impl IndexFieldPath { @@ -553,7 +555,7 @@ impl IndexFieldPath { Self { root, path: Vec::new() } } - pub(crate) fn push_field(&mut self, ctid: ast::CompositeTypeId, field_id: ast::FieldId) { + pub(crate) fn push_field(&mut self, ctid: crate::CompositeTypeId, field_id: ast::FieldId) { self.path.push((ctid, field_id)); } @@ -593,7 +595,7 @@ impl IndexFieldPath { /// @@index([a.field]) /// } /// ``` - pub fn path(&self) -> &[(ast::CompositeTypeId, ast::FieldId)] { + pub fn path(&self) -> &[(crate::CompositeTypeId, ast::FieldId)] { &self.path } @@ -601,10 +603,10 @@ impl IndexFieldPath { /// or in a composite type embedded in the model. Returns the same value as /// the [`root`](Self::root()) method if the field is in a model rather than in a /// composite type. - pub fn field_in_index(&self) -> Either { + pub fn field_in_index(&self) -> Either { self.path .last() - .map(|id| Either::Right(*id)) + .map(|(ct, field)| Either::Right((*ct, *field))) .unwrap_or(Either::Left(self.root)) } } @@ -629,7 +631,7 @@ pub(super) struct EnumAttributes { pub(crate) schema: Option<(StringId, ast::Span)>, } -fn visit_model<'db>(model_id: ast::ModelId, ast_model: &'db ast::Model, ctx: &mut Context<'db>) { +fn visit_model<'db>(model_id: crate::ModelId, ast_model: &'db ast::Model, ctx: &mut Context<'db>) { for (field_id, ast_field) in ast_model.iter_fields() { match field_type(ast_field, ctx) { Ok(FieldType::Model(referenced_model)) => { @@ -650,7 +652,6 @@ fn visit_model<'db>(model_id: ast::ModelId, ast_model: &'db ast::Model, ctx: &mu } Err(supported) => { let top_names: Vec<_> = ctx - .ast .iter_tops() .filter_map(|(_, top)| match top { ast::Top::Source(_) | ast::Top::Generator(_) => None, @@ -687,7 +688,7 @@ fn visit_model<'db>(model_id: ast::ModelId, ast_model: &'db ast::Model, ctx: &mu } } -fn visit_composite_type<'db>(ct_id: 
ast::CompositeTypeId, ct: &'db ast::CompositeType, ctx: &mut Context<'db>) { +fn visit_composite_type<'db>(ct_id: crate::CompositeTypeId, ct: &'db ast::CompositeType, ctx: &mut Context<'db>) { for (field_id, ast_field) in ct.iter_fields() { match field_type(ast_field, ctx) { Ok(FieldType::Scalar(scalar_type)) => { @@ -700,7 +701,7 @@ fn visit_composite_type<'db>(ct_id: ast::CompositeTypeId, ct: &'db ast::Composit ctx.types.composite_type_fields.insert((ct_id, field_id), field); } Ok(FieldType::Model(referenced_model_id)) => { - let referenced_model_name = ctx.ast[referenced_model_id].name(); + let referenced_model_name = ctx.asts[referenced_model_id].name(); ctx.push_error(DatamodelError::new_composite_type_validation_error(&format!("{referenced_model_name} refers to a model, making this a relation field. Relation fields inside composite types are not supported."), ct.name(), ast_field.field_type.span())) } Err(supported) => ctx.push_error(DatamodelError::new_type_not_found_error( @@ -734,13 +735,20 @@ fn field_type<'db>(field: &'db ast::Field, ctx: &mut Context<'db>) -> Result Ok(FieldType::Model(model_id)), - Some((ast::TopId::Enum(enum_id), ast::Top::Enum(_))) => Ok(FieldType::Scalar(ScalarFieldType::Enum(enum_id))), - Some((ast::TopId::CompositeType(ctid), ast::Top::CompositeType(_))) => { - Ok(FieldType::Scalar(ScalarFieldType::CompositeType(ctid))) + match ctx + .names + .tops + .get(&supported_string_id) + .map(|id| (id.0, id.1, &ctx.asts[*id])) + { + Some((file_id, ast::TopId::Model(model_id), ast::Top::Model(_))) => Ok(FieldType::Model((file_id, model_id))), + Some((file_id, ast::TopId::Enum(enum_id), ast::Top::Enum(_))) => { + Ok(FieldType::Scalar(ScalarFieldType::Enum((file_id, enum_id)))) + } + Some((file_id, ast::TopId::CompositeType(ctid), ast::Top::CompositeType(_))) => { + Ok(FieldType::Scalar(ScalarFieldType::CompositeType((file_id, ctid)))) } - Some((_, ast::Top::Generator(_))) | Some((_, ast::Top::Source(_))) => unreachable!(), + Some((_, _, 
ast::Top::Generator(_))) | Some((_, _, ast::Top::Source(_))) => unreachable!(), None => Err(supported), _ => unreachable!(), } diff --git a/psl/parser-database/src/walkers.rs b/psl/parser-database/src/walkers.rs index 7ee92e3e3f70..abfe290b5bd6 100644 --- a/psl/parser-database/src/walkers.rs +++ b/psl/parser-database/src/walkers.rs @@ -25,6 +25,8 @@ pub use relation::*; pub use relation_field::*; pub use scalar_field::*; +use crate::{ast, FileId}; + /// A generic walker. Only walkers intantiated with a concrete ID type (`I`) are useful. #[derive(Clone, Copy)] pub struct Walker<'db, I> { @@ -52,12 +54,18 @@ where } impl crate::ParserDatabase { + fn iter_tops(&self) -> impl Iterator + '_ { + self.asts + .iter() + .flat_map(move |(file_id, _, _, ast)| ast.iter_tops().map(move |(top_id, top)| (file_id, top_id, top))) + } + /// Find an enum by name. pub fn find_enum<'db>(&'db self, name: &str) -> Option> { self.interner .lookup(name) .and_then(|name_id| self.names.tops.get(&name_id)) - .and_then(|top_id| top_id.as_enum_id()) + .and_then(|(file_id, top_id)| top_id.as_enum_id().map(|id| (*file_id, id))) .map(|enum_id| self.walk(enum_id)) } @@ -66,7 +74,7 @@ impl crate::ParserDatabase { self.interner .lookup(name) .and_then(|name_id| self.names.tops.get(&name_id)) - .and_then(|top_id| top_id.as_model_id()) + .and_then(|(file_id, top_id)| top_id.as_model_id().map(|id| (*file_id, id))) .map(|model_id| self.walk(model_id)) } @@ -77,35 +85,31 @@ impl crate::ParserDatabase { /// Walk all enums in the schema. pub fn walk_enums(&self) -> impl Iterator> { - self.ast() - .iter_tops() - .filter_map(|(top_id, _)| top_id.as_enum_id()) - .map(move |enum_id| Walker { db: self, id: enum_id }) + self.iter_tops() + .filter_map(|(file_id, top_id, _)| top_id.as_enum_id().map(|id| (file_id, id))) + .map(move |enum_id| self.walk(enum_id)) } /// Walk all the models in the schema. 
pub fn walk_models(&self) -> impl Iterator> + '_ { - self.ast() - .iter_tops() - .filter_map(|(top_id, _)| top_id.as_model_id()) - .map(move |model_id| self.walk(model_id)) + self.iter_tops() + .filter_map(|(file_id, top_id, _)| top_id.as_model_id().map(|id| (file_id, id))) + .map(move |(file_id, model_id)| self.walk((file_id, model_id))) .filter(|m| !m.ast_model().is_view()) } /// Walk all the views in the schema. pub fn walk_views(&self) -> impl Iterator> + '_ { - self.ast() - .iter_tops() - .filter_map(|(top_id, _)| top_id.as_model_id()) + self.iter_tops() + .filter_map(|(file_id, top_id, _)| top_id.as_model_id().map(|id| (file_id, id))) .map(move |model_id| self.walk(model_id)) .filter(|m| m.ast_model().is_view()) } /// Walk all the composite types in the schema. pub fn walk_composite_types(&self) -> impl Iterator> + '_ { - self.ast() - .iter_tops() - .filter_map(|(top_id, _)| top_id.as_composite_type_id()) + self.iter_tops() + .filter_map(|(file_id, top_id, _)| top_id.as_composite_type_id().map(|id| (file_id, id))) .map(|id| self.walk(id)) } diff --git a/psl/parser-database/src/walkers/composite_type.rs b/psl/parser-database/src/walkers/composite_type.rs index f22648e286e1..af286e9d0f2d 100644 --- a/psl/parser-database/src/walkers/composite_type.rs +++ b/psl/parser-database/src/walkers/composite_type.rs @@ -1,5 +1,5 @@ use super::Walker; -use crate::{ast, ScalarFieldType, ScalarType}; +use crate::{ast, FileId, ScalarFieldType, ScalarType}; use diagnostics::Span; use schema_ast::ast::{WithDocumentation, WithName}; @@ -17,20 +17,20 @@ use schema_ast::ast::{WithDocumentation, WithName}; /// countryCode String /// } /// ``` -pub type CompositeTypeWalker<'db> = Walker<'db, ast::CompositeTypeId>; +pub type CompositeTypeWalker<'db> = Walker<'db, crate::CompositeTypeId>; /// A field in a composite type. 
-pub type CompositeTypeFieldWalker<'db> = Walker<'db, (ast::CompositeTypeId, ast::FieldId)>; +pub type CompositeTypeFieldWalker<'db> = Walker<'db, (crate::CompositeTypeId, ast::FieldId)>; impl<'db> CompositeTypeWalker<'db> { /// The ID of the composite type node in the AST. - pub fn composite_type_id(self) -> ast::CompositeTypeId { + pub fn composite_type_id(self) -> (FileId, ast::CompositeTypeId) { self.id } /// The composite type node in the AST. pub fn ast_composite_type(self) -> &'db ast::CompositeType { - &self.db.ast()[self.id] + &self.db.asts[self.id] } /// The name of the composite type in the schema. @@ -53,7 +53,7 @@ impl<'db> CompositeTypeFieldWalker<'db> { /// The AST node for the field. pub fn ast_field(self) -> &'db ast::Field { - &self.db.ast[self.id.0][self.id.1] + &self.db.asts[self.id.0][self.id.1] } /// The composite type containing the field. @@ -101,7 +101,10 @@ impl<'db> CompositeTypeFieldWalker<'db> { /// The `@default()` AST attribute on the field, if any. pub fn default_attribute(self) -> Option<&'db ast::Attribute> { - self.field().default.as_ref().map(|d| &self.db.ast[d.default_attribute]) + self.field() + .default + .as_ref() + .map(|d| &self.db.asts[(self.id.0 .0, d.default_attribute.1)]) } /// (attribute scope, native type name, arguments, span) diff --git a/psl/parser-database/src/walkers/enum.rs b/psl/parser-database/src/walkers/enum.rs index c97b420a59fa..07624527bb11 100644 --- a/psl/parser-database/src/walkers/enum.rs +++ b/psl/parser-database/src/walkers/enum.rs @@ -1,11 +1,10 @@ -use schema_ast::ast::{IndentationType, NewlineType}; - use crate::{ast, ast::WithDocumentation, types, walkers::Walker}; +use schema_ast::ast::{IndentationType, NewlineType}; /// An `enum` declaration in the schema. -pub type EnumWalker<'db> = Walker<'db, ast::EnumId>; +pub type EnumWalker<'db> = Walker<'db, crate::EnumId>; /// One value in an `enum` declaration in the schema. 
-pub type EnumValueWalker<'db> = Walker<'db, (ast::EnumId, usize)>; +pub type EnumValueWalker<'db> = Walker<'db, (crate::EnumId, usize)>; impl<'db> EnumWalker<'db> { fn attributes(self) -> &'db types::EnumAttributes { @@ -19,7 +18,7 @@ impl<'db> EnumWalker<'db> { /// The AST node. pub fn ast_enum(self) -> &'db ast::Enum { - &self.db.ast()[self.id] + &self.db.asts[self.id] } /// The database name of the enum. diff --git a/psl/parser-database/src/walkers/field.rs b/psl/parser-database/src/walkers/field.rs index d8babd993391..87bea6560344 100644 --- a/psl/parser-database/src/walkers/field.rs +++ b/psl/parser-database/src/walkers/field.rs @@ -6,12 +6,12 @@ use crate::{ use schema_ast::ast; /// A model field, scalar or relation. -pub type FieldWalker<'db> = Walker<'db, (ast::ModelId, ast::FieldId)>; +pub type FieldWalker<'db> = Walker<'db, (crate::ModelId, ast::FieldId)>; impl<'db> FieldWalker<'db> { /// The AST node for the field. pub fn ast_field(self) -> &'db ast::Field { - &self.db.ast[self.id.0][self.id.1] + &self.db.asts[self.id.0][self.id.1] } /// The field name. @@ -45,20 +45,14 @@ pub enum RefinedFieldWalker<'db> { impl<'db> From> for FieldWalker<'db> { fn from(w: ScalarFieldWalker<'db>) -> Self { let ScalarField { model_id, field_id, .. } = w.db.types[w.id]; - Walker { - db: w.db, - id: (model_id, field_id), - } + w.db.walk((model_id, field_id)) } } impl<'db> From> for FieldWalker<'db> { fn from(w: RelationFieldWalker<'db>) -> Self { let RelationField { model_id, field_id, .. } = w.db.types[w.id]; - Walker { - db: w.db, - id: (model_id, field_id), - } + w.db.walk((model_id, field_id)) } } diff --git a/psl/parser-database/src/walkers/index.rs b/psl/parser-database/src/walkers/index.rs index e75c4c58fc87..63b6b30b7b44 100644 --- a/psl/parser-database/src/walkers/index.rs +++ b/psl/parser-database/src/walkers/index.rs @@ -11,7 +11,7 @@ use crate::{ /// An index, unique or fulltext attribute. 
#[derive(Copy, Clone)] pub struct IndexWalker<'db> { - pub(crate) model_id: ast::ModelId, + pub(crate) model_id: crate::ModelId, pub(crate) index: ast::AttributeId, pub(crate) db: &'db ParserDatabase, pub(crate) index_attribute: &'db IndexAttribute, @@ -69,7 +69,7 @@ impl<'db> IndexWalker<'db> { /// The AST node of the index/unique attribute. pub fn ast_attribute(self) -> &'db ast::Attribute { - &self.db.ast[self.index] + &self.db.asts[(self.model_id.0, self.index)] } pub(crate) fn attribute(self) -> &'db IndexAttribute { diff --git a/psl/parser-database/src/walkers/model.rs b/psl/parser-database/src/walkers/model.rs index 313efd0ca819..e4290a1a00f7 100644 --- a/psl/parser-database/src/walkers/model.rs +++ b/psl/parser-database/src/walkers/model.rs @@ -12,11 +12,12 @@ use super::{ use crate::{ ast::{self, WithName}, types::ModelAttributes, + FileId, }; use schema_ast::ast::{IndentationType, NewlineType, WithSpan}; /// A `model` declaration in the Prisma schema. -pub type ModelWalker<'db> = super::Walker<'db, ast::ModelId>; +pub type ModelWalker<'db> = super::Walker<'db, (FileId, ast::ModelId)>; impl<'db> ModelWalker<'db> { /// The name of the model. @@ -59,14 +60,9 @@ impl<'db> ModelWalker<'db> { .is_some() } - /// The ID of the model in the db - pub fn model_id(self) -> ast::ModelId { - self.id - } - /// The AST node. pub fn ast_model(self) -> &'db ast::Model { - &self.db.ast[self.id] + &self.db.asts[self.id] } /// The parsed attributes. @@ -86,7 +82,7 @@ impl<'db> ModelWalker<'db> { self.attributes() .mapped_name .map(|id| &self.db[id]) - .unwrap_or_else(|| self.db.ast[self.id].name()) + .unwrap_or_else(|| self.ast_model().name()) } /// Used in validation. True only if the model has a single field id. 
@@ -216,7 +212,7 @@ impl<'db> ModelWalker<'db> { None => return IndentationType::default(), }; - let src = self.db.source(); + let src = self.db.source(self.id.0); let start = field.ast_field().span().start; let mut spaces = 0; @@ -241,7 +237,7 @@ impl<'db> ModelWalker<'db> { None => return NewlineType::default(), }; - let src = self.db.source(); + let src = self.db.source(self.id.0); let start = field.ast_field().span().end - 2; match src.chars().nth(start) { diff --git a/psl/parser-database/src/walkers/model/primary_key.rs b/psl/parser-database/src/walkers/model/primary_key.rs index ba3de30ea633..71792dce770b 100644 --- a/psl/parser-database/src/walkers/model/primary_key.rs +++ b/psl/parser-database/src/walkers/model/primary_key.rs @@ -8,7 +8,7 @@ use crate::{ /// An `@(@)id` attribute in the schema. #[derive(Copy, Clone)] pub struct PrimaryKeyWalker<'db> { - pub(crate) model_id: ast::ModelId, + pub(crate) model_id: crate::ModelId, pub(crate) attribute: &'db IdAttribute, pub(crate) db: &'db ParserDatabase, } @@ -16,7 +16,7 @@ pub struct PrimaryKeyWalker<'db> { impl<'db> PrimaryKeyWalker<'db> { /// The `@(@)id` AST node. pub fn ast_attribute(self) -> &'db ast::Attribute { - &self.db.ast[self.attribute.source_attribute] + &self.db.asts[(self.model_id.0, self.attribute.source_attribute.1)] } /// The mapped name of the id. diff --git a/psl/parser-database/src/walkers/relation.rs b/psl/parser-database/src/walkers/relation.rs index 1557633fbc0b..26e3ec61e052 100644 --- a/psl/parser-database/src/walkers/relation.rs +++ b/psl/parser-database/src/walkers/relation.rs @@ -14,7 +14,7 @@ pub type RelationWalker<'db> = Walker<'db, RelationId>; impl<'db> RelationWalker<'db> { /// The models at each end of the relation. [model A, model B]. Can be the same model twice. 
- pub fn models(self) -> [ast::ModelId; 2] { + pub fn models(self) -> [(FileId, ast::ModelId); 2] { let rel = self.get(); [rel.model_a, rel.model_b] } diff --git a/psl/parser-database/src/walkers/relation_field.rs b/psl/parser-database/src/walkers/relation_field.rs index b96380f03bf6..7f6b2e8037a4 100644 --- a/psl/parser-database/src/walkers/relation_field.rs +++ b/psl/parser-database/src/walkers/relation_field.rs @@ -28,7 +28,7 @@ impl<'db> RelationFieldWalker<'db> { /// The AST node of the field. pub fn ast_field(self) -> &'db ast::Field { let RelationField { model_id, field_id, .. } = self.db.types[self.id]; - &self.db.ast[model_id][field_id] + &self.db.asts[model_id][field_id] } pub(crate) fn attributes(self) -> &'db RelationField { @@ -83,11 +83,12 @@ impl<'db> RelationFieldWalker<'db> { /// The `@relation` attribute in the field AST. pub fn relation_attribute(self) -> Option<&'db ast::Attribute> { - self.attributes().relation_attribute.map(|id| &self.db.ast[id]) + let attrs = self.attributes(); + attrs.relation_attribute.map(|id| &self.db.asts[(attrs.model_id.0, id)]) } /// Does the relation field reference the passed in model? - pub fn references_model(self, other: ast::ModelId) -> bool { + pub fn references_model(self, other: crate::ModelId) -> bool { self.attributes().referenced_model == other } diff --git a/psl/parser-database/src/walkers/scalar_field.rs b/psl/parser-database/src/walkers/scalar_field.rs index 9cea79b8485a..7a9a0984584a 100644 --- a/psl/parser-database/src/walkers/scalar_field.rs +++ b/psl/parser-database/src/walkers/scalar_field.rs @@ -19,7 +19,7 @@ impl<'db> ScalarFieldWalker<'db> { /// The field node in the AST. pub fn ast_field(self) -> &'db ast::Field { let ScalarField { model_id, field_id, .. } = self.attributes(); - &self.db.ast[*model_id][*field_id] + &self.db.asts[*model_id][*field_id] } /// Is this field unique? 
This method will return true if: @@ -53,7 +53,7 @@ impl<'db> ScalarFieldWalker<'db> { .default .as_ref() .map(|d| d.default_attribute) - .map(|id| &self.db.ast[id]) + .map(|id| &self.db.asts[id]) } /// The final database name of the field. See crate docs for explanations on database names. @@ -169,7 +169,7 @@ pub struct DefaultValueWalker<'db> { impl<'db> DefaultValueWalker<'db> { /// The AST node of the attribute. pub fn ast_attribute(self) -> &'db ast::Attribute { - &self.db.ast[self.default.default_attribute] + &self.db.asts[self.default.default_attribute] } /// The value expression in the `@default` attribute. @@ -374,7 +374,7 @@ impl<'db> ScalarFieldAttributeWalker<'db> { let mut result = vec![(root_name, None)]; for (ctid, field_id) in path.path() { - let ct = &self.db.ast[*ctid]; + let ct = &self.db.asts[*ctid]; let field = ct[*field_id].name(); result.push((field, Some(ct.name()))); @@ -400,7 +400,7 @@ impl<'db> ScalarFieldAttributeWalker<'db> { let mut result = vec![(root, None)]; for (ctid, field_id) in path.path() { - let ct = &self.db.ast[*ctid]; + let ct = &self.db.asts[*ctid]; let field = &self.db.types.composite_type_fields[&(*ctid, *field_id)] .mapped_name diff --git a/psl/psl-core/src/builtin_connectors/postgres_datamodel_connector.rs b/psl/psl-core/src/builtin_connectors/postgres_datamodel_connector.rs index d5cebd189bc6..3bb04eed4514 100644 --- a/psl/psl-core/src/builtin_connectors/postgres_datamodel_connector.rs +++ b/psl/psl-core/src/builtin_connectors/postgres_datamodel_connector.rs @@ -497,7 +497,7 @@ impl Connector for PostgresDatamodelConnector { let index_field = db .walk_models() .chain(db.walk_views()) - .find(|model| model.model_id() == model_id) + .find(|model| model.id.1 == model_id) .and_then(|model| { model.indexes().find(|index| { index.attribute_id() diff --git a/psl/psl-core/src/configuration/configuration_struct.rs b/psl/psl-core/src/configuration/configuration_struct.rs index 3da58f6efdea..41d3d6ebf413 100644 --- 
a/psl/psl-core/src/configuration/configuration_struct.rs +++ b/psl/psl-core/src/configuration/configuration_struct.rs @@ -6,7 +6,7 @@ use crate::{ }; use enumflags2::BitFlags; -#[derive(Debug)] +#[derive(Debug, Default)] pub struct Configuration { pub generators: Vec, pub datasources: Vec, @@ -18,7 +18,7 @@ impl Configuration { if self.datasources.is_empty() { Err(DatamodelError::new_validation_error( "You defined no datasource. You must define exactly one datasource.", - schema_ast::ast::Span::new(0, 0), + schema_ast::ast::Span::new(0, 0, diagnostics::FileId::ZERO), ) .into()) } else { diff --git a/psl/psl-core/src/lib.rs b/psl/psl-core/src/lib.rs index ca0ce37cc0f1..9d1877bd26da 100644 --- a/psl/psl-core/src/lib.rs +++ b/psl/psl-core/src/lib.rs @@ -52,14 +52,57 @@ impl ValidatedSchema { pub fn relation_mode(&self) -> datamodel_connector::RelationMode { self.relation_mode } + + pub fn render_diagnostics(&self) -> String { + let mut out = Vec::new(); + + for error in self.diagnostics.errors() { + let (file_name, source, _) = &self.db[error.span().file_id]; + error.pretty_print(&mut out, file_name, source.as_str()).unwrap(); + } + + String::from_utf8(out).unwrap() + } } /// The most general API for dealing with Prisma schemas. It accumulates what analysis and /// validation information it can, and returns it along with any error and warning diagnostics. 
pub fn validate(file: SourceFile, connectors: ConnectorRegistry<'_>) -> ValidatedSchema { let mut diagnostics = Diagnostics::new(); - let db = ParserDatabase::new(file, &mut diagnostics); - let configuration = validate_configuration(db.ast(), &mut diagnostics, connectors); + let db = ParserDatabase::new_single_file(file, &mut diagnostics); + let configuration = validate_configuration(db.ast_assert_single(), &mut diagnostics, connectors); + let datasources = &configuration.datasources; + let out = validate::validate(db, datasources, configuration.preview_features(), diagnostics); + + ValidatedSchema { + diagnostics: out.diagnostics, + configuration, + connector: out.connector, + db: out.db, + relation_mode: out.relation_mode, + } +} + +/// The most general API for dealing with Prisma schemas. It accumulates what analysis and +/// validation information it can, and returns it along with any error and warning diagnostics. +pub fn validate_multi_file(files: Vec<(String, SourceFile)>, connectors: ConnectorRegistry<'_>) -> ValidatedSchema { + assert!( + !files.is_empty(), + "psl::validate_multi_file() must be called with at least one file" + ); + let mut diagnostics = Diagnostics::new(); + let db = ParserDatabase::new(files, &mut diagnostics); + + // TODO: the bulk of configuration block analysis should be part of ParserDatabase::new(). 
+ let mut configuration = Configuration::default(); + for ast in db.iter_asts() { + let new_config = validate_configuration(ast, &mut diagnostics, connectors); + + configuration.datasources.extend(new_config.datasources.into_iter()); + configuration.generators.extend(new_config.generators.into_iter()); + configuration.warnings.extend(new_config.warnings.into_iter()); + } + let datasources = &configuration.datasources; let out = validate::validate(db, datasources, configuration.preview_features(), diagnostics); @@ -77,8 +120,8 @@ pub fn validate(file: SourceFile, connectors: ConnectorRegistry<'_>) -> Validate /// computationally or in terms of bundle size (e.g., for `query-engine-wasm`). pub fn parse_without_validation(file: SourceFile, connectors: ConnectorRegistry<'_>) -> ValidatedSchema { let mut diagnostics = Diagnostics::new(); - let db = ParserDatabase::new(file, &mut diagnostics); - let configuration = validate_configuration(db.ast(), &mut diagnostics, connectors); + let db = ParserDatabase::new_single_file(file, &mut diagnostics); + let configuration = validate_configuration(db.ast_assert_single(), &mut diagnostics, connectors); let datasources = &configuration.datasources; let out = validate::parse_without_validation(db, datasources); @@ -97,7 +140,7 @@ pub fn parse_configuration( connectors: ConnectorRegistry<'_>, ) -> Result { let mut diagnostics = Diagnostics::default(); - let ast = schema_ast::parse_schema(schema, &mut diagnostics); + let ast = schema_ast::parse_schema(schema, &mut diagnostics, diagnostics::FileId::ZERO); let out = validate_configuration(&ast, &mut diagnostics, connectors); diagnostics.to_result().map(|_| out) } diff --git a/psl/psl-core/src/reformat.rs b/psl/psl-core/src/reformat.rs index eaf8aa5400b4..09d21c731b38 100644 --- a/psl/psl-core/src/reformat.rs +++ b/psl/psl-core/src/reformat.rs @@ -9,7 +9,7 @@ pub fn reformat(source: &str, indent_width: usize) -> Option { let file = 
SourceFile::new_allocated(Arc::from(source.to_owned().into_boxed_str())); let mut diagnostics = diagnostics::Diagnostics::new(); - let db = parser_database::ParserDatabase::new(file, &mut diagnostics); + let db = parser_database::ParserDatabase::new_single_file(file, &mut diagnostics); let source_to_reformat = if diagnostics.has_errors() { Cow::Borrowed(source) diff --git a/psl/psl-core/src/validate/validation_pipeline/validations/composite_types.rs b/psl/psl-core/src/validate/validation_pipeline/validations/composite_types.rs index da0a3db3a515..fbaaa3525a49 100644 --- a/psl/psl-core/src/validate/validation_pipeline/validations/composite_types.rs +++ b/psl/psl-core/src/validate/validation_pipeline/validations/composite_types.rs @@ -2,7 +2,7 @@ use super::default_value; use crate::{datamodel_connector::ConnectorCapability, validate::validation_pipeline::context::Context}; use diagnostics::DatamodelError; use parser_database::{ - ast::{self, WithSpan}, + ast::WithSpan, walkers::{CompositeTypeFieldWalker, CompositeTypeWalker}, ScalarFieldType, }; @@ -11,8 +11,8 @@ use std::{fmt, rc::Rc}; /// Detect compound type chains that form a cycle, that is not broken with either an optional or an /// array type. 
pub(super) fn detect_composite_cycles(ctx: &mut Context<'_>) { - let mut visited: Vec = Vec::new(); - let mut errors: Vec<(ast::CompositeTypeId, DatamodelError)> = Vec::new(); + let mut visited: Vec = Vec::new(); + let mut errors: Vec<(parser_database::CompositeTypeId, DatamodelError)> = Vec::new(); let mut fields_to_traverse: Vec<(CompositeTypeFieldWalker<'_>, Option>>)> = ctx .db diff --git a/psl/psl-core/src/validate/validation_pipeline/validations/constraint_namespace.rs b/psl/psl-core/src/validate/validation_pipeline/validations/constraint_namespace.rs index e4b02ebc9308..495aa9b44670 100644 --- a/psl/psl-core/src/validate/validation_pipeline/validations/constraint_namespace.rs +++ b/psl/psl-core/src/validate/validation_pipeline/validations/constraint_namespace.rs @@ -1,5 +1,4 @@ use crate::datamodel_connector::{walker_ext_traits::*, ConstraintScope}; -use parser_database::ast; use std::{borrow::Cow, collections::HashMap, ops::Deref}; /// A constraint namespace consists of two kinds of namespaces: @@ -10,8 +9,8 @@ use std::{borrow::Cow, collections::HashMap, ops::Deref}; pub(crate) struct ConstraintNamespace<'db> { // (ConstraintScope, schema name, name) -> occurrences global: HashMap<(ConstraintScope, Option<&'db str>, Cow<'db, str>), usize>, - local: HashMap<(ast::ModelId, ConstraintScope, Cow<'db, str>), usize>, - local_custom_name: HashMap<(ast::ModelId, Cow<'db, str>), usize>, + local: HashMap<(parser_database::ModelId, ConstraintScope, Cow<'db, str>), usize>, + local_custom_name: HashMap<(parser_database::ModelId, Cow<'db, str>), usize>, } impl<'db> ConstraintNamespace<'db> { @@ -19,7 +18,7 @@ impl<'db> ConstraintNamespace<'db> { /// local violations in the given model. 
pub(crate) fn constraint_name_scope_violations( &self, - model_id: ast::ModelId, + model_id: parser_database::ModelId, name: ConstraintName<'db>, ctx: &super::Context<'db>, ) -> impl Iterator + '_ { @@ -43,7 +42,7 @@ impl<'db> ConstraintNamespace<'db> { fn local_constraint_name_scope_violations( &self, - model_id: ast::ModelId, + model_id: parser_database::ModelId, name: ConstraintName<'db>, ) -> impl Iterator + '_ { name.possible_scopes().filter(move |scope| { @@ -54,7 +53,11 @@ impl<'db> ConstraintNamespace<'db> { }) } - pub(crate) fn local_custom_name_scope_violations(&self, model_id: ast::ModelId, name: &'db str) -> bool { + pub(crate) fn local_custom_name_scope_violations( + &self, + model_id: parser_database::ModelId, + name: &'db str, + ) -> bool { match self.local_custom_name.get(&(model_id, Cow::from(name))) { Some(count) => *count > 1, None => false, @@ -127,7 +130,7 @@ impl<'db> ConstraintNamespace<'db> { for index in model.indexes() { let counter = self .local - .entry((model.model_id(), scope, index.constraint_name(ctx.connector))) + .entry((model.id, scope, index.constraint_name(ctx.connector))) .or_default(); *counter += 1; @@ -139,7 +142,7 @@ impl<'db> ConstraintNamespace<'db> { pub(super) fn add_local_primary_keys(&mut self, scope: ConstraintScope, ctx: &super::Context<'db>) { for model in ctx.db.walk_models().chain(ctx.db.walk_views()) { if let Some(name) = model.primary_key().and_then(|pk| pk.constraint_name(ctx.connector)) { - let counter = self.local.entry((model.model_id(), scope, name)).or_default(); + let counter = self.local.entry((model.id, scope, name)).or_default(); *counter += 1; } } @@ -149,18 +152,12 @@ impl<'db> ConstraintNamespace<'db> { pub(super) fn add_local_custom_names_for_primary_keys_and_uniques(&mut self, ctx: &super::Context<'db>) { for model in ctx.db.walk_models().chain(ctx.db.walk_views()) { if let Some(name) = model.primary_key().and_then(|pk| pk.name()) { - let counter = self - .local_custom_name - 
.entry((model.model_id(), Cow::from(name))) - .or_default(); + let counter = self.local_custom_name.entry((model.id, Cow::from(name))).or_default(); *counter += 1; } for index in model.indexes() { if let Some(name) = index.name() { - let counter = self - .local_custom_name - .entry((model.model_id(), Cow::from(name))) - .or_default(); + let counter = self.local_custom_name.entry((model.id, Cow::from(name))).or_default(); *counter += 1; } } @@ -175,7 +172,7 @@ impl<'db> ConstraintNamespace<'db> { .filter_map(|r| r.refine().as_inline()) .map(|r| r.constraint_name(ctx.connector)) { - let counter = self.local.entry((model.model_id(), scope, name)).or_default(); + let counter = self.local.entry((model.id, scope, name)).or_default(); *counter += 1; } diff --git a/psl/psl-core/src/validate/validation_pipeline/validations/fields.rs b/psl/psl-core/src/validate/validation_pipeline/validations/fields.rs index 0613fda2a48f..674d8e50d3bc 100644 --- a/psl/psl-core/src/validate/validation_pipeline/validations/fields.rs +++ b/psl/psl-core/src/validate/validation_pipeline/validations/fields.rs @@ -21,7 +21,7 @@ pub(super) fn validate_client_name(field: FieldWalker<'_>, names: &Names<'_>, ct "model" }; - for taken in names.name_taken(model.model_id(), field.name()).into_iter() { + for taken in names.name_taken(model.id, field.name()).into_iter() { match taken { NameTaken::Index => { let message = format!( @@ -82,7 +82,7 @@ pub(super) fn has_a_unique_default_constraint_name( }; for violation in names.constraint_namespace.constraint_name_scope_violations( - field.model().model_id(), + field.model().id, ConstraintName::Default(name.as_ref()), ctx, ) { diff --git a/psl/psl-core/src/validate/validation_pipeline/validations/indexes.rs b/psl/psl-core/src/validate/validation_pipeline/validations/indexes.rs index 9a7ac919fff7..e9bae626f374 100644 --- a/psl/psl-core/src/validate/validation_pipeline/validations/indexes.rs +++ 
b/psl/psl-core/src/validate/validation_pipeline/validations/indexes.rs @@ -14,11 +14,11 @@ pub(super) fn has_a_unique_constraint_name(index: IndexWalker<'_>, names: &super let name = index.constraint_name(ctx.connector); let model = index.model(); - for violation in names.constraint_namespace.constraint_name_scope_violations( - model.model_id(), - ConstraintName::Index(name.as_ref()), - ctx, - ) { + for violation in + names + .constraint_namespace + .constraint_name_scope_violations(model.id, ConstraintName::Index(name.as_ref()), ctx) + { let message = format!( "The given constraint name `{}` has to be unique in the following namespace: {}. Please provide a different name using the `map` argument.", name, @@ -52,7 +52,7 @@ pub(super) fn unique_index_has_a_unique_custom_name_per_model( if let Some(name) = index.name() { if names .constraint_namespace - .local_custom_name_scope_violations(model.model_id(), name.as_ref()) + .local_custom_name_scope_violations(model.id, name.as_ref()) { let message = format!( "The given custom name `{name}` has to be unique on the model. Please provide a different name for the `name` argument." 
diff --git a/psl/psl-core/src/validate/validation_pipeline/validations/models.rs b/psl/psl-core/src/validate/validation_pipeline/validations/models.rs index a8c222c91600..a53063624b2d 100644 --- a/psl/psl-core/src/validate/validation_pipeline/validations/models.rs +++ b/psl/psl-core/src/validate/validation_pipeline/validations/models.rs @@ -1,6 +1,5 @@ use super::database_name::validate_db_name; use crate::{ - ast, datamodel_connector::{walker_ext_traits::*, ConnectorCapability}, diagnostics::DatamodelError, parser_database::ast::{WithName, WithSpan}, @@ -77,7 +76,7 @@ pub(super) fn has_a_unique_primary_key_name(model: ModelWalker<'_>, names: &supe ); for violation in names.constraint_namespace.constraint_name_scope_violations( - model.model_id(), + model.id, super::constraint_namespace::ConstraintName::PrimaryKey(name.as_ref()), ctx, ) { @@ -115,7 +114,7 @@ pub(super) fn has_a_unique_custom_primary_key_name_per_model( if let Some(name) = pk.name() { if names .constraint_namespace - .local_custom_name_scope_violations(model.model_id(), name.as_ref()) + .local_custom_name_scope_violations(model.id, name.as_ref()) { let message = format!( "The given custom name `{name}` has to be unique on the model. Please provide a different name for the `name` argument." 
@@ -362,15 +361,16 @@ pub(super) fn schema_attribute_missing(model: ModelWalker<'_>, ctx: &mut Context pub(super) fn database_name_clashes(ctx: &mut Context<'_>) { // (schema_name, model_database_name) -> ModelId - let mut database_names: HashMap<(Option<&str>, &str), ast::ModelId> = HashMap::with_capacity(ctx.db.models_count()); + let mut database_names: HashMap<(Option<&str>, &str), parser_database::ModelId> = + HashMap::with_capacity(ctx.db.models_count()); for model in ctx.db.walk_models().chain(ctx.db.walk_views()) { let key = (model.schema().map(|(name, _)| name), model.database_name()); - match database_names.insert(key, model.model_id()) { + match database_names.insert(key, model.id) { // Two branches because we want to put the error on the @@map attribute, and it can be // on either model. Some(existing) if model.mapped_name().is_some() => { - let existing_model_name = &ctx.db.ast()[existing].name(); + let existing_model_name = &ctx.db.ast(existing.0)[existing.1].name(); let attribute = model .ast_model() .attributes @@ -385,7 +385,7 @@ pub(super) fn database_name_clashes(ctx: &mut Context<'_>) { )); } Some(existing) => { - let existing_model = &ctx.db.ast()[existing]; + let existing_model = &ctx.db.ast(existing.0)[existing.1]; let attribute = existing_model .attributes .iter() diff --git a/psl/psl-core/src/validate/validation_pipeline/validations/names.rs b/psl/psl-core/src/validate/validation_pipeline/validations/names.rs index 0c818610f082..fdc0afaf7b8b 100644 --- a/psl/psl-core/src/validate/validation_pipeline/validations/names.rs +++ b/psl/psl-core/src/validate/validation_pipeline/validations/names.rs @@ -1,6 +1,8 @@ use super::constraint_namespace::ConstraintNamespace; -use crate::ast::ModelId; -use parser_database::walkers::{RelationFieldId, RelationName}; +use parser_database::{ + walkers::{RelationFieldId, RelationName}, + ModelId, +}; use std::collections::{HashMap, HashSet}; type RelationIdentifier<'db> = (ModelId, ModelId, RelationName<'db>); 
@@ -28,11 +30,11 @@ impl<'db> Names<'db> { let mut primary_key_names: HashMap = HashMap::new(); for model in ctx.db.walk_models().chain(ctx.db.walk_views()) { - let model_id = model.model_id(); + let model_id = model.id; for field in model.relation_fields() { - let model_id = field.model().model_id(); - let related_model_id = field.related_model().model_id(); + let model_id = field.model().id; + let related_model_id = field.related_model().id; let identifier = (model_id, related_model_id, field.relation_name()); let field_ids = relation_names.entry(identifier).or_default(); @@ -51,7 +53,7 @@ impl<'db> Names<'db> { } if let Some(pk) = model.primary_key().and_then(|pk| pk.name()) { - primary_key_names.insert(model.model_id(), pk); + primary_key_names.insert(model.id, pk); } } diff --git a/psl/psl-core/src/validate/validation_pipeline/validations/relation_fields.rs b/psl/psl-core/src/validate/validation_pipeline/validations/relation_fields.rs index 146f119f149d..6d1b9cb51669 100644 --- a/psl/psl-core/src/validate/validation_pipeline/validations/relation_fields.rs +++ b/psl/psl-core/src/validate/validation_pipeline/validations/relation_fields.rs @@ -59,7 +59,7 @@ pub(super) fn ambiguity(field: RelationFieldWalker<'_>, names: &Names<'_>) -> Re let model = field.model(); let related_model = field.related_model(); - let identifier = (model.model_id(), related_model.model_id(), field.relation_name()); + let identifier = (model.id, related_model.id, field.relation_name()); match names.relation_names.get(&identifier) { Some(fields) if fields.len() > 1 => { diff --git a/psl/psl-core/src/validate/validation_pipeline/validations/relations.rs b/psl/psl-core/src/validate/validation_pipeline/validations/relations.rs index ec78b9a61a3f..e834fe3b54ea 100644 --- a/psl/psl-core/src/validate/validation_pipeline/validations/relations.rs +++ b/psl/psl-core/src/validate/validation_pipeline/validations/relations.rs @@ -38,7 +38,7 @@ pub(super) fn has_a_unique_constraint_name( let model = 
relation.referencing_model(); for violation in names.constraint_namespace.constraint_name_scope_violations( - model.model_id(), + model.id, ConstraintName::Relation(name.as_ref()), ctx, ) { diff --git a/psl/psl/src/lib.rs b/psl/psl/src/lib.rs index 9d7fb8f26168..d1c38eaf4330 100644 --- a/psl/psl/src/lib.rs +++ b/psl/psl/src/lib.rs @@ -44,7 +44,7 @@ pub fn parse_schema(file: impl Into) -> Result ValidatedSchema { pub fn parse_without_validation(file: SourceFile, connector_registry: ConnectorRegistry<'_>) -> ValidatedSchema { psl_core::parse_without_validation(file, connector_registry) } +/// The most general API for dealing with Prisma schemas. It accumulates what analysis and +/// validation information it can, and returns it along with any error and warning diagnostics. +pub fn validate_multi_file(files: Vec<(String, SourceFile)>) -> ValidatedSchema { + psl_core::validate_multi_file(files, builtin_connectors::BUILTIN_CONNECTORS) +} diff --git a/psl/psl/tests/common/asserts.rs b/psl/psl/tests/common/asserts.rs index 81d5472d4c16..4278f5cb77e5 100644 --- a/psl/psl/tests/common/asserts.rs +++ b/psl/psl/tests/common/asserts.rs @@ -3,7 +3,7 @@ use std::fmt::Debug; use either::Either::{Left, Right}; use psl::datamodel_connector::Connector; use psl::diagnostics::DatamodelWarning; -use psl::parser_database::{walkers, IndexAlgorithm, OperatorClass, ReferentialAction, ScalarType, SortOrder}; +use psl::parser_database::{walkers, IndexAlgorithm, ModelId, OperatorClass, ReferentialAction, ScalarType, SortOrder}; use psl::schema_ast::ast::WithDocumentation; use psl::schema_ast::ast::{self, FieldArity}; use psl::{Diagnostics, StringFromEnvVar}; @@ -67,7 +67,7 @@ pub(crate) trait CompositeFieldAssert { pub(crate) trait RelationFieldAssert { fn assert_ignored(&self, ignored: bool) -> &Self; - fn assert_relation_to(&self, model_id: ast::ModelId) -> &Self; + fn assert_relation_to(&self, model_id: ModelId) -> &Self; fn assert_relation_delete_strategy(&self, action: ReferentialAction) 
-> &Self; fn assert_relation_update_strategy(&self, action: ReferentialAction) -> &Self; } @@ -151,7 +151,7 @@ impl<'a> DatamodelAssert<'a> for psl::ValidatedSchema { impl<'a> RelationFieldAssert for walkers::RelationFieldWalker<'a> { #[track_caller] - fn assert_relation_to(&self, model_id: ast::ModelId) -> &Self { + fn assert_relation_to(&self, model_id: ModelId) -> &Self { assert!(self.references_model(model_id)); self } diff --git a/psl/psl/tests/config/nice_warnings.rs b/psl/psl/tests/config/nice_warnings.rs index 4e1c7ed2bfad..955cbbd89fd3 100644 --- a/psl/psl/tests/config/nice_warnings.rs +++ b/psl/psl/tests/config/nice_warnings.rs @@ -14,6 +14,6 @@ fn nice_warning_for_deprecated_generator_preview_feature() { res.warnings.assert_is(DatamodelWarning::new_feature_deprecated( "middlewares", - Span::new(88, 103), + Span::new(88, 103, psl_core::parser_database::FileId::ZERO), )); } diff --git a/psl/psl/tests/datamodel_tests.rs b/psl/psl/tests/datamodel_tests.rs index b950ff6fc2fd..ba723194a4fd 100644 --- a/psl/psl/tests/datamodel_tests.rs +++ b/psl/psl/tests/datamodel_tests.rs @@ -8,6 +8,7 @@ mod capabilities; mod common; mod config; mod functions; +mod multi_file; mod parsing; mod reformat; mod types; diff --git a/psl/psl/tests/multi_file/basic.rs b/psl/psl/tests/multi_file/basic.rs new file mode 100644 index 000000000000..fd1c2d0e4f95 --- /dev/null +++ b/psl/psl/tests/multi_file/basic.rs @@ -0,0 +1,114 @@ +use crate::common::expect; + +fn expect_errors(schemas: &[[&'static str; 2]], expectation: expect_test::Expect) { + let out = psl::validate_multi_file( + schemas + .iter() + .map(|[file_name, contents]| ((*file_name).into(), (*contents).into())) + .collect(), + ); + + let actual = out.render_diagnostics(); + expectation.assert_eq(&actual) +} + +#[test] +fn multi_file_errors_single_file() { + let files: &[[&'static str; 2]] = &[["a.prisma", "meow"]]; + + let expected = expect![[r#" + error: Error validating: This line is invalid. 
It does not start with any known Prisma schema keyword. + --> a.prisma:1 +  |  +  |  +  1 | meow +  |  + "#]]; + expect_errors(files, expected); +} + +#[test] +fn multi_file_errors_two_files() { + let files: &[[&'static str; 2]] = &[ + ["a.prisma", "meow"], + ["b.prisma", "woof woof"], + ["c.prisma", "choo choo"], + ]; + + let expected = expect![[r#" + error: Error validating: This line is invalid. It does not start with any known Prisma schema keyword. + --> a.prisma:1 +  |  +  |  +  1 | meow +  |  + error: Error validating: This line is invalid. It does not start with any known Prisma schema keyword. + --> b.prisma:1 +  |  +  |  +  1 | woof woof +  |  + error: Error validating: This line is invalid. It does not start with any known Prisma schema keyword. + --> c.prisma:1 +  |  +  |  +  1 | choo choo +  |  + "#]]; + expect_errors(files, expected); +} + +#[test] +fn multi_file_errors_relation() { + let files: &[[&'static str; 2]] = &[ + [ + "b.prisma", + r#" +generator client { + provider = "prisma-client-js" +} + +model Post { + id Int @id + test String @db.Text + user_id Int + user User @relation(fields: [user_id], references: [id]) +} +"#, + ], + [ + "a.prisma", + r#" +datasource db { + provider = "postgresql" + url = env("TEST_DATABASE_URL") +} + +model User { + id Int @id + test String @db.FunnyText + post_id Int @unique + post Post +} + +"#, + ], + ]; + + let expected = expect![[r#" + error: Native type FunnyText is not supported for postgresql connector. + --> a.prisma:9 +  |  +  8 |  id Int @id +  9 |  test String @db.FunnyText +  |  + error: Error parsing attribute "@relation": A one-to-one relation must use unique fields on the defining side. Either add an `@unique` attribute to the field `user_id`, or change the relation to one-to-many. 
+ --> b.prisma:10 +  |  +  9 |  user_id Int + 10 |  user User @relation(fields: [user_id], references: [id]) + 11 | } +  |  + "#]]; + expect_errors(files, expected); +} diff --git a/psl/psl/tests/multi_file/mod.rs b/psl/psl/tests/multi_file/mod.rs new file mode 100644 index 000000000000..1bca5f8cba77 --- /dev/null +++ b/psl/psl/tests/multi_file/mod.rs @@ -0,0 +1 @@ +mod basic; diff --git a/psl/psl/tests/validation_tests.rs b/psl/psl/tests/validation_tests.rs index b6efaa4215c1..6d0120cf933e 100644 --- a/psl/psl/tests/validation_tests.rs +++ b/psl/psl/tests/validation_tests.rs @@ -10,7 +10,7 @@ fn parse_schema_fail_on_diagnostics(file: impl Into) -> Result Ok(schema), diff --git a/psl/schema-ast/src/ast/identifier.rs b/psl/schema-ast/src/ast/identifier.rs index d1c72732a54e..92eccefecf1a 100644 --- a/psl/schema-ast/src/ast/identifier.rs +++ b/psl/schema-ast/src/ast/identifier.rs @@ -1,4 +1,5 @@ use super::{Span, WithSpan}; +use diagnostics::FileId; /// An identifier. #[derive(Debug, Clone, PartialEq)] @@ -9,17 +10,17 @@ pub struct Identifier { pub span: Span, } -impl WithSpan for Identifier { - fn span(&self) -> Span { - self.span - } -} - -impl From> for Identifier { - fn from(pair: pest::iterators::Pair<'_, T>) -> Self { +impl Identifier { + pub(crate) fn new(pair: pest::iterators::Pair<'_, T>, file_id: FileId) -> Self { Identifier { name: pair.as_str().to_owned(), - span: pair.as_span().into(), + span: (file_id, pair.as_span()).into(), } } } + +impl WithSpan for Identifier { + fn span(&self) -> Span { + self.span + } +} diff --git a/psl/schema-ast/src/parser/parse_arguments.rs b/psl/schema-ast/src/parser/parse_arguments.rs index 67b5d930f83b..b2579c6e6cde 100644 --- a/psl/schema-ast/src/parser/parse_arguments.rs +++ b/psl/schema-ast/src/parser/parse_arguments.rs @@ -4,20 +4,25 @@ use super::{ Rule, }; use crate::ast; -use diagnostics::Diagnostics; +use diagnostics::{Diagnostics, FileId}; -pub(crate) fn parse_arguments_list(token: Pair<'_>, arguments: &mut 
ast::ArgumentsList, diagnostics: &mut Diagnostics) { +pub(crate) fn parse_arguments_list( + token: Pair<'_>, + arguments: &mut ast::ArgumentsList, + diagnostics: &mut Diagnostics, + file_id: FileId, +) { debug_assert_eq!(token.as_rule(), Rule::arguments_list); for current in token.into_inner() { let current_span = current.as_span(); match current.as_rule() { // This is a named arg. - Rule::named_argument => arguments.arguments.push(parse_named_arg(current, diagnostics)), + Rule::named_argument => arguments.arguments.push(parse_named_arg(current, diagnostics, file_id)), // This is an unnamed arg. Rule::expression => arguments.arguments.push(ast::Argument { name: None, - value: parse_expression(current, diagnostics), - span: ast::Span::from(current_span), + value: parse_expression(current, diagnostics, file_id), + span: ast::Span::from((file_id, current_span)), }), // This is an argument without a value. // It is not valid, but we parse it for autocompletion. @@ -26,17 +31,19 @@ pub(crate) fn parse_arguments_list(token: Pair<'_>, arguments: &mut ast::Argumen .into_inner() .find(|tok| tok.as_rule() == Rule::identifier) .unwrap(); - arguments.empty_arguments.push(ast::EmptyArgument { name: name.into() }) + arguments.empty_arguments.push(ast::EmptyArgument { + name: ast::Identifier::new(name, file_id), + }) } Rule::trailing_comma => { - arguments.trailing_comma = Some(current.as_span().into()); + arguments.trailing_comma = Some((file_id, current.as_span()).into()); } _ => parsing_catch_all(¤t, "attribute arguments"), } } } -fn parse_named_arg(pair: Pair<'_>, diagnostics: &mut Diagnostics) -> ast::Argument { +fn parse_named_arg(pair: Pair<'_>, diagnostics: &mut Diagnostics, file_id: FileId) -> ast::Argument { debug_assert_eq!(pair.as_rule(), Rule::named_argument); let mut name: Option = None; let mut argument: Option = None; @@ -44,8 +51,8 @@ fn parse_named_arg(pair: Pair<'_>, diagnostics: &mut Diagnostics) -> ast::Argume for current in pair.into_inner() { match 
current.as_rule() { - Rule::identifier => name = Some(current.into()), - Rule::expression => argument = Some(parse_expression(current, diagnostics)), + Rule::identifier => name = Some(ast::Identifier::new(current, file_id)), + Rule::expression => argument = Some(parse_expression(current, diagnostics, file_id)), _ => parsing_catch_all(¤t, "attribute argument"), } } @@ -54,7 +61,7 @@ fn parse_named_arg(pair: Pair<'_>, diagnostics: &mut Diagnostics) -> ast::Argume (Some(name), Some(value)) => ast::Argument { name: Some(name), value, - span: ast::Span::from(pair_span), + span: ast::Span::from((file_id, pair_span)), }, _ => panic!("Encountered impossible attribute arg during parsing: {pair_str:?}"), } diff --git a/psl/schema-ast/src/parser/parse_attribute.rs b/psl/schema-ast/src/parser/parse_attribute.rs index 16983303097b..6420d796ad6b 100644 --- a/psl/schema-ast/src/parser/parse_attribute.rs +++ b/psl/schema-ast/src/parser/parse_attribute.rs @@ -3,16 +3,21 @@ use super::{ Rule, }; use crate::{ast::*, parser::parse_arguments::parse_arguments_list}; +use diagnostics::FileId; -pub(crate) fn parse_attribute(pair: Pair<'_>, diagnostics: &mut diagnostics::Diagnostics) -> Attribute { - let span = Span::from(pair.as_span()); +pub(crate) fn parse_attribute( + pair: Pair<'_>, + diagnostics: &mut diagnostics::Diagnostics, + file_id: FileId, +) -> Attribute { + let span = Span::from((file_id, pair.as_span())); let mut name = None; let mut arguments: ArgumentsList = ArgumentsList::default(); for current in pair.into_inner() { match current.as_rule() { - Rule::path => name = Some(current.into()), - Rule::arguments_list => parse_arguments_list(current, &mut arguments, diagnostics), + Rule::path => name = Some(Identifier::new(current, file_id)), + Rule::arguments_list => parse_arguments_list(current, &mut arguments, diagnostics, file_id), _ => parsing_catch_all(¤t, "attribute"), } } diff --git a/psl/schema-ast/src/parser/parse_composite_type.rs 
b/psl/schema-ast/src/parser/parse_composite_type.rs index 6ada40e61e16..28873fbf701f 100644 --- a/psl/schema-ast/src/parser/parse_composite_type.rs +++ b/psl/schema-ast/src/parser/parse_composite_type.rs @@ -6,12 +6,13 @@ use super::{ Rule, }; use crate::ast; -use diagnostics::{DatamodelError, Diagnostics, Span}; +use diagnostics::{DatamodelError, Diagnostics, FileId, Span}; pub(crate) fn parse_composite_type( pair: Pair<'_>, doc_comment: Option>, diagnostics: &mut Diagnostics, + file_id: FileId, ) -> ast::CompositeType { let pair_span = pair.as_span(); let mut name: Option = None; @@ -22,53 +23,53 @@ pub(crate) fn parse_composite_type( match current.as_rule() { Rule::BLOCK_OPEN | Rule::BLOCK_CLOSE => {} Rule::TYPE_KEYWORD => (), - Rule::identifier => name = Some(current.into()), + Rule::identifier => name = Some(ast::Identifier::new(current, file_id)), Rule::model_contents => { let mut pending_field_comment: Option> = None; - inner_span = Some(current.as_span().into()); + inner_span = Some((file_id, current.as_span()).into()); for item in current.into_inner() { let current_span = item.as_span(); match item.as_rule() { Rule::block_attribute => { - let attr = parse_attribute(item, diagnostics); + let attr = parse_attribute(item, diagnostics, file_id); let err = match attr.name.name.as_str() { "map" => { DatamodelError::new_validation_error( "The name of a composite type is not persisted in the database, therefore it does not need a mapped database name.", - current_span.into(), + (file_id, current_span).into(), ) } "unique" => { DatamodelError::new_validation_error( "A unique constraint should be defined in the model containing the embed.", - current_span.into(), + (file_id, current_span).into(), ) } "index" => { DatamodelError::new_validation_error( "An index should be defined in the model containing the embed.", - current_span.into(), + (file_id, current_span).into(), ) } "fulltext" => { DatamodelError::new_validation_error( "A fulltext index should be defined in 
the model containing the embed.", - current_span.into(), + (file_id, current_span).into(), ) } "id" => { DatamodelError::new_validation_error( "A composite type cannot define an id.", - current_span.into(), + (file_id, current_span).into(), ) } _ => { DatamodelError::new_validation_error( "A composite type cannot have block-level attributes.", - current_span.into(), + (file_id, current_span).into(), ) } }; @@ -81,6 +82,7 @@ pub(crate) fn parse_composite_type( item, pending_field_comment.take(), diagnostics, + file_id, ) { Ok(field) => { for attr in field.attributes.iter() { @@ -92,7 +94,7 @@ pub(crate) fn parse_composite_type( "Defining `@{name}` attribute for a field in a composite type is not allowed." ); - DatamodelError::new_validation_error(&msg, current_span.into()) + DatamodelError::new_validation_error(&msg, (file_id, current_span).into()) } _ => continue, }; @@ -107,7 +109,7 @@ pub(crate) fn parse_composite_type( Rule::comment_block => pending_field_comment = Some(item), Rule::BLOCK_LEVEL_CATCH_ALL => diagnostics.push_error(DatamodelError::new_validation_error( "This line is not a valid field or attribute definition.", - item.as_span().into(), + (file_id, item.as_span()).into(), )), _ => parsing_catch_all(&item, "composite type"), } @@ -122,7 +124,7 @@ pub(crate) fn parse_composite_type( name, fields, documentation: doc_comment.and_then(parse_comment_block), - span: ast::Span::from(pair_span), + span: ast::Span::from((file_id, pair_span)), inner_span: inner_span.unwrap(), }, _ => panic!("Encountered impossible model declaration during parsing",), diff --git a/psl/schema-ast/src/parser/parse_enum.rs b/psl/schema-ast/src/parser/parse_enum.rs index 5e5109de1a91..2dc1f8e7e3fd 100644 --- a/psl/schema-ast/src/parser/parse_enum.rs +++ b/psl/schema-ast/src/parser/parse_enum.rs @@ -4,10 +4,15 @@ use super::{ parse_comments::*, Rule, }; -use crate::ast::{Attribute, Comment, Enum, EnumValue, Identifier}; -use diagnostics::{DatamodelError, Diagnostics, Span}; +use 
crate::ast::{self, Attribute, Comment, Enum, EnumValue, Identifier}; +use diagnostics::{DatamodelError, Diagnostics, FileId, Span}; -pub fn parse_enum(pair: Pair<'_>, doc_comment: Option>, diagnostics: &mut Diagnostics) -> Enum { +pub fn parse_enum( + pair: Pair<'_>, + doc_comment: Option>, + diagnostics: &mut Diagnostics, + file_id: FileId, +) -> Enum { let comment: Option = doc_comment.and_then(parse_comment_block); let pair_span = pair.as_span(); let mut name: Option = None; @@ -19,16 +24,16 @@ pub fn parse_enum(pair: Pair<'_>, doc_comment: Option>, diagnostics: &m for current in pairs { match current.as_rule() { Rule::BLOCK_OPEN | Rule::BLOCK_CLOSE | Rule::ENUM_KEYWORD => {} - Rule::identifier => name = Some(current.into()), + Rule::identifier => name = Some(ast::Identifier::new(current, file_id)), Rule::enum_contents => { let mut pending_value_comment = None; - inner_span = Some(current.as_span().into()); + inner_span = Some((file_id, current.as_span()).into()); for item in current.into_inner() { match item.as_rule() { - Rule::block_attribute => attributes.push(parse_attribute(item, diagnostics)), + Rule::block_attribute => attributes.push(parse_attribute(item, diagnostics, file_id)), Rule::enum_value_declaration => { - match parse_enum_value(item, pending_value_comment.take(), diagnostics) { + match parse_enum_value(item, pending_value_comment.take(), diagnostics, file_id) { Ok(enum_value) => values.push(enum_value), Err(err) => diagnostics.push_error(err), } @@ -36,7 +41,7 @@ pub fn parse_enum(pair: Pair<'_>, doc_comment: Option>, diagnostics: &m Rule::comment_block => pending_value_comment = Some(item), Rule::BLOCK_LEVEL_CATCH_ALL => diagnostics.push_error(DatamodelError::new_validation_error( "This line is not an enum value definition.", - item.as_span().into(), + (file_id, item.as_span()).into(), )), _ => parsing_catch_all(&item, "enum"), } @@ -52,7 +57,7 @@ pub fn parse_enum(pair: Pair<'_>, doc_comment: Option>, diagnostics: &m values, attributes, 
documentation: comment, - span: Span::from(pair_span), + span: Span::from((file_id, pair_span)), inner_span: inner_span.unwrap(), }, _ => panic!("Encountered impossible enum declaration during parsing, name is missing.",), @@ -63,6 +68,7 @@ fn parse_enum_value( pair: Pair<'_>, doc_comment: Option>, diagnostics: &mut Diagnostics, + file_id: FileId, ) -> Result { let (pair_str, pair_span) = (pair.as_str(), pair.as_span()); let mut name: Option = None; @@ -71,8 +77,8 @@ fn parse_enum_value( for current in pair.into_inner() { match current.as_rule() { - Rule::identifier => name = Some(current.into()), - Rule::field_attribute => attributes.push(parse_attribute(current, diagnostics)), + Rule::identifier => name = Some(ast::Identifier::new(current, file_id)), + Rule::field_attribute => attributes.push(parse_attribute(current, diagnostics, file_id)), Rule::trailing_comment => { comment = match (comment, parse_trailing_comment(current)) { (None, a) | (a, None) => a, @@ -93,7 +99,7 @@ fn parse_enum_value( name, attributes, documentation: comment, - span: Span::from(pair_span), + span: Span::from((file_id, pair_span)), }), _ => panic!("Encountered impossible enum value declaration during parsing, name is missing: {pair_str:?}",), } diff --git a/psl/schema-ast/src/parser/parse_expression.rs b/psl/schema-ast/src/parser/parse_expression.rs index c5a9b68b17fc..f252bbbc41bc 100644 --- a/psl/schema-ast/src/parser/parse_expression.rs +++ b/psl/schema-ast/src/parser/parse_expression.rs @@ -4,17 +4,21 @@ use super::{ Rule, }; use crate::ast::*; -use diagnostics::{DatamodelError, Diagnostics}; +use diagnostics::{DatamodelError, Diagnostics, FileId}; -pub(crate) fn parse_expression(token: Pair<'_>, diagnostics: &mut diagnostics::Diagnostics) -> Expression { +pub(crate) fn parse_expression( + token: Pair<'_>, + diagnostics: &mut diagnostics::Diagnostics, + file_id: FileId, +) -> Expression { let first_child = token.into_inner().next().unwrap(); - let span = 
Span::from(first_child.as_span()); + let span = Span::from((file_id, first_child.as_span())); match first_child.as_rule() { Rule::numeric_literal => Expression::NumericValue(first_child.as_str().to_string(), span), - Rule::string_literal => Expression::StringValue(parse_string_literal(first_child, diagnostics), span), + Rule::string_literal => Expression::StringValue(parse_string_literal(first_child, diagnostics, file_id), span), Rule::path => Expression::ConstantValue(first_child.as_str().to_string(), span), - Rule::function_call => parse_function(first_child, diagnostics), - Rule::array_expression => parse_array(first_child, diagnostics), + Rule::function_call => parse_function(first_child, diagnostics, file_id), + Rule::array_expression => parse_array(first_child, diagnostics, file_id), _ => unreachable!( "Encountered impossible literal during parsing: {:?}", first_child.tokens() @@ -22,7 +26,7 @@ pub(crate) fn parse_expression(token: Pair<'_>, diagnostics: &mut diagnostics::D } } -fn parse_function(pair: Pair<'_>, diagnostics: &mut Diagnostics) -> Expression { +fn parse_function(pair: Pair<'_>, diagnostics: &mut Diagnostics, file_id: FileId) -> Expression { let mut name: Option = None; let mut arguments = ArgumentsList::default(); let (pair_str, span) = (pair.as_str(), pair.as_span()); @@ -30,32 +34,32 @@ fn parse_function(pair: Pair<'_>, diagnostics: &mut Diagnostics) -> Expression { for current in pair.into_inner() { match current.as_rule() { Rule::path => name = Some(current.as_str().to_string()), - Rule::arguments_list => parse_arguments_list(current, &mut arguments, diagnostics), + Rule::arguments_list => parse_arguments_list(current, &mut arguments, diagnostics, file_id), _ => parsing_catch_all(¤t, "function"), } } match name { - Some(name) => Expression::Function(name, arguments, Span::from(span)), + Some(name) => Expression::Function(name, arguments, Span::from((file_id, span))), _ => unreachable!("Encountered impossible function during parsing: {:?}", 
pair_str), } } -fn parse_array(token: Pair<'_>, diagnostics: &mut Diagnostics) -> Expression { +fn parse_array(token: Pair<'_>, diagnostics: &mut Diagnostics, file_id: FileId) -> Expression { let mut elements: Vec = vec![]; let span = token.as_span(); for current in token.into_inner() { match current.as_rule() { - Rule::expression => elements.push(parse_expression(current, diagnostics)), + Rule::expression => elements.push(parse_expression(current, diagnostics, file_id)), _ => parsing_catch_all(¤t, "array"), } } - Expression::Array(elements, Span::from(span)) + Expression::Array(elements, Span::from((file_id, span))) } -fn parse_string_literal(token: Pair<'_>, diagnostics: &mut Diagnostics) -> String { +fn parse_string_literal(token: Pair<'_>, diagnostics: &mut Diagnostics, file_id: FileId) -> String { assert!(token.as_rule() == Rule::string_literal); let contents = token.clone().into_inner().next().unwrap(); let contents_str = contents.as_str(); @@ -98,6 +102,7 @@ fn parse_string_literal(token: Pair<'_>, diagnostics: &mut Diagnostics) -> Strin &contents_str[start..], contents.as_span().start() + start, diagnostics, + file_id, ); if let Some(char) = char { @@ -109,7 +114,7 @@ fn parse_string_literal(token: Pair<'_>, diagnostics: &mut Diagnostics) -> Strin } } (_, c) => { - let mut final_span: crate::ast::Span = contents.as_span().into(); + let mut final_span: crate::ast::Span = (file_id, contents.as_span()).into(); final_span.start += start; final_span.end = final_span.start + 1 + c.len_utf8(); diagnostics.push_error(DatamodelError::new_static( @@ -132,11 +137,13 @@ fn try_parse_unicode_codepoint( slice: &str, slice_offset: usize, diagnostics: &mut Diagnostics, + file_id: FileId, ) -> (usize, Option) { let unicode_sequence_error = |consumed| { let span = crate::ast::Span { start: slice_offset, end: (slice_offset + slice.len()).min(slice_offset + consumed), + file_id, }; DatamodelError::new_static("Invalid unicode escape sequence.", span) }; diff --git 
a/psl/schema-ast/src/parser/parse_field.rs b/psl/schema-ast/src/parser/parse_field.rs index 6f11da80aaf5..488a315b66b5 100644 --- a/psl/schema-ast/src/parser/parse_field.rs +++ b/psl/schema-ast/src/parser/parse_field.rs @@ -5,8 +5,8 @@ use super::{ parse_types::parse_field_type, Rule, }; -use crate::ast::*; -use diagnostics::{DatamodelError, Diagnostics}; +use crate::ast::{self, *}; +use diagnostics::{DatamodelError, Diagnostics, FileId}; pub(crate) fn parse_field( model_name: &str, @@ -14,6 +14,7 @@ pub(crate) fn parse_field( pair: Pair<'_>, block_comment: Option>, diagnostics: &mut Diagnostics, + file_id: FileId, ) -> Result { let pair_span = pair.as_span(); let mut name: Option = None; @@ -23,15 +24,15 @@ pub(crate) fn parse_field( for current in pair.into_inner() { match current.as_rule() { - Rule::identifier => name = Some(current.into()), - Rule::field_type => field_type = Some(parse_field_type(current, diagnostics)?), + Rule::identifier => name = Some(ast::Identifier::new(current, file_id)), + Rule::field_type => field_type = Some(parse_field_type(current, diagnostics, file_id)?), Rule::LEGACY_COLON => { return Err(DatamodelError::new_legacy_parser_error( "Field declarations don't require a `:`.", - current.as_span().into(), + (file_id, current.as_span()).into(), )) } - Rule::field_attribute => attributes.push(parse_attribute(current, diagnostics)), + Rule::field_attribute => attributes.push(parse_attribute(current, diagnostics, file_id)), Rule::trailing_comment => { comment = match (comment, parse_trailing_comment(current)) { (c, None) | (None, c) => c, @@ -51,13 +52,13 @@ pub(crate) fn parse_field( arity, attributes, documentation: comment, - span: Span::from(pair_span), + span: Span::from((file_id, pair_span)), }), _ => Err(DatamodelError::new_model_validation_error( "This field declaration is invalid. 
It is either missing a name or a type.", container_type, model_name, - pair_span.into(), + (file_id, pair_span).into(), )), } } diff --git a/psl/schema-ast/src/parser/parse_model.rs b/psl/schema-ast/src/parser/parse_model.rs index f2aec884d61f..549ba52c5320 100644 --- a/psl/schema-ast/src/parser/parse_model.rs +++ b/psl/schema-ast/src/parser/parse_model.rs @@ -5,10 +5,15 @@ use super::{ parse_field::parse_field, Rule, }; -use crate::ast::*; -use diagnostics::{DatamodelError, Diagnostics}; +use crate::ast::{self, *}; +use diagnostics::{DatamodelError, Diagnostics, FileId}; -pub(crate) fn parse_model(pair: Pair<'_>, doc_comment: Option>, diagnostics: &mut Diagnostics) -> Model { +pub(crate) fn parse_model( + pair: Pair<'_>, + doc_comment: Option>, + diagnostics: &mut Diagnostics, + file_id: FileId, +) -> Model { let pair_span = pair.as_span(); let mut name: Option = None; let mut attributes: Vec = Vec::new(); @@ -17,19 +22,20 @@ pub(crate) fn parse_model(pair: Pair<'_>, doc_comment: Option>, diagnos for current in pair.into_inner() { match current.as_rule() { Rule::MODEL_KEYWORD | Rule::BLOCK_OPEN | Rule::BLOCK_CLOSE => {} - Rule::identifier => name = Some(current.into()), + Rule::identifier => name = Some(ast::Identifier::new(current, file_id)), Rule::model_contents => { let mut pending_field_comment: Option> = None; for item in current.into_inner() { match item.as_rule() { - Rule::block_attribute => attributes.push(parse_attribute(item, diagnostics)), + Rule::block_attribute => attributes.push(parse_attribute(item, diagnostics, file_id)), Rule::field_declaration => match parse_field( &name.as_ref().unwrap().name, "model", item, pending_field_comment.take(), diagnostics, + file_id, ) { Ok(field) => fields.push(field), Err(err) => diagnostics.push_error(err), @@ -37,7 +43,7 @@ pub(crate) fn parse_model(pair: Pair<'_>, doc_comment: Option>, diagnos Rule::comment_block => pending_field_comment = Some(item), Rule::BLOCK_LEVEL_CATCH_ALL => 
diagnostics.push_error(DatamodelError::new_validation_error( "This line is not a valid field or attribute definition.", - item.as_span().into(), + (file_id, item.as_span()).into(), )), _ => parsing_catch_all(&item, "model"), } @@ -54,7 +60,7 @@ pub(crate) fn parse_model(pair: Pair<'_>, doc_comment: Option>, diagnos attributes, documentation: doc_comment.and_then(parse_comment_block), is_view: false, - span: Span::from(pair_span), + span: Span::from((file_id, pair_span)), }, _ => panic!("Encountered impossible model declaration during parsing",), } diff --git a/psl/schema-ast/src/parser/parse_schema.rs b/psl/schema-ast/src/parser/parse_schema.rs index 6782caab9e44..eb26a48478b0 100644 --- a/psl/schema-ast/src/parser/parse_schema.rs +++ b/psl/schema-ast/src/parser/parse_schema.rs @@ -3,11 +3,11 @@ use super::{ parse_source_and_generator::parse_config_block, parse_view::parse_view, PrismaDatamodelParser, Rule, }; use crate::ast::*; -use diagnostics::{DatamodelError, Diagnostics}; +use diagnostics::{DatamodelError, Diagnostics, FileId}; use pest::Parser; /// Parse a PSL string and return its AST. 
-pub fn parse_schema(datamodel_string: &str, diagnostics: &mut Diagnostics) -> SchemaAst { +pub fn parse_schema(datamodel_string: &str, diagnostics: &mut Diagnostics, file_id: FileId) -> SchemaAst { let datamodel_result = PrismaDatamodelParser::parse(Rule::schema, datamodel_string); match datamodel_result { @@ -24,26 +24,26 @@ pub fn parse_schema(datamodel_string: &str, diagnostics: &mut Diagnostics) -> Sc match keyword.as_rule() { Rule::TYPE_KEYWORD => { - top_level_definitions.push(Top::CompositeType(parse_composite_type(current, pending_block_comment.take(), diagnostics))) + top_level_definitions.push(Top::CompositeType(parse_composite_type(current, pending_block_comment.take(), diagnostics, file_id))) } Rule::MODEL_KEYWORD => { - top_level_definitions.push(Top::Model(parse_model(current, pending_block_comment.take(), diagnostics))) + top_level_definitions.push(Top::Model(parse_model(current, pending_block_comment.take(), diagnostics, file_id))) } Rule::VIEW_KEYWORD => { - top_level_definitions.push(Top::Model(parse_view(current, pending_block_comment.take(), diagnostics))) + top_level_definitions.push(Top::Model(parse_view(current, pending_block_comment.take(), diagnostics, file_id))) } _ => unreachable!(), } }, - Rule::enum_declaration => top_level_definitions.push(Top::Enum(parse_enum(current,pending_block_comment.take(), diagnostics))), + Rule::enum_declaration => top_level_definitions.push(Top::Enum(parse_enum(current,pending_block_comment.take(), diagnostics, file_id))), Rule::config_block => { - top_level_definitions.push(parse_config_block(current, diagnostics)); + top_level_definitions.push(parse_config_block(current, diagnostics, file_id)); }, Rule::type_alias => { let error = DatamodelError::new_validation_error( "Invalid type definition. 
Please check the documentation in https://pris.ly/d/composite-types", - current.as_span().into() + (file_id, current.as_span()).into() ); diagnostics.push_error(error); @@ -62,12 +62,12 @@ pub fn parse_schema(datamodel_string: &str, diagnostics: &mut Diagnostics) -> Sc Rule::EOI => {} Rule::CATCH_ALL => diagnostics.push_error(DatamodelError::new_validation_error( "This line is invalid. It does not start with any known Prisma schema keyword.", - current.as_span().into(), + (file_id, current.as_span()).into(), )), // TODO: Add view when we want it to be more visible as a feature. Rule::arbitrary_block => diagnostics.push_error(DatamodelError::new_validation_error( "This block is invalid. It does not start with any known Prisma schema keyword. Valid keywords include \'model\', \'enum\', \'type\', \'datasource\' and \'generator\'.", - current.as_span().into(), + (file_id, current.as_span()).into(), )), Rule::empty_lines => (), _ => unreachable!(), @@ -89,7 +89,7 @@ pub fn parse_schema(datamodel_string: &str, diagnostics: &mut Diagnostics) -> Sc _ => panic!("Could not construct parsing error. 
This should never happend."), }; - diagnostics.push_error(DatamodelError::new_parser_error(expected, location.into())); + diagnostics.push_error(DatamodelError::new_parser_error(expected, (file_id, location).into())); SchemaAst { tops: Vec::new() } } diff --git a/psl/schema-ast/src/parser/parse_source_and_generator.rs b/psl/schema-ast/src/parser/parse_source_and_generator.rs index d5abb6935fca..4c8285e0b5f6 100644 --- a/psl/schema-ast/src/parser/parse_source_and_generator.rs +++ b/psl/schema-ast/src/parser/parse_source_and_generator.rs @@ -4,11 +4,10 @@ use super::{ parse_expression::parse_expression, Rule, }; -use crate::ast::*; -use diagnostics::{DatamodelError, Diagnostics}; +use crate::ast::{self, *}; +use diagnostics::{DatamodelError, Diagnostics, FileId}; -#[track_caller] -pub(crate) fn parse_config_block(pair: Pair<'_>, diagnostics: &mut Diagnostics) -> Top { +pub(crate) fn parse_config_block(pair: Pair<'_>, diagnostics: &mut Diagnostics, file_id: FileId) -> Top { let pair_span = pair.as_span(); let mut name: Option = None; let mut properties = Vec::new(); @@ -19,10 +18,10 @@ pub(crate) fn parse_config_block(pair: Pair<'_>, diagnostics: &mut Diagnostics) for current in pair.into_inner() { match current.as_rule() { Rule::config_contents => { - inner_span = Some(current.as_span().into()); + inner_span = Some((file_id, current.as_span()).into()); for item in current.into_inner() { match item.as_rule() { - Rule::key_value => properties.push(parse_key_value(item, diagnostics)), + Rule::key_value => properties.push(parse_key_value(item, diagnostics, file_id)), Rule::comment_block => comment = parse_comment_block(item), Rule::BLOCK_LEVEL_CATCH_ALL => { let msg = format!( @@ -30,14 +29,14 @@ pub(crate) fn parse_config_block(pair: Pair<'_>, diagnostics: &mut Diagnostics) kw.unwrap_or("configuration block") ); - let err = DatamodelError::new_validation_error(&msg, item.as_span().into()); + let err = DatamodelError::new_validation_error(&msg, (file_id, 
item.as_span()).into()); diagnostics.push_error(err); } _ => parsing_catch_all(&item, "source"), } } } - Rule::identifier => name = Some(current.into()), + Rule::identifier => name = Some(ast::Identifier::new(current, file_id)), Rule::DATASOURCE_KEYWORD | Rule::GENERATOR_KEYWORD => kw = Some(current.as_str()), Rule::BLOCK_OPEN | Rule::BLOCK_CLOSE => {} @@ -50,28 +49,28 @@ pub(crate) fn parse_config_block(pair: Pair<'_>, diagnostics: &mut Diagnostics) name: name.unwrap(), properties, documentation: comment, - span: Span::from(pair_span), + span: Span::from((file_id, pair_span)), inner_span: inner_span.unwrap(), }), Some("generator") => Top::Generator(GeneratorConfig { name: name.unwrap(), properties, documentation: comment, - span: Span::from(pair_span), + span: Span::from((file_id, pair_span)), }), _ => unreachable!(), } } -fn parse_key_value(pair: Pair<'_>, diagnostics: &mut Diagnostics) -> ConfigBlockProperty { +fn parse_key_value(pair: Pair<'_>, diagnostics: &mut Diagnostics, file_id: FileId) -> ConfigBlockProperty { let mut name: Option = None; let mut value: Option = None; let (pair_span, pair_str) = (pair.as_span(), pair.as_str()); for current in pair.into_inner() { match current.as_rule() { - Rule::identifier => name = Some(current.into()), - Rule::expression => value = Some(parse_expression(current, diagnostics)), + Rule::identifier => name = Some(ast::Identifier::new(current, file_id)), + Rule::expression => value = Some(parse_expression(current, diagnostics, file_id)), Rule::trailing_comment => (), _ => unreachable!( "Encountered impossible source property declaration during parsing: {:?}", @@ -84,7 +83,7 @@ fn parse_key_value(pair: Pair<'_>, diagnostics: &mut Diagnostics) -> ConfigBlock (Some(name), value) => ConfigBlockProperty { name, value, - span: Span::from(pair_span), + span: Span::from((file_id, pair_span)), }, _ => unreachable!( "Encountered impossible source property declaration during parsing: {:?}", diff --git 
a/psl/schema-ast/src/parser/parse_types.rs b/psl/schema-ast/src/parser/parse_types.rs index 7629ae636f82..d22cfe986fd7 100644 --- a/psl/schema-ast/src/parser/parse_types.rs +++ b/psl/schema-ast/src/parser/parse_types.rs @@ -1,47 +1,48 @@ use super::{helpers::Pair, Rule}; use crate::{ast::*, parser::parse_expression::parse_expression}; -use diagnostics::{DatamodelError, Diagnostics}; +use diagnostics::{DatamodelError, Diagnostics, FileId}; pub fn parse_field_type( pair: Pair<'_>, diagnostics: &mut Diagnostics, + file_id: FileId, ) -> Result<(FieldArity, FieldType), DatamodelError> { assert!(pair.as_rule() == Rule::field_type); let current = pair.into_inner().next().unwrap(); match current.as_rule() { Rule::optional_type => Ok(( FieldArity::Optional, - parse_base_type(current.into_inner().next().unwrap(), diagnostics), + parse_base_type(current.into_inner().next().unwrap(), diagnostics, file_id), )), - Rule::base_type => Ok((FieldArity::Required, parse_base_type(current, diagnostics))), + Rule::base_type => Ok((FieldArity::Required, parse_base_type(current, diagnostics, file_id))), Rule::list_type => Ok(( FieldArity::List, - parse_base_type(current.into_inner().next().unwrap(), diagnostics), + parse_base_type(current.into_inner().next().unwrap(), diagnostics, file_id), )), Rule::legacy_required_type => Err(DatamodelError::new_legacy_parser_error( "Fields are required by default, `!` is no longer required.", - current.as_span().into(), + (file_id, current.as_span()).into(), )), Rule::legacy_list_type => Err(DatamodelError::new_legacy_parser_error( "To specify a list, please use `Type[]` instead of `[Type]`.", - current.as_span().into(), + (file_id, current.as_span()).into(), )), Rule::unsupported_optional_list_type => Err(DatamodelError::new_legacy_parser_error( "Optional lists are not supported. 
Use either `Type[]` or `Type?`.", - current.as_span().into(), + (file_id, current.as_span()).into(), )), _ => unreachable!("Encountered impossible field during parsing: {:?}", current.tokens()), } } -fn parse_base_type(pair: Pair<'_>, diagnostics: &mut Diagnostics) -> FieldType { +fn parse_base_type(pair: Pair<'_>, diagnostics: &mut Diagnostics, file_id: FileId) -> FieldType { let current = pair.into_inner().next().unwrap(); match current.as_rule() { Rule::identifier => FieldType::Supported(Identifier { name: current.as_str().to_string(), - span: Span::from(current.as_span()), + span: Span::from((file_id, current.as_span())), }), - Rule::unsupported_type => match parse_expression(current, diagnostics) { + Rule::unsupported_type => match parse_expression(current, diagnostics, file_id) { Expression::StringValue(lit, span) => FieldType::Unsupported(lit, span), _ => unreachable!("Encountered impossible type during parsing"), }, diff --git a/psl/schema-ast/src/parser/parse_view.rs b/psl/schema-ast/src/parser/parse_view.rs index 38066067b7a8..546c6e775c67 100644 --- a/psl/schema-ast/src/parser/parse_view.rs +++ b/psl/schema-ast/src/parser/parse_view.rs @@ -6,9 +6,14 @@ use super::{ Rule, }; use crate::ast::{self, Attribute}; -use diagnostics::{DatamodelError, Diagnostics}; +use diagnostics::{DatamodelError, Diagnostics, FileId}; -pub(crate) fn parse_view(pair: Pair<'_>, doc_comment: Option>, diagnostics: &mut Diagnostics) -> ast::Model { +pub(crate) fn parse_view( + pair: Pair<'_>, + doc_comment: Option>, + diagnostics: &mut Diagnostics, + file_id: FileId, +) -> ast::Model { let pair_span = pair.as_span(); let mut name: Option = None; let mut fields: Vec = vec![]; @@ -17,19 +22,20 @@ pub(crate) fn parse_view(pair: Pair<'_>, doc_comment: Option>, diagnost for current in pair.into_inner() { match current.as_rule() { Rule::VIEW_KEYWORD | Rule::BLOCK_OPEN | Rule::BLOCK_CLOSE => (), - Rule::identifier => name = Some(current.into()), + Rule::identifier => name = 
Some(ast::Identifier::new(current, file_id)), Rule::model_contents => { let mut pending_field_comment: Option> = None; for item in current.into_inner() { match item.as_rule() { - Rule::block_attribute => attributes.push(parse_attribute(item, diagnostics)), + Rule::block_attribute => attributes.push(parse_attribute(item, diagnostics, file_id)), Rule::field_declaration => match parse_field( &name.as_ref().unwrap().name, "view", item, pending_field_comment.take(), diagnostics, + file_id, ) { Ok(field) => fields.push(field), Err(err) => diagnostics.push_error(err), @@ -37,7 +43,7 @@ pub(crate) fn parse_view(pair: Pair<'_>, doc_comment: Option>, diagnost Rule::comment_block => pending_field_comment = Some(item), Rule::BLOCK_LEVEL_CATCH_ALL => diagnostics.push_error(DatamodelError::new_validation_error( "This line is not a valid field or attribute definition.", - item.as_span().into(), + (file_id, item.as_span()).into(), )), _ => parsing_catch_all(&item, "view"), } @@ -54,7 +60,7 @@ pub(crate) fn parse_view(pair: Pair<'_>, doc_comment: Option>, diagnost attributes, documentation: doc_comment.and_then(parse_comment_block), is_view: true, - span: ast::Span::from(pair_span), + span: ast::Span::from((file_id, pair_span)), }, _ => panic!("Encountered impossible model declaration during parsing",), } diff --git a/query-engine/connector-test-kit-rs/query-tests-setup/src/runner/mod.rs b/query-engine/connector-test-kit-rs/query-tests-setup/src/runner/mod.rs index 4f0e9aea1f21..a5b376e4fb68 100644 --- a/query-engine/connector-test-kit-rs/query-tests-setup/src/runner/mod.rs +++ b/query-engine/connector-test-kit-rs/query-tests-setup/src/runner/mod.rs @@ -217,7 +217,7 @@ impl Runner { } pub fn prisma_dml(&self) -> &str { - self.query_schema.internal_data_model.schema.db.source() + self.query_schema.internal_data_model.schema.db.source_assert_single() } pub fn max_bind_values(&self) -> Option { diff --git a/query-engine/query-engine-node-api/src/engine.rs 
b/query-engine/query-engine-node-api/src/engine.rs index 4ca524af699c..d9f5314e2489 100644 --- a/query-engine/query-engine-node-api/src/engine.rs +++ b/query-engine/query-engine-node-api/src/engine.rs @@ -122,7 +122,7 @@ impl QueryEngine { schema .diagnostics .to_result() - .map_err(|err| ApiError::conversion(err, schema.db.source()))?; + .map_err(|err| ApiError::conversion(err, schema.db.source_assert_single()))?; config .resolve_datasource_urls_query_engine( @@ -130,11 +130,11 @@ impl QueryEngine { |key| env.get(key).map(ToString::to_string), ignore_env_var_errors, ) - .map_err(|err| ApiError::conversion(err, schema.db.source()))?; + .map_err(|err| ApiError::conversion(err, schema.db.source_assert_single()))?; config .validate_that_one_datasource_is_provided() - .map_err(|errors| ApiError::conversion(errors, schema.db.source()))?; + .map_err(|errors| ApiError::conversion(errors, schema.db.source_assert_single()))?; let enable_metrics = config.preview_features().contains(PreviewFeature::Metrics); let enable_tracing = config.preview_features().contains(PreviewFeature::Tracing); @@ -203,7 +203,10 @@ impl QueryEngine { builder.native.env.get(key).map(ToString::to_string) }) .map_err(|err| { - crate::error::ApiError::Conversion(err, builder.schema.db.source().to_owned()) + crate::error::ApiError::Conversion( + err, + builder.schema.db.source_assert_single().to_owned(), + ) })?; ConnectorKind::Rust { url, diff --git a/query-engine/query-structure/src/composite_type.rs b/query-engine/query-structure/src/composite_type.rs index 431c033dd195..9bbff74e1290 100644 --- a/query-engine/query-structure/src/composite_type.rs +++ b/query-engine/query-structure/src/composite_type.rs @@ -1,6 +1,7 @@ use crate::{ast, Field}; +use psl::parser_database::CompositeTypeId; -pub type CompositeType = crate::Zipper; +pub type CompositeType = crate::Zipper; impl CompositeType { pub fn name(&self) -> &str { diff --git a/query-engine/query-structure/src/field/composite.rs 
b/query-engine/query-structure/src/field/composite.rs index 30564e5859b7..aebe2b36aadf 100644 --- a/query-engine/query-structure/src/field/composite.rs +++ b/query-engine/query-structure/src/field/composite.rs @@ -1,6 +1,6 @@ use crate::{parent_container::ParentContainer, CompositeType}; use psl::{ - parser_database::ScalarFieldId, + parser_database::{self as db, ScalarFieldId}, schema_ast::ast::{self, FieldArity}, }; use std::fmt::{Debug, Display}; @@ -8,7 +8,7 @@ use std::fmt::{Debug, Display}; #[derive(Debug, Clone, Copy, PartialEq, Eq, Hash)] pub enum CompositeFieldId { InModel(ScalarFieldId), - InCompositeType((ast::CompositeTypeId, ast::FieldId)), + InCompositeType((db::CompositeTypeId, ast::FieldId)), } pub type CompositeField = crate::Zipper; diff --git a/query-engine/query-structure/src/field/mod.rs b/query-engine/query-structure/src/field/mod.rs index 39e43f186c13..d8faf404e662 100644 --- a/query-engine/query-structure/src/field/mod.rs +++ b/query-engine/query-structure/src/field/mod.rs @@ -6,8 +6,8 @@ pub use composite::*; pub use relation::*; pub use scalar::*; -use crate::{ast, parent_container::ParentContainer, Model}; -use psl::parser_database::{walkers, ScalarType}; +use crate::{parent_container::ParentContainer, Model}; +use psl::parser_database::{walkers, EnumId, ScalarType}; use std::{borrow::Cow, hash::Hash}; #[derive(Debug, Clone, PartialEq, Eq, Hash)] @@ -143,7 +143,7 @@ pub enum TypeIdentifier { Float, Decimal, Boolean, - Enum(ast::EnumId), + Enum(EnumId), UUID, Json, DateTime, diff --git a/query-engine/query-structure/src/field/scalar.rs b/query-engine/query-structure/src/field/scalar.rs index becd438db276..c03ada0a9b71 100644 --- a/query-engine/query-structure/src/field/scalar.rs +++ b/query-engine/query-structure/src/field/scalar.rs @@ -1,7 +1,7 @@ use crate::{ast, parent_container::ParentContainer, prelude::*, DefaultKind, NativeTypeInstance, ValueGenerator}; use chrono::{DateTime, FixedOffset}; use psl::{ - parser_database::{walkers, 
ScalarFieldType, ScalarType}, + parser_database::{self as db, walkers, ScalarFieldType, ScalarType}, schema_ast::ast::FieldArity, }; use std::fmt::{Debug, Display}; @@ -12,7 +12,7 @@ pub type ScalarFieldRef = ScalarField; #[derive(Debug, Clone, Copy, Hash, PartialEq, Eq)] pub enum ScalarFieldId { InModel(psl::parser_database::ScalarFieldId), - InCompositeType((ast::CompositeTypeId, ast::FieldId)), + InCompositeType((db::CompositeTypeId, ast::FieldId)), } impl ScalarField { diff --git a/query-engine/query-structure/src/internal_data_model.rs b/query-engine/query-structure/src/internal_data_model.rs index 70f8761cbdc0..ce8dd059fa03 100644 --- a/query-engine/query-structure/src/internal_data_model.rs +++ b/query-engine/query-structure/src/internal_data_model.rs @@ -1,5 +1,5 @@ use crate::{prelude::*, CompositeType, InternalEnum}; -use psl::schema_ast::ast; +use psl::parser_database as db; use std::sync::Arc; pub(crate) type InternalDataModelRef = InternalDataModel; @@ -52,11 +52,11 @@ impl InternalDataModel { .ok_or_else(|| DomainError::ModelNotFound { name: name.to_string() }) } - pub fn find_composite_type_by_id(&self, ctid: ast::CompositeTypeId) -> CompositeType { + pub fn find_composite_type_by_id(&self, ctid: db::CompositeTypeId) -> CompositeType { self.clone().zip(ctid) } - pub fn find_model_by_id(&self, model_id: ast::ModelId) -> Model { + pub fn find_model_by_id(&self, model_id: db::ModelId) -> Model { self.clone().zip(model_id) } diff --git a/query-engine/query-structure/src/internal_enum.rs b/query-engine/query-structure/src/internal_enum.rs index 6467adcebf6d..13dfd7206dca 100644 --- a/query-engine/query-structure/src/internal_enum.rs +++ b/query-engine/query-structure/src/internal_enum.rs @@ -1,9 +1,8 @@ use crate::Zipper; +use psl::{parser_database::EnumId, schema_ast::ast::EnumValueId}; -use psl::schema_ast::ast; - -pub type InternalEnum = Zipper; -pub type InternalEnumValue = Zipper; +pub type InternalEnum = Zipper; +pub type InternalEnumValue = Zipper; 
impl InternalEnum { pub fn name(&self) -> &str { diff --git a/query-engine/query-structure/src/model.rs b/query-engine/query-structure/src/model.rs index a2d9fa4ff462..310df1fbe6c3 100644 --- a/query-engine/query-structure/src/model.rs +++ b/query-engine/query-structure/src/model.rs @@ -1,7 +1,7 @@ use crate::prelude::*; -use psl::{parser_database::walkers, schema_ast::ast}; +use psl::parser_database::{walkers, ModelId}; -pub type Model = crate::Zipper; +pub type Model = crate::Zipper; impl Model { pub fn name(&self) -> &str { diff --git a/query-engine/schema/src/build.rs b/query-engine/schema/src/build.rs index 2970be408b59..b4562757b983 100644 --- a/query-engine/schema/src/build.rs +++ b/query-engine/schema/src/build.rs @@ -16,7 +16,7 @@ pub(crate) use output_types::{mutation_type, query_type}; use self::{enum_types::*, utils::*}; use crate::*; use psl::{datamodel_connector::ConnectorCapability, PreviewFeatures}; -use query_structure::{ast, Field as ModelField, Model, RelationFieldRef, TypeIdentifier}; +use query_structure::{Field as ModelField, Model, RelationFieldRef, TypeIdentifier}; pub fn build(schema: Arc, enable_raw_queries: bool) -> QuerySchema { let preview_features = schema.configuration.preview_features(); diff --git a/query-engine/schema/src/build/enum_types.rs b/query-engine/schema/src/build/enum_types.rs index b0ddc66a638d..7401732e939b 100644 --- a/query-engine/schema/src/build/enum_types.rs +++ b/query-engine/schema/src/build/enum_types.rs @@ -1,6 +1,7 @@ use super::*; use crate::EnumType; use constants::{filters, itx, json_null, load_strategy, ordering}; +use psl::parser_database as db; use query_structure::prelude::ParentContainer; pub(crate) fn sort_order_enum() -> EnumType { @@ -16,7 +17,7 @@ pub(crate) fn nulls_order_enum() -> EnumType { ) } -pub(crate) fn map_schema_enum_type(ctx: &'_ QuerySchema, enum_id: ast::EnumId) -> EnumType { +pub(crate) fn map_schema_enum_type(ctx: &'_ QuerySchema, enum_id: db::EnumId) -> EnumType { let ident = 
Identifier::new_model(IdentifierType::Enum(ctx.internal_data_model.clone().zip(enum_id))); let schema_enum = ctx.internal_data_model.clone().zip(enum_id); diff --git a/query-engine/schema/src/output_types.rs b/query-engine/schema/src/output_types.rs index 32956d01d50b..2b7a86dd5162 100644 --- a/query-engine/schema/src/output_types.rs +++ b/query-engine/schema/src/output_types.rs @@ -1,7 +1,7 @@ use super::*; use fmt::Debug; use once_cell::sync::Lazy; -use query_structure::ast::ModelId; +use psl::parser_database as db; use std::{borrow::Cow, fmt}; #[derive(Debug, Clone)] @@ -120,8 +120,7 @@ pub struct ObjectType<'a> { pub(crate) fields: OutputObjectFields<'a>, // Object types can directly map to models. - pub(crate) model: Option, - _heh: (), + pub(crate) model: Option, } impl Debug for ObjectType<'_> { @@ -145,7 +144,6 @@ impl<'a> ObjectType<'a> { identifier, fields: Arc::new(lazy), model: None, - _heh: (), } } @@ -215,7 +213,7 @@ impl<'a> OutputField<'a> { } } - pub fn model(&self) -> Option { + pub fn model(&self) -> Option { self.query_info.as_ref().and_then(|info| info.model) } diff --git a/query-engine/schema/src/query_schema.rs b/query-engine/schema/src/query_schema.rs index e677b10e75a5..ff25c17159fa 100644 --- a/query-engine/schema/src/query_schema.rs +++ b/query-engine/schema/src/query_schema.rs @@ -2,9 +2,9 @@ use crate::{IdentifierType, ObjectType, OutputField}; use psl::{ can_support_relation_load_strategy, datamodel_connector::{Connector, ConnectorCapabilities, ConnectorCapability, JoinStrategySupport, RelationMode}, - has_capability, PreviewFeature, PreviewFeatures, + has_capability, parser_database as db, PreviewFeature, PreviewFeatures, }; -use query_structure::{ast, InternalDataModel}; +use query_structure::InternalDataModel; use std::{collections::HashMap, fmt}; #[derive(Clone, Debug, Hash, Eq, PartialEq)] @@ -218,7 +218,7 @@ impl QuerySchema { /// Designates a specific top-level operation on a corresponding model. 
#[derive(Debug, Clone, PartialEq, Hash, Eq)] pub struct QueryInfo { - pub model: Option, + pub model: Option, pub tag: QueryTag, } diff --git a/schema-engine/connectors/schema-connector/src/introspection_context.rs b/schema-engine/connectors/schema-connector/src/introspection_context.rs index 54f197935bd3..62f116e5ca94 100644 --- a/schema-engine/connectors/schema-connector/src/introspection_context.rs +++ b/schema-engine/connectors/schema-connector/src/introspection_context.rs @@ -38,13 +38,14 @@ impl IntrospectionContext { ) -> Self { let mut config_blocks = String::new(); - for source in previous_schema.db.ast().sources() { - config_blocks.push_str(&previous_schema.db.source()[source.span.start..source.span.end]); + for source in previous_schema.db.ast_assert_single().sources() { + config_blocks.push_str(&previous_schema.db.source_assert_single()[source.span.start..source.span.end]); config_blocks.push('\n'); } - for generator in previous_schema.db.ast().generators() { - config_blocks.push_str(&previous_schema.db.source()[generator.span.start..generator.span.end]); + for generator in previous_schema.db.ast_assert_single().generators() { + config_blocks + .push_str(&previous_schema.db.source_assert_single()[generator.span.start..generator.span.end]); config_blocks.push('\n'); } @@ -70,7 +71,7 @@ impl IntrospectionContext { /// The string source of the PSL schema file. pub fn schema_string(&self) -> &str { - self.previous_schema.db.source() + self.previous_schema.db.source_assert_single() } /// The configuration block of the PSL schema file. 
diff --git a/schema-engine/connectors/sql-schema-connector/src/introspection/datamodel_calculator/context.rs b/schema-engine/connectors/sql-schema-connector/src/introspection/datamodel_calculator/context.rs index 32f2ed0a5893..04dcfa7345de 100644 --- a/schema-engine/connectors/sql-schema-connector/src/introspection/datamodel_calculator/context.rs +++ b/schema-engine/connectors/sql-schema-connector/src/introspection/datamodel_calculator/context.rs @@ -11,7 +11,7 @@ use crate::introspection::{ use psl::{ builtin_connectors::*, datamodel_connector::Connector, - parser_database::{ast, walkers}, + parser_database::{self as db, walkers}, Configuration, PreviewFeature, }; use quaint::prelude::SqlFamily; @@ -363,11 +363,11 @@ impl<'a> DatamodelCalculatorContext<'a> { self.introspection_map.relation_names.m2m_relation_name(id) } - pub(crate) fn table_missing_for_model(&self, id: &ast::ModelId) -> bool { + pub(crate) fn table_missing_for_model(&self, id: &db::ModelId) -> bool { self.introspection_map.missing_tables_for_previous_models.contains(id) } - pub(crate) fn view_missing_for_model(&self, id: &ast::ModelId) -> bool { + pub(crate) fn view_missing_for_model(&self, id: &db::ModelId) -> bool { self.introspection_map.missing_views_for_previous_models.contains(id) } diff --git a/schema-engine/connectors/sql-schema-connector/src/introspection/introspection_map.rs b/schema-engine/connectors/sql-schema-connector/src/introspection/introspection_map.rs index 099408e1dcf7..5fd5019213ac 100644 --- a/schema-engine/connectors/sql-schema-connector/src/introspection/introspection_map.rs +++ b/schema-engine/connectors/sql-schema-connector/src/introspection/introspection_map.rs @@ -7,7 +7,7 @@ use crate::introspection::{ introspection_pair::RelationFieldDirection, sanitize_datamodel_names, }; use psl::{ - parser_database::{self, ast, ScalarFieldId}, + parser_database::{self as db, ScalarFieldId}, PreviewFeature, }; use relation_names::RelationNames; @@ -24,15 +24,15 @@ pub(crate) use 
relation_names::RelationName; /// schema. #[derive(Default)] pub(crate) struct IntrospectionMap<'a> { - pub(crate) existing_enums: HashMap, - pub(crate) existing_models: HashMap, - pub(crate) existing_views: HashMap, - pub(crate) missing_tables_for_previous_models: HashSet, - pub(crate) missing_views_for_previous_models: HashSet, + pub(crate) existing_enums: HashMap, + pub(crate) existing_models: HashMap, + pub(crate) existing_views: HashMap, + pub(crate) missing_tables_for_previous_models: HashSet, + pub(crate) missing_views_for_previous_models: HashSet, pub(crate) existing_model_scalar_fields: HashMap, pub(crate) existing_view_scalar_fields: HashMap, - pub(crate) existing_inline_relations: HashMap, - pub(crate) existing_m2m_relations: HashMap, + pub(crate) existing_inline_relations: HashMap, + pub(crate) existing_m2m_relations: HashMap, pub(crate) relation_names: RelationNames<'a>, pub(crate) inline_relation_positions: Vec<(sql::TableId, sql::ForeignKeyId, RelationFieldDirection)>, pub(crate) m2m_relation_positions: Vec<(sql::TableId, sql::ForeignKeyId, RelationFieldDirection)>, diff --git a/schema-engine/connectors/sql-schema-connector/src/introspection/introspection_pair/enumerator.rs b/schema-engine/connectors/sql-schema-connector/src/introspection/introspection_pair/enumerator.rs index b14c2c51ea30..29fff1f18c36 100644 --- a/schema-engine/connectors/sql-schema-connector/src/introspection/introspection_pair/enumerator.rs +++ b/schema-engine/connectors/sql-schema-connector/src/introspection/introspection_pair/enumerator.rs @@ -1,8 +1,8 @@ use super::IntrospectionPair; use crate::introspection::sanitize_datamodel_names::{EnumVariantName, ModelName}; use psl::{ - parser_database::walkers, - schema_ast::ast::{self, WithDocumentation}, + parser_database::{self as db, walkers}, + schema_ast::ast::WithDocumentation, }; use sql_schema_describer as sql; use std::borrow::Cow; @@ -51,7 +51,7 @@ impl<'a> EnumPair<'a> { /// The position of the enum from the PSL, if 
existing. Used for /// sorting the enums in the final introspected data model. - pub(crate) fn previous_position(self) -> Option { + pub(crate) fn previous_position(self) -> Option { self.previous.map(|e| e.id) } diff --git a/schema-engine/connectors/sql-schema-connector/src/introspection/introspection_pair/model.rs b/schema-engine/connectors/sql-schema-connector/src/introspection/introspection_pair/model.rs index 13f3b78f88e0..0e907fdbefcd 100644 --- a/schema-engine/connectors/sql-schema-connector/src/introspection/introspection_pair/model.rs +++ b/schema-engine/connectors/sql-schema-connector/src/introspection/introspection_pair/model.rs @@ -1,7 +1,7 @@ use psl::{ datamodel_connector::walker_ext_traits::IndexWalkerExt, - parser_database::walkers, - schema_ast::ast::{self, WithDocumentation}, + parser_database::{self as db, walkers}, + schema_ast::ast::WithDocumentation, }; use sql::postgres::PostgresSchemaExt; use sql_schema_describer as sql; @@ -18,7 +18,7 @@ pub(crate) type ModelPair<'a> = IntrospectionPair<'a, Option ModelPair<'a> { /// The position of the model from the PSL, if existing. Used for /// sorting the models in the final introspected data model. 
- pub(crate) fn previous_position(self) -> Option { + pub(crate) fn previous_position(self) -> Option { self.previous.map(|m| m.id) } diff --git a/schema-engine/connectors/sql-schema-connector/src/introspection/introspection_pair/view.rs b/schema-engine/connectors/sql-schema-connector/src/introspection/introspection_pair/view.rs index e5b58ebd3cf3..ea7ac6cd30ca 100644 --- a/schema-engine/connectors/sql-schema-connector/src/introspection/introspection_pair/view.rs +++ b/schema-engine/connectors/sql-schema-connector/src/introspection/introspection_pair/view.rs @@ -1,12 +1,10 @@ -use std::borrow::Cow; - +use super::{IdPair, IndexPair, IntrospectionPair, RelationFieldPair, ScalarFieldPair}; use psl::{ - parser_database::walkers, - schema_ast::ast::{self, WithDocumentation}, + parser_database::{self as db, walkers}, + schema_ast::ast::WithDocumentation, }; use sql_schema_describer as sql; - -use super::{IdPair, IndexPair, IntrospectionPair, RelationFieldPair, ScalarFieldPair}; +use std::borrow::Cow; /// Comparing a PSL view (which currently utilizes the /// model structure due to them being completely the same @@ -16,7 +14,7 @@ pub(crate) type ViewPair<'a> = IntrospectionPair<'a, Option ViewPair<'a> { /// The position of the view from the PSL, if existing. Used for /// sorting the views in the final introspected data model. 
- pub(crate) fn previous_position(self) -> Option { + pub(crate) fn previous_position(self) -> Option { self.previous.map(|m| m.id) } diff --git a/schema-engine/connectors/sql-schema-connector/src/introspection/rendering/enums.rs b/schema-engine/connectors/sql-schema-connector/src/introspection/rendering/enums.rs index fe8f2a96807d..11c87ab7de09 100644 --- a/schema-engine/connectors/sql-schema-connector/src/introspection/rendering/enums.rs +++ b/schema-engine/connectors/sql-schema-connector/src/introspection/rendering/enums.rs @@ -5,11 +5,11 @@ use crate::introspection::{ sanitize_datamodel_names, }; use datamodel_renderer::datamodel as renderer; -use psl::parser_database::ast; +use psl::parser_database as db; /// Render all enums. pub(super) fn render<'a>(ctx: &'a DatamodelCalculatorContext<'a>, rendered: &mut renderer::Datamodel<'a>) { - let mut all_enums: Vec<(Option, renderer::Enum<'_>)> = Vec::new(); + let mut all_enums: Vec<(Option, renderer::Enum<'_>)> = Vec::new(); for pair in ctx.enum_pairs() { all_enums.push((pair.previous_position(), render_enum(pair))) diff --git a/schema-engine/connectors/sql-schema-connector/src/sql_schema_calculator.rs b/schema-engine/connectors/sql-schema-connector/src/sql_schema_calculator.rs index 3b36829cfcf0..5ef3bb69529a 100644 --- a/schema-engine/connectors/sql-schema-connector/src/sql_schema_calculator.rs +++ b/schema-engine/connectors/sql-schema-connector/src/sql_schema_calculator.rs @@ -6,7 +6,7 @@ use crate::{flavour::SqlFlavour, SqlDatabaseSchema}; use psl::{ datamodel_connector::walker_ext_traits::*, parser_database::{ - ast, + self as db, ast, walkers::{ModelWalker, ScalarFieldWalker}, ReferentialAction, ScalarFieldType, ScalarType, SortOrder, }, @@ -61,7 +61,7 @@ fn push_model_tables(ctx: &mut Context<'_>) { .schema .describer_schema .push_table(model.database_name().to_owned(), namespace_id, None); - ctx.model_id_to_table_id.insert(model.model_id(), table_id); + ctx.model_id_to_table_id.insert(model.id, table_id); for 
field in model.scalar_fields() { push_column_for_scalar_field(field, table_id, ctx); @@ -138,8 +138,8 @@ fn push_inline_relations(ctx: &mut Context<'_>) { let relation_field = relation .forward_relation_field() .expect("Expecting a complete relation in sql_schmea_calculator"); - let referencing_model = ctx.model_id_to_table_id[&relation_field.model().model_id()]; - let referenced_model = ctx.model_id_to_table_id[&relation.referenced_model().model_id()]; + let referencing_model = ctx.model_id_to_table_id[&relation_field.model().id]; + let referenced_model = ctx.model_id_to_table_id[&relation.referenced_model().id]; let on_delete_action = relation_field.explicit_on_delete().unwrap_or_else(|| { relation_field.default_on_delete_action( ctx.datamodel.configuration.relation_mode().unwrap_or_default(), @@ -193,9 +193,9 @@ fn push_relation_tables(ctx: &mut Context<'_>) { .take(datamodel.configuration.max_identifier_length()) .collect::(); let model_a = m2m.model_a(); - let model_a_table_id = ctx.model_id_to_table_id[&model_a.model_id()]; + let model_a_table_id = ctx.model_id_to_table_id[&model_a.id]; let model_b = m2m.model_b(); - let model_b_table_id = ctx.model_id_to_table_id[&model_b.model_id()]; + let model_b_table_id = ctx.model_id_to_table_id[&model_b.id]; let model_a_column = m2m.column_a_name(); let model_b_column = m2m.column_b_name(); let model_a_id = model_a.primary_key().unwrap().fields().next().unwrap(); @@ -300,7 +300,7 @@ fn push_relation_tables(ctx: &mut Context<'_>) { if ctx.datamodel.relation_mode().uses_foreign_keys() { let fkid = ctx.schema.describer_schema.push_foreign_key( Some(model_a_fk_name), - [table_id, ctx.model_id_to_table_id[&model_a.model_id()]], + [table_id, ctx.model_id_to_table_id[&model_a.id]], [flavour.m2m_foreign_key_action(model_a, model_b); 2], ); @@ -319,7 +319,7 @@ fn push_relation_tables(ctx: &mut Context<'_>) { let fkid = ctx.schema.describer_schema.push_foreign_key( Some(model_b_fk_name), - [table_id, 
ctx.model_id_to_table_id[&model_b.model_id()]], + [table_id, ctx.model_id_to_table_id[&model_b.id]], [flavour.m2m_foreign_key_action(model_a, model_b); 2], ); @@ -354,7 +354,7 @@ fn push_column_for_scalar_field(field: ScalarFieldWalker<'_>, table_id: sql::Tab fn push_column_for_model_enum_scalar_field( field: ScalarFieldWalker<'_>, - enum_id: ast::EnumId, + enum_id: db::EnumId, table_id: sql::TableId, ctx: &mut Context<'_>, ) { @@ -582,8 +582,8 @@ pub(crate) struct Context<'a> { schema: &'a mut SqlDatabaseSchema, flavour: &'a dyn SqlFlavour, schemas: HashMap<&'a str, sql::NamespaceId>, - model_id_to_table_id: HashMap, - enum_ids: HashMap, + model_id_to_table_id: HashMap, + enum_ids: HashMap, } impl Context<'_> { diff --git a/schema-engine/connectors/sql-schema-connector/src/sql_schema_calculator/sql_schema_calculator_flavour/mssql.rs b/schema-engine/connectors/sql-schema-connector/src/sql_schema_calculator/sql_schema_calculator_flavour/mssql.rs index 51a8f5ef54be..7e6b94a761ab 100644 --- a/schema-engine/connectors/sql-schema-connector/src/sql_schema_calculator/sql_schema_calculator_flavour/mssql.rs +++ b/schema-engine/connectors/sql-schema-connector/src/sql_schema_calculator/sql_schema_calculator_flavour/mssql.rs @@ -27,7 +27,7 @@ impl SqlSchemaCalculatorFlavour for MssqlFlavour { let mut data = MssqlSchemaExt::default(); for model in context.datamodel.db.walk_models() { - let table_id = context.model_id_to_table_id[&model.model_id()]; + let table_id = context.model_id_to_table_id[&model.id]; let table = context.schema.walk(table_id); if model .primary_key() diff --git a/schema-engine/connectors/sql-schema-connector/src/sql_schema_calculator/sql_schema_calculator_flavour/postgres.rs b/schema-engine/connectors/sql-schema-connector/src/sql_schema_calculator/sql_schema_calculator_flavour/postgres.rs index 656fe432a970..c2193252be99 100644 --- a/schema-engine/connectors/sql-schema-connector/src/sql_schema_calculator/sql_schema_calculator_flavour/postgres.rs +++ 
b/schema-engine/connectors/sql-schema-connector/src/sql_schema_calculator/sql_schema_calculator_flavour/postgres.rs @@ -69,7 +69,7 @@ impl SqlSchemaCalculatorFlavour for PostgresFlavour { } for model in db.walk_models() { - let table_id = context.model_id_to_table_id[&model.model_id()]; + let table_id = context.model_id_to_table_id[&model.id]; // Add index algorithms and opclasses. for index in model.indexes() { diff --git a/schema-engine/core/src/state.rs b/schema-engine/core/src/state.rs index 9143ef1fb767..c376cb300fba 100644 --- a/schema-engine/core/src/state.rs +++ b/schema-engine/core/src/state.rs @@ -177,7 +177,8 @@ impl EngineState { return Err(ConnectorError::from_msg("Missing --datamodel".to_owned())); }; - self.with_connector_for_schema(schema.db.source(), None, f).await + self.with_connector_for_schema(schema.db.source_assert_single(), None, f) + .await } } diff --git a/schema-engine/sql-introspection-tests/tests/referential_actions/mysql.rs b/schema-engine/sql-introspection-tests/tests/referential_actions/mysql.rs index 7e184686c146..3f7ec20f5423 100644 --- a/schema-engine/sql-introspection-tests/tests/referential_actions/mysql.rs +++ b/schema-engine/sql-introspection-tests/tests/referential_actions/mysql.rs @@ -55,7 +55,7 @@ async fn introspect_set_default_should_warn(api: &mut TestApi) -> TestResult { let warning_messages = schema .diagnostics - .warnings_to_pretty_string("schema.prisma", schema.db.source()); + .warnings_to_pretty_string("schema.prisma", schema.db.source_assert_single()); let expected_validation = expect![[r#" warning: MySQL does not actually support the `SetDefault` referential action, so using it may result in unexpected errors. Read more at https://pris.ly/d/mysql-set-default