diff --git a/common/src/sql/dbinit.sql b/common/src/sql/dbinit.sql index 50458834988..c6576d78f6b 100644 --- a/common/src/sql/dbinit.sql +++ b/common/src/sql/dbinit.sql @@ -721,7 +721,10 @@ CREATE TABLE omicron.public.vpc ( /* Used to ensure that two requests do not concurrently modify the VPC's firewall */ - firewall_gen INT NOT NULL + firewall_gen INT NOT NULL, + + /* Child-resource generation number for VPC Subnets. */ + subnet_gen INT8 NOT NULL ); CREATE UNIQUE INDEX ON omicron.public.vpc ( @@ -745,6 +748,8 @@ CREATE TABLE omicron.public.vpc_subnet ( /* Indicates that the object has been deleted */ time_deleted TIMESTAMPTZ, vpc_id UUID NOT NULL, + /* Child resource creation generation number */ + rcgen INT8 NOT NULL, ipv4_block INET NOT NULL, ipv6_block INET NOT NULL ); diff --git a/nexus/db-model/src/schema.rs b/nexus/db-model/src/schema.rs index c5f7406a51e..43a0b938940 100644 --- a/nexus/db-model/src/schema.rs +++ b/nexus/db-model/src/schema.rs @@ -439,6 +439,7 @@ table! { ipv6_prefix -> Inet, dns_name -> Text, firewall_gen -> Int8, + subnet_gen -> Int8, } } @@ -451,6 +452,7 @@ table! { time_modified -> Timestamptz, time_deleted -> Nullable<Timestamptz>, vpc_id -> Uuid, + rcgen -> Int8, ipv4_block -> Inet, ipv6_block -> Inet, } diff --git a/nexus/db-model/src/vpc.rs b/nexus/db-model/src/vpc.rs index 695b3b52c23..0ea38a03d34 100644 --- a/nexus/db-model/src/vpc.rs +++ b/nexus/db-model/src/vpc.rs @@ -2,9 +2,9 @@ // License, v. 2.0. If a copy of the MPL was not distributed with this // file, You can obtain one at https://mozilla.org/MPL/2.0/. -use super::{Generation, Ipv6Net, Name, VpcFirewallRule}; +use super::{Generation, Ipv6Net, Name, VpcFirewallRule, VpcSubnet}; use crate::collection::DatastoreCollectionConfig; -use crate::schema::{vpc, vpc_firewall_rule}; +use crate::schema::{vpc, vpc_firewall_rule, vpc_subnet}; use crate::Vni; use chrono::{DateTime, Utc}; use db_macros::Resource; @@ -31,6 +31,9 @@ pub struct Vpc { /// firewall generation number, used as a child resource generation number /// per RFD 192 pub firewall_gen: Generation, + + /// VPC Subnet generation number + pub subnet_gen: Generation, } impl From<Vpc> for views::Vpc { @@ -58,6 +61,7 @@ pub struct IncompleteVpc { pub ipv6_prefix: IpNetwork, pub dns_name: Name, pub firewall_gen: Generation, + pub subnet_gen: Generation, } impl IncompleteVpc { @@ -92,6 +96,7 @@ impl IncompleteVpc { ipv6_prefix, dns_name: params.dns_name.into(), firewall_gen: Generation::new(), + subnet_gen: Generation::new(), }) } } @@ -103,6 +108,13 @@ impl DatastoreCollectionConfig<VpcFirewallRule> for Vpc { type CollectionIdColumn = vpc_firewall_rule::dsl::vpc_id; } +impl DatastoreCollectionConfig<VpcSubnet> for Vpc { + type CollectionId = Uuid; + type GenerationNumberColumn = vpc::dsl::subnet_gen; + type CollectionTimeDeletedColumn = vpc::dsl::time_deleted; + type CollectionIdColumn = vpc_subnet::dsl::vpc_id; +} + #[derive(AsChangeset)] #[diesel(table_name = vpc)] pub struct VpcUpdate { diff --git a/nexus/db-model/src/vpc_subnet.rs b/nexus/db-model/src/vpc_subnet.rs index 21fcf93cc11..a572f6f535e 100644 --- a/nexus/db-model/src/vpc_subnet.rs +++ b/nexus/db-model/src/vpc_subnet.rs @@ -2,8 +2,12 @@ // License, v. 2.0. If a copy of the MPL was not distributed with this // file, You can obtain one at https://mozilla.org/MPL/2.0/.
+use super::Generation; use super::{Ipv4Net, Ipv6Net, Name}; +use crate::collection::DatastoreCollectionConfig; +use crate::schema::network_interface; use crate::schema::vpc_subnet; +use crate::NetworkInterface; use chrono::{DateTime, Utc}; use db_macros::Resource; use nexus_types::external_api::params; @@ -20,6 +24,7 @@ pub struct VpcSubnet { pub identity: VpcSubnetIdentity, pub vpc_id: Uuid, + pub rcgen: Generation, pub ipv4_block: Ipv4Net, pub ipv6_block: Ipv6Net, } @@ -40,6 +45,7 @@ impl VpcSubnet { Self { identity, vpc_id, + rcgen: Generation::new(), ipv4_block: Ipv4Net(ipv4_block), ipv6_block: Ipv6Net(ipv6_block), } @@ -105,3 +111,10 @@ impl From<params::VpcSubnetUpdate> for VpcSubnetUpdate { } } } + +impl DatastoreCollectionConfig<NetworkInterface> for VpcSubnet { + type CollectionId = Uuid; + type GenerationNumberColumn = vpc_subnet::dsl::rcgen; + type CollectionTimeDeletedColumn = vpc_subnet::dsl::time_deleted; + type CollectionIdColumn = network_interface::dsl::subnet_id; +} diff --git a/nexus/src/app/vpc.rs b/nexus/src/app/vpc.rs index c2ad4b781e0..9c6950abbaa 100644 --- a/nexus/src/app/vpc.rs +++ b/nexus/src/app/vpc.rs @@ -261,10 +261,22 @@ impl super::Nexus { LookupType::ById(db_vpc.system_router_id), ); + // Possibly delete the VPC, then the router and firewall. + // + // We must delete the VPC first. This will fail if the VPC still + // contains at least one subnet, since those are independent containers + // that track network interfaces as child resources. If we delete the + // router first, it'll succeed even if the VPC contains Subnets, which + // means the router is now gone from an otherwise-live subnet. + // + // This is a good example of the need for the original comment: + // // TODO: This should eventually use a saga to call the // networking subsystem to have it clean up the networking resources + self.db_datastore + .project_delete_vpc(opctx, &db_vpc, &authz_vpc) + .await?; self.db_datastore.vpc_delete_router(&opctx, &authz_vpc_router).await?; - self.db_datastore.project_delete_vpc(opctx, &authz_vpc).await?; // Delete all firewall rules after deleting the VPC, to ensure no // firewall rules get added between rules deletion and VPC deletion.
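
The two new `DatastoreCollectionConfig` impls are what tie the generation columns to their child tables: inserting a child through the collection-insert helper bumps the parent's generation in the same statement, which is what the guarded deletes in the datastore changes below rely on. The subnet-creation path itself is not part of these hunks, so the following is only a minimal sketch of how the `Vpc`-to-`VpcSubnet` impl would typically be exercised, assuming a plain Diesel insert (the real path goes through the overlap-checking query in `queries/vpc_subnet.rs`), a hypothetical method name, and the imports already present in `datastore/vpc.rs`:

```rust
// Hypothetical sketch, not part of this change: insert a VpcSubnet as a child
// of its Vpc so that `vpc.subnet_gen` is incremented atomically with the
// insert. `CollectionNotFound` means the parent VPC was (soft-)deleted
// concurrently, so the insert did not happen.
impl DataStore {
    async fn vpc_create_subnet_sketch(
        &self,
        opctx: &OpContext,
        authz_vpc: &authz::Vpc,
        subnet: VpcSubnet,
    ) -> Result<VpcSubnet, Error> {
        use db::schema::vpc_subnet::dsl;
        Vpc::insert_resource(
            authz_vpc.id(),
            diesel::insert_into(dsl::vpc_subnet).values(subnet),
        )
        .insert_and_get_result_async(self.pool_authorized(opctx).await?)
        .await
        .map_err(|e| match e {
            AsyncInsertError::CollectionNotFound => Error::ObjectNotFound {
                type_name: ResourceType::Vpc,
                lookup_type: LookupType::ById(authz_vpc.id()),
            },
            AsyncInsertError::DatabaseError(e) => {
                public_error_from_diesel_pool(e, ErrorHandler::Server)
            }
        })
    }
}
```

This mirrors the shape of the network-interface insert in the next file, just with `Vpc` as the collection and `VpcSubnet` as the child.
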
diff --git a/nexus/src/app/vpc_subnet.rs b/nexus/src/app/vpc_subnet.rs index cacc2dc441e..a162377b260 100644 --- a/nexus/src/app/vpc_subnet.rs +++ b/nexus/src/app/vpc_subnet.rs @@ -261,14 +261,17 @@ impl super::Nexus { vpc_name: &Name, subnet_name: &Name, ) -> DeleteResult { - let (.., authz_subnet) = LookupPath::new(opctx, &self.db_datastore) - .organization_name(organization_name) - .project_name(project_name) - .vpc_name(vpc_name) - .vpc_subnet_name(subnet_name) - .lookup_for(authz::Action::Delete) - .await?; - self.db_datastore.vpc_delete_subnet(opctx, &authz_subnet).await + let (.., authz_subnet, db_subnet) = + LookupPath::new(opctx, &self.db_datastore) + .organization_name(organization_name) + .project_name(project_name) + .vpc_name(vpc_name) + .vpc_subnet_name(subnet_name) + .fetch_for(authz::Action::Delete) + .await?; + self.db_datastore + .vpc_delete_subnet(opctx, &db_subnet, &authz_subnet) + .await } pub async fn subnet_list_network_interfaces( diff --git a/nexus/src/db/datastore/network_interface.rs b/nexus/src/db/datastore/network_interface.rs index f1b1f99530b..e9cab811ba6 100644 --- a/nexus/src/db/datastore/network_interface.rs +++ b/nexus/src/db/datastore/network_interface.rs @@ -8,6 +8,8 @@ use super::DataStore; use crate::authz; use crate::context::OpContext; use crate::db; +use crate::db::collection_insert::AsyncInsertError; +use crate::db::collection_insert::DatastoreCollection; use crate::db::error::public_error_from_diesel_pool; use crate::db::error::ErrorHandler; use crate::db::error::TransactionError; @@ -16,6 +18,7 @@ use crate::db::model::Instance; use crate::db::model::Name; use crate::db::model::NetworkInterface; use crate::db::model::NetworkInterfaceUpdate; +use crate::db::model::VpcSubnet; use crate::db::pagination::paginated; use crate::db::queries::network_interface; use async_bb8_diesel::AsyncConnection; @@ -27,6 +30,8 @@ use omicron_common::api::external::DataPageParams; use omicron_common::api::external::DeleteResult; use omicron_common::api::external::Error; use omicron_common::api::external::ListResultVec; +use omicron_common::api::external::LookupType; +use omicron_common::api::external::ResourceType; use omicron_common::api::external::UpdateResult; use sled_agent_client::types as sled_client_types; @@ -56,19 +61,31 @@ impl DataStore { interface: IncompleteNetworkInterface, ) -> Result { use db::schema::network_interface::dsl; + let subnet_id = interface.subnet.identity.id; let query = network_interface::InsertQuery::new(interface.clone()); - diesel::insert_into(dsl::network_interface) - .values(query) - .returning(NetworkInterface::as_returning()) - .get_result_async( - self.pool_authorized(opctx) - .await - .map_err(network_interface::InsertError::External)?, - ) - .await - .map_err(|e| { + VpcSubnet::insert_resource( + subnet_id, + diesel::insert_into(dsl::network_interface).values(query), + ) + .insert_and_get_result_async( + self.pool_authorized(opctx) + .await + .map_err(network_interface::InsertError::External)?, + ) + .await + .map_err(|e| match e { + AsyncInsertError::CollectionNotFound => { + network_interface::InsertError::External( + Error::ObjectNotFound { + type_name: ResourceType::VpcSubnet, + lookup_type: LookupType::ById(subnet_id), + }, + ) + } + AsyncInsertError::DatabaseError(e) => { network_interface::InsertError::from_pool(e, &interface) - }) + } + }) } /// Delete all network interfaces attached to the given instance. 
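
With child inserts now bumping `rcgen`/`subnet_gen`, the deletes in the datastore changes below are conditioned on the generation value read during the lookup, and they fail with an `InvalidRequest` error when it has moved. Nothing in this change retries on that failure (a saga may eventually own this), but a caller that wanted to hide the race could re-fetch and try again. A minimal sketch of such a wrapper, with a hypothetical name and retry bound:

```rust
// Hypothetical sketch, not part of this change: retry the generation-guarded
// subnet delete a few times, re-fetching the subnet each time so the check
// runs against a fresh `rcgen`.
impl super::Nexus {
    async fn vpc_delete_subnet_with_retry_sketch(
        &self,
        opctx: &OpContext,
        organization_name: &Name,
        project_name: &Name,
        vpc_name: &Name,
        subnet_name: &Name,
    ) -> DeleteResult {
        use omicron_common::api::external::Error;
        // Assumed bound; not something this change defines.
        const MAX_DELETE_ATTEMPTS: usize = 3;
        let mut last_err = None;
        for _ in 0..MAX_DELETE_ATTEMPTS {
            let (.., authz_subnet, db_subnet) =
                LookupPath::new(opctx, &self.db_datastore)
                    .organization_name(organization_name)
                    .project_name(project_name)
                    .vpc_name(vpc_name)
                    .vpc_subnet_name(subnet_name)
                    .fetch_for(authz::Action::Delete)
                    .await?;
            match self
                .db_datastore
                .vpc_delete_subnet(opctx, &db_subnet, &authz_subnet)
                .await
            {
                // The datastore currently reports both "concurrently
                // modified" and "subnet not empty" as InvalidRequest, so this
                // retries both; telling them apart would need a more specific
                // error variant.
                Err(e @ Error::InvalidRequest { .. }) => last_err = Some(e),
                other => return other,
            }
        }
        Err(last_err.unwrap())
    }
}
```
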
diff --git a/nexus/src/db/datastore/vpc.rs b/nexus/src/db/datastore/vpc.rs index fb570815ae4..946207f65a3 100644 --- a/nexus/src/db/datastore/vpc.rs +++ b/nexus/src/db/datastore/vpc.rs @@ -11,6 +11,7 @@ use crate::db; use crate::db::collection_insert::AsyncInsertError; use crate::db::collection_insert::DatastoreCollection; use crate::db::collection_insert::SyncInsertError; +use crate::db::error::diesel_pool_result_optional; use crate::db::error::public_error_from_diesel_pool; use crate::db::error::ErrorHandler; use crate::db::error::TransactionError; @@ -43,6 +44,7 @@ use omicron_common::api::external::ListResultVec; use omicron_common::api::external::LookupType; use omicron_common::api::external::ResourceType; use omicron_common::api::external::UpdateResult; +use uuid::Uuid; impl DataStore { pub async fn project_list_vpcs( @@ -128,11 +130,13 @@ impl DataStore { pub async fn project_delete_vpc( &self, opctx: &OpContext, + db_vpc: &Vpc, authz_vpc: &authz::Vpc, ) -> DeleteResult { opctx.authorize(authz::Action::Delete, authz_vpc).await?; use db::schema::vpc::dsl; + use db::schema::vpc_subnet; // Note that we don't ensure the firewall rules are empty here, because // we allow deleting VPCs with firewall rules present. Inserting new // rules is serialized with respect to the deletion by the row lock // associated with the VPC row, since we use the collection insert CTE // pattern to add firewall rules. + // We _do_ need to check for the existence of subnets. VPC Subnets + // cannot be deleted while there are network interfaces in them + // (associations between an instance and a VPC Subnet). Because VPC + // Subnets are themselves containers for resources that we don't want to + // auto-delete (now, anyway), we've got to check there aren't any. We + // _might_ be able to make this a check for NICs, rather than subnets, + // but we can't have NICs be a child of both tables at this point, and + // we need to prevent VPC Subnets from being deleted while they have + // NICs in them as well. + if diesel_pool_result_optional( + vpc_subnet::dsl::vpc_subnet + .filter(vpc_subnet::dsl::vpc_id.eq(authz_vpc.id())) + .filter(vpc_subnet::dsl::time_deleted.is_null()) + .select(vpc_subnet::dsl::id) + .limit(1) + .first_async::<Uuid>(self.pool_authorized(opctx).await?) + .await, + ) + .map_err(|e| public_error_from_diesel_pool(e, ErrorHandler::Server))? + .is_some() + { + return Err(Error::InvalidRequest { + message: String::from( + "VPC cannot be deleted while VPC Subnets exist", + ), + }); + } + + // Delete the VPC, conditional on the subnet_gen not having changed. let now = Utc::now(); - diesel::update(dsl::vpc) + let updated_rows = diesel::update(dsl::vpc) .filter(dsl::time_deleted.is_null()) .filter(dsl::id.eq(authz_vpc.id())) + .filter(dsl::subnet_gen.eq(db_vpc.subnet_gen)) .set(dsl::time_deleted.eq(now)) - .returning(Vpc::as_returning()) - .get_result_async(self.pool_authorized(opctx).await?) + .execute_async(self.pool_authorized(opctx).await?)
.await .map_err(|e| { public_error_from_diesel_pool( ErrorHandler::NotFoundByResource(authz_vpc), ) })?; - Ok(()) + if updated_rows == 0 { + Err(Error::InvalidRequest { + message: String::from( + "deletion failed due to concurrent modification", + ), + }) + } else { + Ok(()) + } } pub async fn vpc_list_firewall_rules( @@ -328,18 +369,43 @@ impl DataStore { pub async fn vpc_delete_subnet( &self, opctx: &OpContext, + db_subnet: &VpcSubnet, authz_subnet: &authz::VpcSubnet, ) -> DeleteResult { opctx.authorize(authz::Action::Delete, authz_subnet).await?; + use db::schema::network_interface; use db::schema::vpc_subnet::dsl; + + // Verify there are no child network interfaces in this VPC Subnet + if diesel_pool_result_optional( + network_interface::dsl::network_interface + .filter(network_interface::dsl::subnet_id.eq(authz_subnet.id())) + .filter(network_interface::dsl::time_deleted.is_null()) + .select(network_interface::dsl::id) + .limit(1) + .first_async::<Uuid>(self.pool_authorized(opctx).await?) + .await, + ) + .map_err(|e| public_error_from_diesel_pool(e, ErrorHandler::Server))? + .is_some() + { + return Err(Error::InvalidRequest { + message: String::from( + "VPC Subnet cannot be deleted while instances \ + with network interfaces in the subnet exist", + ), + }); + } + + // Delete the subnet, conditional on the rcgen not having changed. let now = Utc::now(); - diesel::update(dsl::vpc_subnet) + let updated_rows = diesel::update(dsl::vpc_subnet) .filter(dsl::time_deleted.is_null()) .filter(dsl::id.eq(authz_subnet.id())) + .filter(dsl::rcgen.eq(db_subnet.rcgen)) .set(dsl::time_deleted.eq(now)) - .returning(VpcSubnet::as_returning()) - .get_result_async(self.pool_authorized(opctx).await?) + .execute_async(self.pool_authorized(opctx).await?)
.await .map_err(|e| { public_error_from_diesel_pool( ErrorHandler::NotFoundByResource(authz_subnet), ) })?; - Ok(()) + if updated_rows == 0 { + return Err(Error::InvalidRequest { + message: String::from( + "deletion failed due to concurrent modification", + ), + }); + } else { + Ok(()) + } } pub async fn vpc_update_subnet( @@ -463,8 +537,7 @@ impl DataStore { .filter(dsl::time_deleted.is_null()) .filter(dsl::id.eq(authz_router.id())) .set(dsl::time_deleted.eq(now)) - .returning(VpcRouter::as_returning()) - .get_result_async(self.pool()) + .execute_async(self.pool()) .await .map_err(|e| { public_error_from_diesel_pool( diff --git a/nexus/src/db/queries/network_interface.rs b/nexus/src/db/queries/network_interface.rs index 06afd96f89f..177a9d63aab 100644 --- a/nexus/src/db/queries/network_interface.rs +++ b/nexus/src/db/queries/network_interface.rs @@ -1474,6 +1474,7 @@ mod tests { use super::last_address_offset; use super::InsertError; use super::MAX_NICS_PER_INSTANCE; + use super::NUM_INITIAL_RESERVED_IP_ADDRESSES; use crate::context::OpContext; use crate::db::datastore::DataStore; use crate::db::identity::Resource; @@ -1485,8 +1486,11 @@ mod tests { use crate::db::model::VpcSubnet; use crate::external_api::params::InstanceCreate; use crate::external_api::params::InstanceNetworkInterfaceAttachment; + use async_bb8_diesel::AsyncRunQueryDsl; use chrono::Utc; use dropshot::test_util::LogContext; + use ipnetwork::Ipv4Network; + use ipnetwork::Ipv6Network; use nexus_test_utils::db::test_setup_database; use omicron_common::api::external; use omicron_common::api::external::ByteCount; @@ -1502,6 +1506,8 @@ mod tests { use omicron_test_utils::dev::db::CockroachInstance; use std::convert::TryInto; use std::net::IpAddr; + use std::net::Ipv4Addr; + use std::net::Ipv6Addr; use std::sync::Arc; use uuid::Uuid; @@ -1579,48 +1585,61 @@ mod tests { instance } - // VPC with two VPC Subnets, for testing behavior of NIC queries. + // VPC with several distinct subnets. struct Network { vpc_id: Uuid, - subnets: [VpcSubnet; 2], + subnets: Vec<VpcSubnet>, } impl Network { - // Create a VPC with two distinct VPC Subnets. - fn new() -> Self { + // Create a VPC with N distinct VPC Subnets.
+ fn new(n_subnets: u8) -> Self { let vpc_id = Uuid::new_v4(); - let subnet1 = VpcSubnet::new( - Uuid::new_v4(), - vpc_id, - IdentityMetadataCreateParams { - name: String::from("first-subnet").try_into().unwrap(), - description: String::from("first test subnet"), - }, - Ipv4Net("172.30.0.0/28".parse().unwrap()), - Ipv6Net("fd12:3456:7890::/64".parse().unwrap()), - ); - let subnet2 = VpcSubnet::new( - Uuid::new_v4(), - vpc_id, - IdentityMetadataCreateParams { - name: String::from("second-subnet").try_into().unwrap(), - description: String::from("second test subnet"), - }, - Ipv4Net("172.31.0.0/28".parse().unwrap()), - Ipv6Net("fd12:3456:7891::/64".parse().unwrap()), - ); - Self { vpc_id, subnets: [subnet1, subnet2] } + let mut subnets = Vec::with_capacity(n_subnets as _); + for i in 0..n_subnets { + let ipv4net = Ipv4Net( + Ipv4Network::new(Ipv4Addr::new(172, 30, 0, i), 28).unwrap(), + ); + let ipv6net = Ipv6Net( + Ipv6Network::new( + Ipv6Addr::new( + 0xfd12, + 0x3456, + 0x7890, + i.into(), + 0, + 0, + 0, + 0, + ), + 64, + ) + .unwrap(), + ); + let subnet = VpcSubnet::new( + Uuid::new_v4(), + vpc_id, + IdentityMetadataCreateParams { + name: format!("subnet-{i}").try_into().unwrap(), + description: String::from("first test subnet"), + }, + ipv4net, + ipv6net, + ); + subnets.push(subnet); + } + Self { vpc_id, subnets } } - fn available_ipv4_addresses(&self) -> [usize; 2] { - [ - self.subnets[0].ipv4_block.size() as usize - - nexus_defaults::NUM_INITIAL_RESERVED_IP_ADDRESSES - - 1, - self.subnets[1].ipv4_block.size() as usize - - nexus_defaults::NUM_INITIAL_RESERVED_IP_ADDRESSES - - 1, - ] + fn available_ipv4_addresses(&self) -> Vec<usize> { + self.subnets + .iter() + .map(|subnet| { + subnet.ipv4_block.size() as usize + - NUM_INITIAL_RESERVED_IP_ADDRESSES + - 1 + }) + .collect() + } } @@ -1635,7 +1654,7 @@ mod tests { } impl TestContext { - async fn new(test_name: &str) -> Self { + async fn new(test_name: &str, n_subnets: u8) -> Self { let logctx = dev::test_setup_log(test_name); let log = logctx.log.new(o!()); let db = test_setup_database(&log).await; @@ -1645,14 +1664,19 @@ mod tests { Arc::new(crate::db::DataStore::new(Arc::clone(&pool))); let opctx = OpContext::for_tests(log.new(o!()), db_datastore.clone()); - Self { - logctx, - opctx, - db, - db_datastore, - net1: Network::new(), - net2: Network::new(), + + use crate::db::schema::vpc_subnet::dsl::vpc_subnet; + let p = db_datastore.pool_authorized(&opctx).await.unwrap(); + let net1 = Network::new(n_subnets); + let net2 = Network::new(n_subnets); + for subnet in net1.subnets.iter().chain(net2.subnets.iter()) { + diesel::insert_into(vpc_subnet) + .values(subnet.clone()) + .execute_async(p) + .await + .unwrap(); } + Self { logctx, opctx, db, db_datastore, net1, net2 } } async fn success(mut self) { @@ -1676,7 +1700,7 @@ mod tests { #[tokio::test] async fn test_insert_running_instance_fails() { let context = - TestContext::new("test_insert_running_instance_fails").await; + TestContext::new("test_insert_running_instance_fails", 2).await; let instance = context.create_instance(external::InstanceState::Running).await; let instance_id = instance.id(); @@ -1707,7 +1731,7 @@ mod tests { #[tokio::test] async fn test_insert_request_exact_ip() { - let context = TestContext::new("test_insert_request_exact_ip").await; + let context = TestContext::new("test_insert_request_exact_ip", 2).await; let instance = context.create_instance(external::InstanceState::Stopped).await; let instance_id = instance.id(); @@ -1743,7 +1767,8 @@ mod tests { #[tokio::test] async fn
test_insert_no_instance_fails() { - let context = TestContext::new("test_insert_no_instance_fails").await; + let context = + TestContext::new("test_insert_no_instance_fails", 2).await; let interface = IncompleteNetworkInterface::new( Uuid::new_v4(), Uuid::new_v4(), @@ -1774,11 +1799,11 @@ mod tests { #[tokio::test] async fn test_insert_sequential_ip_allocation() { let context = - TestContext::new("test_insert_sequential_ip_allocation").await; + TestContext::new("test_insert_sequential_ip_allocation", 2).await; let addresses = context.net1.subnets[0] .ipv4_block .iter() - .skip(nexus_defaults::NUM_INITIAL_RESERVED_IP_ADDRESSES); + .skip(NUM_INITIAL_RESERVED_IP_ADDRESSES); for (i, expected_address) in addresses.take(2).enumerate() { let instance = @@ -1816,7 +1841,7 @@ mod tests { #[tokio::test] async fn test_insert_request_same_ip_fails() { let context = - TestContext::new("test_insert_request_same_ip_fails").await; + TestContext::new("test_insert_request_same_ip_fails", 2).await; let instance = context.create_instance(external::InstanceState::Stopped).await; @@ -1870,7 +1895,7 @@ mod tests { #[tokio::test] async fn test_insert_with_duplicate_name_fails() { let context = - TestContext::new("test_insert_with_duplicate_name_fails").await; + TestContext::new("test_insert_with_duplicate_name_fails", 2).await; let instance = context.create_instance(external::InstanceState::Stopped).await; let interface = IncompleteNetworkInterface::new( @@ -1922,7 +1947,7 @@ mod tests { #[tokio::test] async fn test_insert_same_vpc_subnet_fails() { let context = - TestContext::new("test_insert_same_vpc_subnet_fails").await; + TestContext::new("test_insert_same_vpc_subnet_fails", 2).await; let instance = context.create_instance(external::InstanceState::Stopped).await; let interface = IncompleteNetworkInterface::new( @@ -1967,7 +1992,8 @@ mod tests { #[tokio::test] async fn test_insert_multiple_vpcs_fails() { - let context = TestContext::new("test_insert_multiple_vpcs_fails").await; + let context = + TestContext::new("test_insert_multiple_vpcs_fails", 2).await; let instance = context.create_instance(external::InstanceState::Stopped).await; let interface = IncompleteNetworkInterface::new( @@ -2021,7 +2047,7 @@ mod tests { // hitting the per-instance limit of NICs. #[tokio::test] async fn test_detect_ip_exhaustion() { - let context = TestContext::new("test_detect_ip_exhaustion").await; + let context = TestContext::new("test_detect_ip_exhaustion", 2).await; let n_interfaces = context.net1.available_ipv4_addresses()[0]; for _ in 0..n_interfaces { let instance = @@ -2078,7 +2104,8 @@ mod tests { #[tokio::test] async fn test_insert_multiple_vpc_subnets_succeeds() { let context = - TestContext::new("test_insert_multiple_vpc_subnets_succeeds").await; + TestContext::new("test_insert_multiple_vpc_subnets_succeeds", 2) + .await; let instance = context.create_instance(external::InstanceState::Stopped).await; for (i, subnet) in context.net1.subnets.iter().enumerate() { @@ -2135,28 +2162,13 @@ mod tests { async fn test_limit_number_of_interfaces_per_instance_query() { let context = TestContext::new( "test_limit_number_of_interfaces_per_instance_query", + MAX_NICS_PER_INSTANCE as u8 + 1, ) .await; let instance = context.create_instance(external::InstanceState::Stopped).await; for slot in 0..MAX_NICS_PER_INSTANCE { - // Each NIC must be in a different VPC Subnet. - // - // Note that this subnet is completely fictitious and nonsensical. 
- // It doesn't actually exist in the database, since that would - // violate the name-uniqueness and IP subnet-overlap checks. - let subnet = VpcSubnet::new( - Uuid::new_v4(), - context.net1.vpc_id, - IdentityMetadataCreateParams { - name: context.net1.subnets[0].name().clone(), - description: context.net1.subnets[0] - .description() - .to_string(), - }, - *context.net1.subnets[0].ipv4_block, - *context.net1.subnets[0].ipv6_block, - ); + let subnet = &context.net1.subnets[slot]; let interface = IncompleteNetworkInterface::new( Uuid::new_v4(), instance.id(), @@ -2194,21 +2206,11 @@ mod tests { } // The next one should fail - let subnet = VpcSubnet::new( - Uuid::new_v4(), - context.net1.vpc_id, - IdentityMetadataCreateParams { - name: context.net1.subnets[0].name().clone(), - description: context.net1.subnets[0].description().to_string(), - }, - *context.net1.subnets[0].ipv4_block, - *context.net1.subnets[0].ipv6_block, - ); let interface = IncompleteNetworkInterface::new( Uuid::new_v4(), instance.id(), context.net1.vpc_id, - subnet, + context.net1.subnets.last().unwrap().clone(), IdentityMetadataCreateParams { name: "interface-8".parse().unwrap(), description: String::from("description"), diff --git a/nexus/src/db/queries/vpc.rs b/nexus/src/db/queries/vpc.rs index 24e6d8e8658..cbdccefe045 100644 --- a/nexus/src/db/queries/vpc.rs +++ b/nexus/src/db/queries/vpc.rs @@ -140,7 +140,16 @@ impl QueryFragment for InsertVpcQuery { &self.vpc.firewall_gen, )?; out.push_sql(" AS "); - out.push_identifier(dsl::ipv6_prefix::NAME) + out.push_identifier(dsl::firewall_gen::NAME)?; + out.push_sql(", "); + + out.push_bind_param::( + &self.vpc.subnet_gen, + )?; + out.push_sql(" AS "); + out.push_identifier(dsl::subnet_gen::NAME)?; + + Ok(()) } } @@ -186,6 +195,8 @@ impl QueryFragment for InsertVpcQueryValues { out.push_identifier(dsl::ipv6_prefix::NAME)?; out.push_sql(", "); out.push_identifier(dsl::firewall_gen::NAME)?; + out.push_sql(", "); + out.push_identifier(dsl::subnet_gen::NAME)?; out.push_sql(")"); self.0.walk_ast(out) } diff --git a/nexus/src/db/queries/vpc_subnet.rs b/nexus/src/db/queries/vpc_subnet.rs index 842f4c62482..b0ed18d58ae 100644 --- a/nexus/src/db/queries/vpc_subnet.rs +++ b/nexus/src/db/queries/vpc_subnet.rs @@ -191,7 +191,8 @@ fn push_null_if_overlapping_ip_range<'a>( /// time_created, /// time_modified, /// time_deleted, -/// vpc_id +/// vpc_id, +/// rcgen /// ) AS (VALUES ( /// , /// , @@ -200,6 +201,7 @@ fn push_null_if_overlapping_ip_range<'a>( /// , /// NULL::TIMESTAMPTZ, /// , +/// 0 /// )), /// candidate_ipv4(ipv4_block) AS ( /// SELECT( @@ -276,6 +278,8 @@ impl QueryFragment for FilterConflictingVpcSubnetRangesQuery { out.push_identifier(dsl::time_deleted::NAME)?; out.push_sql(", "); out.push_identifier(dsl::vpc_id::NAME)?; + out.push_sql(","); + out.push_identifier(dsl::rcgen::NAME)?; out.push_sql(") AS (VALUES ("); out.push_bind_param::(&self.subnet.identity.id)?; out.push_sql(", "); @@ -297,7 +301,7 @@ impl QueryFragment for FilterConflictingVpcSubnetRangesQuery { out.push_sql(", "); out.push_sql("NULL::TIMESTAMPTZ, "); out.push_bind_param::(&self.subnet.vpc_id)?; - out.push_sql(")), "); + out.push_sql(", 0)), "); // Push the candidate IPv4 and IPv6 selection subqueries, which return // NULL if the corresponding address range overlaps. 
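
These query changes seed `rcgen` with a literal `0` when a subnet row is created; afterwards the column only moves through the collection-insert helper. As a concrete illustration of what the column buys, here is a hypothetical test sketch (not part of this change) built on the `TestContext` from the network-interface query tests above. The "concurrent" child insert is simulated with a direct `UPDATE` of `rcgen`, standing in for the increment the collection-insert CTE would perform:

```rust
// Hypothetical test sketch, not part of this change: once the generation
// moves, a soft-delete planned against the stale value matches no rows,
// which is exactly the case `vpc_delete_subnet` reports as a concurrent
// modification.
#[tokio::test]
async fn test_stale_rcgen_blocks_subnet_delete_sketch() {
    use crate::db::schema::vpc_subnet::dsl;
    use diesel::ExpressionMethods;
    use diesel::QueryDsl;

    let context =
        TestContext::new("test_stale_rcgen_blocks_subnet_delete_sketch", 2)
            .await;
    let pool =
        context.db_datastore.pool_authorized(&context.opctx).await.unwrap();
    let subnet_id = context.net1.subnets[0].identity.id;

    // Read the current generation, as a lookup with `fetch_for` would.
    let stale_rcgen = dsl::vpc_subnet
        .filter(dsl::id.eq(subnet_id))
        .select(dsl::rcgen)
        .first_async::<i64>(pool)
        .await
        .unwrap();

    // Simulate a concurrent child insert by bumping the generation.
    diesel::update(dsl::vpc_subnet)
        .filter(dsl::id.eq(subnet_id))
        .set(dsl::rcgen.eq(stale_rcgen + 1))
        .execute_async(pool)
        .await
        .unwrap();

    // A soft-delete guarded by the stale generation (the same shape as
    // `vpc_delete_subnet`) must now match zero rows.
    let updated_rows = diesel::update(dsl::vpc_subnet)
        .filter(dsl::time_deleted.is_null())
        .filter(dsl::id.eq(subnet_id))
        .filter(dsl::rcgen.eq(stale_rcgen))
        .set(dsl::time_deleted.eq(Utc::now()))
        .execute_async(pool)
        .await
        .unwrap();
    assert_eq!(updated_rows, 0);

    context.success().await;
}
```
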
@@ -379,6 +383,8 @@ impl QueryFragment for FilterConflictingVpcSubnetRangesQueryValues { out.push_sql(", "); out.push_identifier(dsl::vpc_id::NAME)?; out.push_sql(", "); + out.push_identifier(dsl::rcgen::NAME)?; + out.push_sql(", "); out.push_identifier(dsl::ipv4_block::NAME)?; out.push_sql(", "); out.push_identifier(dsl::ipv6_block::NAME)?; diff --git a/nexus/tests/integration_tests/instances.rs b/nexus/tests/integration_tests/instances.rs index f329306df30..a1a96a156d8 100644 --- a/nexus/tests/integration_tests/instances.rs +++ b/nexus/tests/integration_tests/instances.rs @@ -2537,12 +2537,13 @@ async fn instances_list( } /// Convenience function for starting, stopping, or rebooting an instance. -enum InstanceOp { +pub enum InstanceOp { Start, Stop, Reboot, } -async fn instance_post( + +pub async fn instance_post( client: &ClientTestContext, instance_url: &str, which: InstanceOp, @@ -2590,7 +2591,7 @@ fn instances_eq(instance1: &Instance, instance2: &Instance) { /// have to look up the instance, then get the sled agent associated with that /// instance, and then tell it to finish simulating whatever async transition is /// going on. -async fn instance_simulate(nexus: &Arc<Nexus>, id: &Uuid) { +pub async fn instance_simulate(nexus: &Arc<Nexus>, id: &Uuid) { let sa = nexus.instance_sled_by_id(id).await.unwrap(); sa.instance_finish_transition(id.clone()).await; } diff --git a/nexus/tests/integration_tests/vpc_firewall.rs b/nexus/tests/integration_tests/vpc_firewall.rs index d60c0d35bdf..e8242de4c23 100644 --- a/nexus/tests/integration_tests/vpc_firewall.rs +++ b/nexus/tests/integration_tests/vpc_firewall.rs @@ -134,7 +134,15 @@ async fn test_vpc_firewall(cptestctx: &ControlPlaneTestContext) { .await .unwrap(); - // Delete a VPC and ensure we can't read its firewall anymore + // Delete a VPC Subnet / VPC and ensure we can't read its firewall anymore + NexusRequest::object_delete( + client, + format!("{}/{}/subnets/default", vpcs_url, other_vpc).as_str(), + ) + .authn_as(AuthnMode::PrivilegedUser) + .execute() + .await + .unwrap(); NexusRequest::object_delete( client, format!("{}/{}", vpcs_url, other_vpc).as_str(), diff --git a/nexus/tests/integration_tests/vpc_subnets.rs b/nexus/tests/integration_tests/vpc_subnets.rs index 906658206fd..35f427a4b76 100644 --- a/nexus/tests/integration_tests/vpc_subnets.rs +++ b/nexus/tests/integration_tests/vpc_subnets.rs @@ -2,6 +2,10 @@ // License, v. 2.0. If a copy of the MPL was not distributed with this // file, You can obtain one at https://mozilla.org/MPL/2.0/.
+use crate::integration_tests::instances::instance_post; +use crate::integration_tests::instances::instance_simulate; +use crate::integration_tests::instances::InstanceOp; +use dropshot::HttpErrorResponseBody; use http::method::Method; use http::StatusCode; use nexus_test_utils::http_testing::AuthnMode; @@ -10,7 +14,8 @@ use nexus_test_utils::http_testing::RequestBuilder; use nexus_test_utils::identity_eq; use nexus_test_utils::resource_helpers::objects_list_page_authz; use nexus_test_utils::resource_helpers::{ - create_organization, create_project, create_vpc, + create_instance, create_ip_pool, create_organization, create_project, + create_vpc, }; use nexus_test_utils::ControlPlaneTestContext; use nexus_test_utils_macros::nexus_test; @@ -20,6 +25,81 @@ use omicron_common::api::external::Ipv4Net; use omicron_common::api::external::Ipv6Net; use omicron_nexus::external_api::{params, views::VpcSubnet}; +#[nexus_test] +async fn test_delete_vpc_subnet_with_interfaces_fails( + cptestctx: &ControlPlaneTestContext, +) { + let client = &cptestctx.external_client; + let apictx = &cptestctx.server.apictx; + let nexus = &apictx.nexus; + + // Create a project that we'll use for testing. + let org_name = "test-org"; + let project_name = "springfield-squidport"; + create_organization(&client, &org_name).await; + let _ = create_project(&client, org_name, project_name).await; + create_ip_pool(client, "pool0", None, None).await; + + let vpcs_url = + format!("/organizations/{}/projects/{}/vpcs", org_name, project_name); + let vpc_url = format!("{vpcs_url}/default"); + let subnets_url = format!("{vpc_url}/subnets"); + let subnet_url = format!("{subnets_url}/default"); + + // get subnets should return the default subnet + let subnets = + objects_list_page_authz::<VpcSubnet>(client, &subnets_url).await.items; + assert_eq!(subnets.len(), 1); + + // Create an instance in the default VPC and VPC Subnet. Verify that we + // cannot delete the subnet until the instance is gone.
+ let instance_url = format!( + "/organizations/{org_name}/projects/{project_name}/instances/inst" + ); + let instance = + create_instance(client, &org_name, project_name, "inst").await; + instance_simulate(nexus, &instance.identity.id).await; + let err: HttpErrorResponseBody = NexusRequest::expect_failure( + &client, + StatusCode::BAD_REQUEST, + Method::DELETE, + &subnet_url, + ) + .authn_as(AuthnMode::PrivilegedUser) + .execute() + .await + .unwrap() + .parsed_body() + .unwrap(); + assert_eq!( + err.message, + "VPC Subnet cannot be deleted while instances \ + with network interfaces in the subnet exist", + ); + + // Stop and then delete the instance + instance_post(client, &instance_url, InstanceOp::Stop).await; + instance_simulate(&nexus, &instance.identity.id).await; + NexusRequest::object_delete(&client, &instance_url) + .authn_as(AuthnMode::PrivilegedUser) + .execute() + .await + .unwrap(); + + // Now deleting the subnet should succeed + NexusRequest::object_delete( + &client, + &format!("{}/{}", subnets_url, subnets[0].identity.name), + ) + .authn_as(AuthnMode::PrivilegedUser) + .execute() + .await + .unwrap(); + let subnets = + objects_list_page_authz::<VpcSubnet>(client, &subnets_url).await.items; + assert!(subnets.is_empty()); +} + #[nexus_test] async fn test_vpc_subnets(cptestctx: &ControlPlaneTestContext) { let client = &cptestctx.external_client; diff --git a/nexus/tests/integration_tests/vpcs.rs b/nexus/tests/integration_tests/vpcs.rs index 00138874571..de75a7cb0e1 100644 --- a/nexus/tests/integration_tests/vpcs.rs +++ b/nexus/tests/integration_tests/vpcs.rs @@ -163,7 +163,28 @@ async fn test_vpcs(cptestctx: &ControlPlaneTestContext) { assert_eq!(vpc.identity.description, "another description"); assert_eq!(vpc.dns_name, "def"); - // Delete the VPC. + // Deleting the VPC should fail, since the subnet still exists. + let error: HttpErrorResponseBody = NexusRequest::expect_failure( + client, + StatusCode::BAD_REQUEST, + Method::DELETE, + &vpc_url, + ) + .authn_as(AuthnMode::PrivilegedUser) + .execute() + .await + .unwrap() + .parsed_body() + .unwrap(); + assert_eq!(error.message, "VPC cannot be deleted while VPC Subnets exist",); + + // Delete the default VPC Subnet and VPC. + let default_subnet_url = format!("{vpc_url}/subnets/default"); + NexusRequest::object_delete(client, &default_subnet_url) + .authn_as(AuthnMode::PrivilegedUser) + .execute() + .await + .unwrap(); NexusRequest::object_delete(client, &vpc_url) .authn_as(AuthnMode::PrivilegedUser) .execute()