-
Notifications
You must be signed in to change notification settings - Fork 61
Support different types of Disk #9274
New issue
Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.
By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.
Already on GitHub? Sign in to your account
base: main
Are you sure you want to change the base?
Changes from 4 commits
22e1eff
2a52cfa
c4d752a
cf65891
f4425c6
227268d
ea48807
f751308
826b5df
2e61172
File filter
Filter by extension
Conversations
Jump to
Diff view
Diff view
There are no files selected for viewing
| Original file line number | Diff line number | Diff line change |
|---|---|---|
|
|
@@ -65,7 +65,6 @@ use nexus_db_errors::OptionalError; | |
| use nexus_db_lookup::DataStoreConnection; | ||
| use nexus_db_lookup::LookupPath; | ||
| use nexus_db_model::CrucibleDataset; | ||
| use nexus_db_model::Disk; | ||
| use nexus_db_model::DnsGroup; | ||
| use nexus_db_model::DnsName; | ||
| use nexus_db_model::DnsVersion; | ||
|
|
@@ -116,7 +115,9 @@ use nexus_db_model::to_db_typed_uuid; | |
| use nexus_db_queries::context::OpContext; | ||
| use nexus_db_queries::db; | ||
| use nexus_db_queries::db::DataStore; | ||
| use nexus_db_queries::db::datastore::CrucibleDisk; | ||
| use nexus_db_queries::db::datastore::CrucibleTargets; | ||
| use nexus_db_queries::db::datastore::Disk; | ||
| use nexus_db_queries::db::datastore::InstanceAndActiveVmm; | ||
| use nexus_db_queries::db::datastore::InstanceStateComputer; | ||
| use nexus_db_queries::db::datastore::SQL_BATCH_SIZE; | ||
|
|
@@ -1910,7 +1911,7 @@ async fn cmd_db_disk_list( | |
|
|
||
| let disks = query | ||
| .limit(i64::from(u32::from(fetch_opts.fetch_limit))) | ||
| .select(Disk::as_select()) | ||
| .select(db::model::Disk::as_select()) | ||
| .load_async(&*datastore.pool_connection_for_tests().await?) | ||
| .await | ||
| .context("loading disks")?; | ||
|
|
@@ -2096,11 +2097,10 @@ async fn cmd_db_rack_list( | |
| Ok(()) | ||
| } | ||
|
|
||
| /// Run `omdb db disk info <UUID>`. | ||
| async fn cmd_db_disk_info( | ||
| async fn crucible_disk_info( | ||
| opctx: &OpContext, | ||
| datastore: &DataStore, | ||
| args: &DiskInfoArgs, | ||
| disk: CrucibleDisk, | ||
| ) -> Result<(), anyhow::Error> { | ||
| // The row describing the instance | ||
| #[derive(Tabled)] | ||
|
|
@@ -2125,20 +2125,17 @@ async fn cmd_db_disk_info( | |
| physical_disk: String, | ||
| } | ||
|
|
||
| use nexus_db_schema::schema::disk::dsl as disk_dsl; | ||
|
|
||
| let conn = datastore.pool_connection_for_tests().await?; | ||
|
|
||
| let disk = disk_dsl::disk | ||
| .filter(disk_dsl::id.eq(args.uuid)) | ||
| .limit(1) | ||
| .select(Disk::as_select()) | ||
| .load_async(&*conn) | ||
| .await | ||
| .context("loading requested disk")?; | ||
| let disk_name = disk.name().to_string(); | ||
|
|
||
| let Some(disk) = disk.into_iter().next() else { | ||
| bail!("no disk: {} found", args.uuid); | ||
| let volume_id = disk.volume_id().to_string(); | ||
|
|
||
| let disk_state = disk.runtime().disk_state.to_string(); | ||
|
|
||
| let import_address = match disk.pantry_address() { | ||
|
There was a problem hiding this comment. Choose a reason for hiding this commentThe reason will be displayed to describe this comment to others. Learn more. I came here to complain about this, but when I actually see what you did, it's right and |
||
| Some(ref pa) => pa.clone().to_string(), | ||
| None => "-".to_string(), | ||
| }; | ||
|
|
||
| // For information about where this disk is attached. | ||
|
|
@@ -2172,7 +2169,7 @@ async fn cmd_db_disk_info( | |
| }; | ||
|
|
||
| let instance_name = instance.instance().name().to_string(); | ||
| let disk_name = disk.name().to_string(); | ||
|
|
||
| if instance.vmm().is_some() { | ||
| let propolis_id = | ||
| instance.instance().runtime().propolis_id.unwrap(); | ||
|
|
@@ -2184,48 +2181,36 @@ async fn cmd_db_disk_info( | |
| .await | ||
| .context("failed to look up sled")?; | ||
|
|
||
| let import_address = match disk.pantry_address { | ||
| Some(ref pa) => pa.clone().to_string(), | ||
| None => "-".to_string(), | ||
| }; | ||
| UpstairsRow { | ||
| host_serial: my_sled.serial_number().to_string(), | ||
| disk_name, | ||
| instance_name, | ||
| propolis_zone: format!("oxz_propolis-server_{}", propolis_id), | ||
| volume_id: disk.volume_id().to_string(), | ||
| disk_state: disk.runtime_state.disk_state.to_string(), | ||
| volume_id, | ||
| disk_state, | ||
| import_address, | ||
| } | ||
| } else { | ||
| let import_address = match disk.pantry_address { | ||
| Some(ref pa) => pa.clone().to_string(), | ||
| None => "-".to_string(), | ||
| }; | ||
| UpstairsRow { | ||
| host_serial: NOT_ON_SLED_MSG.to_string(), | ||
| disk_name, | ||
| instance_name, | ||
| propolis_zone: NO_ACTIVE_PROPOLIS_MSG.to_string(), | ||
| volume_id: disk.volume_id().to_string(), | ||
| disk_state: disk.runtime_state.disk_state.to_string(), | ||
| volume_id, | ||
| disk_state, | ||
| import_address, | ||
| } | ||
| } | ||
| } else { | ||
| // If the disk is not attached to anything, just print empty | ||
| // fields. | ||
| let import_address = match disk.pantry_address { | ||
| Some(ref pa) => pa.clone().to_string(), | ||
| None => "-".to_string(), | ||
| }; | ||
| UpstairsRow { | ||
| host_serial: "-".to_string(), | ||
| disk_name: disk.name().to_string(), | ||
| disk_name, | ||
| instance_name: "-".to_string(), | ||
| propolis_zone: "-".to_string(), | ||
| volume_id: disk.volume_id().to_string(), | ||
| disk_state: disk.runtime_state.disk_state.to_string(), | ||
| volume_id, | ||
| disk_state, | ||
| import_address, | ||
| } | ||
| }; | ||
|
|
@@ -2274,9 +2259,23 @@ async fn cmd_db_disk_info( | |
| println!("{}", table); | ||
|
|
||
| get_and_display_vcr(disk.volume_id(), datastore).await?; | ||
|
|
||
| Ok(()) | ||
| } | ||
|
|
||
| /// Run `omdb db disk info <UUID>`. | ||
| async fn cmd_db_disk_info( | ||
| opctx: &OpContext, | ||
| datastore: &DataStore, | ||
| args: &DiskInfoArgs, | ||
| ) -> Result<(), anyhow::Error> { | ||
| match datastore.disk_get(opctx, args.uuid).await? { | ||
| Disk::Crucible(disk) => { | ||
| crucible_disk_info(opctx, datastore, disk).await | ||
| } | ||
|
There was a problem hiding this comment. Choose a reason for hiding this commentThe reason will be displayed to describe this comment to others. Learn more. Ah, this is where we will update things to display the new disks types. cool cool. |
||
| } | ||
| } | ||
|
|
||
| // Given a UUID, search the database for a volume with that ID | ||
| // If found, attempt to parse the .data field into a VolumeConstructionRequest | ||
| // and display it if successful. | ||
|
|
@@ -2397,25 +2396,22 @@ async fn cmd_db_disk_physical( | |
| .context("loading region")?; | ||
|
|
||
| for rs in regions { | ||
| volume_ids.insert(rs.volume_id().into_untyped_uuid()); | ||
| volume_ids.insert(rs.volume_id()); | ||
| } | ||
| } | ||
|
|
||
| // At this point, we have a list of volume IDs that contain a region | ||
| // that is part of a dataset on a pool on our disk. The next step is | ||
| // to find the virtual disks associated with these volume IDs and | ||
| // display information about those disks. | ||
| use nexus_db_schema::schema::disk::dsl; | ||
| let mut query = dsl::disk.into_boxed(); | ||
| if !fetch_opts.include_deleted { | ||
| query = query.filter(dsl::time_deleted.is_null()); | ||
| } | ||
|
|
||
| let disks = query | ||
| .filter(dsl::volume_id.eq_any(volume_ids)) | ||
| .limit(i64::from(u32::from(fetch_opts.fetch_limit))) | ||
| .select(Disk::as_select()) | ||
| .load_async(&*conn) | ||
| let disks: Vec<CrucibleDisk> = datastore | ||
| .disks_get_matching_volumes( | ||
| &conn, | ||
| &volume_ids, | ||
| fetch_opts.include_deleted, | ||
| i64::from(u32::from(fetch_opts.fetch_limit)), | ||
| ) | ||
| .await | ||
| .context("loading disks")?; | ||
|
|
||
|
|
@@ -3472,26 +3468,20 @@ async fn volume_used_by( | |
| fetch_opts: &DbFetchOptions, | ||
| volumes: &[Uuid], | ||
| ) -> Result<Vec<VolumeUsedBy>, anyhow::Error> { | ||
| let disks_used: Vec<Disk> = { | ||
| let volumes = volumes.to_vec(); | ||
| datastore | ||
| .pool_connection_for_tests() | ||
| .await? | ||
| .transaction_async(async move |conn| { | ||
| use nexus_db_schema::schema::disk::dsl; | ||
|
|
||
| conn.batch_execute_async(ALLOW_FULL_TABLE_SCAN_SQL).await?; | ||
| let disks_used: Vec<CrucibleDisk> = { | ||
| let conn = datastore.pool_connection_for_tests().await?; | ||
| let volumes: HashSet<VolumeUuid> = volumes | ||
| .iter() | ||
| .map(|id| VolumeUuid::from_untyped_uuid(*id)) | ||
| .collect(); | ||
|
|
||
| paginated( | ||
| dsl::disk, | ||
| dsl::id, | ||
| &first_page::<dsl::id>(fetch_opts.fetch_limit), | ||
| ) | ||
| .filter(dsl::volume_id.eq_any(volumes)) | ||
| .select(Disk::as_select()) | ||
| .load_async(&conn) | ||
| .await | ||
| }) | ||
| datastore | ||
| .disks_get_matching_volumes( | ||
| &conn, | ||
| &volumes, | ||
| fetch_opts.include_deleted, | ||
| i64::from(u32::from(fetch_opts.fetch_limit)), | ||
| ) | ||
| .await? | ||
| }; | ||
|
|
||
|
|
@@ -4632,7 +4622,7 @@ async fn cmd_db_instance_info( | |
| } | ||
|
|
||
| let disks = query | ||
| .select(Disk::as_select()) | ||
| .select(db::model::Disk::as_select()) | ||
| .load_async(&*datastore.pool_connection_for_tests().await?) | ||
| .await | ||
| .with_context(ctx)?; | ||
|
|
||
There was a problem hiding this comment.
Choose a reason for hiding this comment
The reason will be displayed to describe this comment to others. Learn more.
Nit, this is already a struct of type `Disk`, so why not just have this field be `type`? You already have a field that is `state` and not `disk_state`.