migrator: allow removing settings in migrations #644

Merged: 4 commits, Feb 1, 2020
9 changes: 2 additions & 7 deletions workspaces/Cargo.lock

Some generated files are not rendered by default.

3 changes: 3 additions & 0 deletions workspaces/api/apiserver/src/datastore/error.rs
@@ -23,6 +23,9 @@ pub enum Error {
#[snafu(display("Reading key '{}' failed: {}", key, source))]
KeyRead { key: String, source: io::Error },

#[snafu(display("Removing key at '{}' failed: {}", path.display(), source))]
DeleteKey { path: PathBuf, source: io::Error },

#[snafu(display("IO error on '{}': {}", path.display(), source))]
Io { path: PathBuf, source: io::Error },

76 changes: 76 additions & 0 deletions workspaces/api/apiserver/src/datastore/filesystem.rs
@@ -97,6 +97,72 @@ impl FilesystemDataStore {

Ok(path_str.into())
}

/// Deletes the given path from the filesystem. Also removes the parent directory if empty
/// (repeatedly, up to the base path), so as to have consistent artifacts on the filesystem
/// after adding and removing keys.
///
/// If the path doesn't exist, we still return Ok for idempotency, but if it exists and we
/// fail to remove it, we return Err.
///
/// If we fail to remove an empty directory, we log an error, but still return Ok. (The
/// error for trying to remove a non-empty directory is not specific, and we don't want to rely
/// on platform-specific error codes or the error description. We could check the directory
/// contents ourselves, but it would be more complex and subject to timing issues.)
fn delete_key_path<P>(&mut self, path: P, committed: Committed) -> Result<()>
where
P: AsRef<Path>,
{
let path = path.as_ref();

// Remove the file. If it doesn't exist, we're still OK.
match fs::remove_file(path) {
Ok(()) => {}
Err(e) => {
if e.kind() != io::ErrorKind::NotFound {
return Err(e).context(error::DeleteKey { path });
}
}
}

// Remove the directory if it's empty, i.e. if the setting we removed was the last setting
// in that prefix. Continue up the tree until the base, in case it was the only thing in
// that subtree.
let base = self.base_path(committed);
if let Some(parent) = path.parent() {
// Note: ancestors() includes 'parent' itself
for parent in parent.ancestors() {
// Stop at the base directory; we don't expect anything here or above to be empty,
// but stop as a safeguard.
if parent == base {
break;
}
if let Err(e) = fs::remove_dir(parent) {
// If the directory doesn't exist, continue up the tree. Modulo timing issues,
// this means the key didn't exist either, which means a previous attempt to remove
// the directory failed or we got an unset request for a bogus key. Either way, we
// can clean up and make things consistent.
if e.kind() == io::ErrorKind::NotFound {
continue;

// "Directory not empty" doesn't have its own ErrorKind, so we have to check a
// platform-specific error number or the error description, neither of which is
// ideal. Still, we can at least log an error in the case we know. Don't
// fail, though, because we've still accomplished our main purpose.
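// (On Linux, raw OS error 39 is ENOTEMPTY.)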
} else if e.raw_os_error() != Some(39) {
error!(
"Failed to delete directory '{}' we believe is empty: {}",
parent.display(),
e
);
}
// We won't be able to delete parent directories if this one still exists.
break;
}
}
}
Ok(())
}
}

// Filesystem helpers
@@ -382,6 +448,11 @@ impl DataStore for FilesystemDataStore {
write_file_mkdir(path, value)
}

fn unset_key(&mut self, key: &Key, committed: Committed) -> Result<()> {
let path = self.data_path(key, committed)?;
self.delete_key_path(path, committed)
}

fn get_metadata_raw(&self, metadata_key: &Key, data_key: &Key) -> Result<Option<String>> {
let path = self.metadata_path(metadata_key, data_key, Committed::Live)?;
read_file_for_key(&metadata_key, &path)
@@ -397,6 +468,11 @@ impl DataStore for FilesystemDataStore {
write_file_mkdir(path, value)
}

fn unset_metadata(&mut self, metadata_key: &Key, data_key: &Key) -> Result<()> {
let path = self.metadata_path(metadata_key, data_key, Committed::Live)?;
self.delete_key_path(path, Committed::Live)
}

/// We commit by copying pending keys to live, then removing pending. Something smarter (lock,
/// atomic flip, etc.) will be required to make the server concurrent.
fn commit(&mut self) -> Result<HashSet<Key>> {
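For reference, a minimal standalone sketch of the same delete-then-prune approach using only the standard library. `remove_and_prune` is a hypothetical helper, not part of this diff, and it omits the NotFound-continue and logging handling that delete_key_path above performs:

```rust
use std::fs;
use std::io;
use std::path::Path;

/// Remove `path`, then remove any now-empty parent directories, walking up
/// until (but not including) `base`. Deletion is idempotent: a missing file
/// is not an error.
fn remove_and_prune(base: &Path, path: &Path) -> io::Result<()> {
    match fs::remove_file(path) {
        Ok(()) => {}
        // Already gone; treat as success so repeated unsets are harmless.
        Err(e) if e.kind() == io::ErrorKind::NotFound => {}
        Err(e) => return Err(e),
    }
    if let Some(parent) = path.parent() {
        // ancestors() yields `parent` itself first, then its parents.
        for dir in parent.ancestors() {
            if dir == base {
                break;
            }
            // remove_dir only succeeds on empty directories; stop at the
            // first one that is still populated or otherwise can't be removed.
            if fs::remove_dir(dir).is_err() {
                break;
            }
        }
    }
    Ok(())
}
```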
25 changes: 22 additions & 3 deletions workspaces/api/apiserver/src/datastore/memory.rs
@@ -9,7 +9,7 @@ use std::mem;
use super::{Committed, DataStore, Key, Result};

#[derive(Debug)]
pub(crate) struct MemoryDataStore {
pub struct MemoryDataStore {
// Uncommitted (pending) data.
pending: HashMap<Key, String>,
// Committed (live) data.
@@ -20,7 +20,7 @@ pub(crate) struct MemoryDataStore {
}

impl MemoryDataStore {
pub(crate) fn new() -> Self {
pub fn new() -> Self {
Self {
pending: HashMap::new(),
live: HashMap::new(),
@@ -103,6 +103,11 @@ impl DataStore for MemoryDataStore {
Ok(())
}

fn unset_key(&mut self, key: &Key, committed: Committed) -> Result<()> {
self.dataset_mut(committed).remove(key);
Ok(())
}

fn key_populated(&self, key: &Key, committed: Committed) -> Result<bool> {
Ok(self.dataset(committed).contains_key(key))
}
@@ -133,6 +138,14 @@ impl DataStore for MemoryDataStore {
Ok(())
}

fn unset_metadata(&mut self, metadata_key: &Key, data_key: &Key) -> Result<()> {
// If we have any metadata for this data key, remove the given metadata key.
if let Some(metadata_for_data) = self.metadata.get_mut(data_key) {
metadata_for_data.remove(metadata_key);
}
Ok(())
}

fn commit(&mut self) -> Result<HashSet<Key>> {
// We need a clone of the pending keys so we can set_keys (which holds &mut self) and we
// have to clone the keys anyway for the return value.
@@ -164,7 +177,7 @@ mod test {
use maplit::hashset;

#[test]
fn get_set() {
fn get_set_unset() {
let mut m = MemoryDataStore::new();
let k = Key::new(KeyType::Data, "memtest").unwrap();
let v = "memvalue";
@@ -178,6 +191,12 @@
m.get_metadata_raw(&mdkey, &k).unwrap(),
Some(md.to_string())
);

m.unset_metadata(&mdkey, &k).unwrap();
assert_eq!(m.get_metadata_raw(&mdkey, &k).unwrap(), None);

m.unset_key(&k, Committed::Live).unwrap();
assert_eq!(m.get_key(&k, Committed::Live).unwrap(), None);
}

#[test]
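The memory datastore is now fully public (it was pub(crate) and compiled only under cfg(test)), so dependent crates such as migration-helpers can exercise set/unset behavior against an in-memory datastore in their tests. A minimal sketch of that kind of use, assuming the apiserver crate's module paths and that Key accepts dotted setting names:

```rust
use apiserver::datastore::memory::MemoryDataStore;
use apiserver::datastore::{Committed, DataStore, Key, KeyType};

fn main() -> apiserver::datastore::Result<()> {
    let mut ds = MemoryDataStore::new();
    let key = Key::new(KeyType::Data, "settings.example")?;

    ds.set_key(&key, "\"value\"", Committed::Live)?;
    assert_eq!(ds.get_key(&key, Committed::Live)?, Some("\"value\"".to_string()));

    // The new unset_key removes the setting; unsetting a missing key is also Ok.
    ds.unset_key(&key, Committed::Live)?;
    assert_eq!(ds.get_key(&key, Committed::Live)?, None);
    Ok(())
}
```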
37 changes: 33 additions & 4 deletions workspaces/api/apiserver/src/datastore/mod.rs
@@ -13,8 +13,7 @@ pub mod deserialization;
pub mod error;
pub mod filesystem;
pub mod key;
#[cfg(test)]
pub(crate) mod memory;
pub mod memory;
pub mod serialization;

pub use error::{Error, Result};
@@ -61,6 +60,10 @@ pub trait DataStore {
fn get_key(&self, key: &Key, committed: Committed) -> Result<Option<String>>;
/// Set the value of a single data key in the datastore.
fn set_key<S: AsRef<str>>(&mut self, key: &Key, value: S, committed: Committed) -> Result<()>;
/// Removes the given data key from the datastore. If we succeeded, we return Ok(()); if
/// the key didn't exist, we also return Ok(()); we return Err only if we failed to check
/// or remove the key.
fn unset_key(&mut self, key: &Key, committed: Committed) -> Result<()>;

/// Retrieve the value for a single metadata key from the datastore. Values will inherit from
/// earlier in the tree, if more specific values are not found later.
@@ -93,6 +96,10 @@ pub trait DataStore {
data_key: &Key,
value: S,
) -> Result<()>;
/// Removes the given metadata key from the given data key in the datastore. If we
/// succeeded, we return Ok(()); if the data or metadata key didn't exist, we also return
/// Ok(()); we return Err only if we failed to check or remove the key.
fn unset_metadata(&mut self, metadata_key: &Key, data_key: &Key) -> Result<()>;

/// Applies pending changes to the live datastore. Returns the list of changed keys.
fn commit(&mut self) -> Result<HashSet<Key>>;
@@ -114,6 +121,17 @@ pub trait DataStore {
}
Ok(())
}
/// Removes multiple data keys at once in the data store.
///
/// Implementers can replace the default implementation if there's a faster way than
/// unsetting each key individually.
fn unset_keys(&mut self, keys: &HashSet<Key>, committed: Committed) -> Result<()> {
for key in keys {
trace!("Unsetting data key {}", key.name());
self.unset_key(key, committed)?;
}
Ok(())
}

/// Retrieves all keys starting with the given prefix, returning them in a Key -> value map.
///
@@ -237,25 +255,36 @@ pub type Value = serde_json::Value;
mod test {
use super::memory::MemoryDataStore;
use super::{Committed, DataStore, Key, KeyType};
use maplit::hashmap;
use maplit::{hashmap, hashset};

#[test]
fn set_keys() {
fn set_unset_keys() {
let mut m = MemoryDataStore::new();

let k1 = Key::new(KeyType::Data, "memtest1").unwrap();
let k2 = Key::new(KeyType::Data, "memtest2").unwrap();
let k3 = Key::new(KeyType::Data, "memtest3").unwrap();
let v1 = "memvalue1".to_string();
let v2 = "memvalue2".to_string();
let v3 = "memvalue3".to_string();
let data = hashmap!(
k1.clone() => &v1,
k2.clone() => &v2,
k3.clone() => &v3,
);

m.set_keys(&data, Committed::Pending).unwrap();

assert_eq!(m.get_key(&k1, Committed::Pending).unwrap(), Some(v1));
assert_eq!(m.get_key(&k2, Committed::Pending).unwrap(), Some(v2));
assert_eq!(m.get_key(&k3, Committed::Pending).unwrap(), Some(v3.clone()));

let unset = hashset!(k1.clone(), k2.clone());
m.unset_keys(&unset, Committed::Pending).unwrap();

assert_eq!(m.get_key(&k1, Committed::Pending).unwrap(), None);
assert_eq!(m.get_key(&k2, Committed::Pending).unwrap(), None);
assert_eq!(m.get_key(&k3, Committed::Pending).unwrap(), Some(v3));
}

#[test]
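These trait methods also compose with generic code: a migration that needs to drop several deprecated settings can build a set of keys and hand them to unset_keys. A sketch of a hypothetical helper (not part of this diff), assuming the same imports the tests above use:

```rust
use std::collections::HashSet;

use apiserver::datastore::{Committed, DataStore, Key, KeyType, Result};

/// Remove each named data key from the live datastore. Keys that don't exist
/// are ignored, since unset_key treats a missing key as success.
fn remove_deprecated<D: DataStore>(datastore: &mut D, names: &[&str]) -> Result<()> {
    let keys = names
        .iter()
        .map(|name| Key::new(KeyType::Data, name))
        .collect::<Result<HashSet<Key>>>()?;
    datastore.unset_keys(&keys, Committed::Live)
}
```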
4 changes: 4 additions & 0 deletions workspaces/api/migration/migration-helpers/Cargo.toml
@@ -9,3 +9,7 @@ publish = false
apiserver = { path = "../../apiserver" }
snafu = "0.6"
toml = "0.5"

[dev-dependencies]
maplit = "1.0"
serde_json = "1.0"
44 changes: 40 additions & 4 deletions workspaces/api/migration/migration-helpers/src/args.rs
@@ -7,7 +7,8 @@ use crate::{MigrationType, Result};

/// Stores user-supplied arguments.
pub struct Args {
pub datastore_path: String,
pub source_datastore: String,
pub target_datastore: String,
pub migration_type: MigrationType,
}

@@ -16,7 +17,8 @@ fn usage() -> ! {
let program_name = env::args().next().unwrap_or_else(|| "program".to_string());
eprintln!(
r"Usage: {}
--datastore-path PATH
--source-datastore PATH
--target-datastore PATH
( --forward | --backward )",
program_name
);
@@ -32,16 +34,33 @@ fn usage_msg<S: AsRef<str>>(msg: S) -> ! {
/// Parses user arguments into an Args structure.
pub(crate) fn parse_args(args: env::Args) -> Result<Args> {
let mut migration_type = None;
let mut source_datastore = None;
let mut target_datastore = None;
let mut datastore_path = None;

let mut iter = args.skip(1);
while let Some(arg) = iter.next() {
match arg.as_ref() {
"--source-datastore" => {
source_datastore =
Some(iter.next().unwrap_or_else(|| {
usage_msg("Did not give argument to --source-datastore")
}))
}

"--target-datastore" => {
target_datastore =
Some(iter.next().unwrap_or_else(|| {
usage_msg("Did not give argument to --target-datastore")
}))
}

// Support the argument of the old migrator interface, with some caveats
"--datastore-path" => {
datastore_path = Some(
iter.next()
.unwrap_or_else(|| usage_msg("Did not give argument to --datastore-path")),
)
);
}

"--forward" => migration_type = Some(MigrationType::Forward),
@@ -51,8 +70,25 @@ pub(crate) fn parse_args(args: env::Args) -> Result<Args> {
}
}

if let Some(datastore_path) = datastore_path {
// For compatibility with old migration interface that had single source+target; other code
// in migration-helpers checks if source==target to see if it needs to do a workaround.
if source_datastore.is_some() || target_datastore.is_some() {
usage_msg("--datastore-path is only for backward compatibility and \
cannot be used with --source-datastore / --target-datastore");
}
source_datastore = Some(datastore_path.clone());
target_datastore = Some(datastore_path);
} else {
// In no other case should they be the same; we use it for compatibility checks.
if source_datastore == target_datastore {
usage_msg("--source-datastore and --target-datastore cannot be the same");
}
}

Ok(Args {
datastore_path: datastore_path.unwrap_or_else(|| usage()),
source_datastore: source_datastore.unwrap_or_else(|| usage()),
target_datastore: target_datastore.unwrap_or_else(|| usage()),
migration_type: migration_type.unwrap_or_else(|| usage()),
})
}
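With these changes, a migration built on migration-helpers is invoked with separate source and target paths, for example `migration-binary --source-datastore /path/to/old --target-datastore /path/to/new --forward` (the binary name here is illustrative). The old single-path form, `--datastore-path /path/to/ds --forward`, is still accepted and simply sets both source and target to the same path, which other migration-helpers code uses to detect the legacy interface.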