
Replace unmaintained dotenv dependency with dotenvy #6461

Merged · 1 commit · May 10, 2023
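The change is mechanical: the pinned `dotenv = "=0.15.0"` dependency becomes `dotenvy = "=0.15.7"`, and every `dotenv::var` call site becomes `dotenvy::var`. As a minimal sketch (not part of this PR), dotenvy preserves the dotenv API, so callers keep the same shape:

```rust
// Minimal sketch, not from this PR: dotenvy::var reads the variable
// from the process environment and any `.env` file, matching the
// behavior of the old dotenv::var call.
fn main() {
    // Before the migration this would have been dotenv::var(...).
    match dotenvy::var("DATABASE_URL") {
        Ok(url) => println!("DATABASE_URL = {url}"),
        Err(_) => println!("DATABASE_URL is not set"),
    }
}
```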
10 changes: 5 additions & 5 deletions Cargo.lock

Some generated files are not rendered by default.

4 changes: 2 additions & 2 deletions Cargo.toml
@@ -47,7 +47,7 @@ dialoguer = "=0.10.4"
 diesel = { version = "=2.0.4", features = ["postgres", "serde_json", "chrono", "r2d2"] }
 diesel_full_text_search = "=2.0.0"
 diesel_migrations = { version = "=2.0.0", features = ["postgres"] }
-dotenv = "=0.15.0"
+dotenvy = "=0.15.7"
 flate2 = "=1.0.26"
 futures-channel = { version = "=0.3.28", default-features = false }
 futures-util = "=0.3.28"
@@ -102,4 +102,4 @@ tower-service = "=0.3.2"
 [build-dependencies]
 diesel = { version = "=2.0.4", features = ["postgres"] }
 diesel_migrations = { version = "=2.0.0", features = ["postgres"] }
-dotenv = "=0.15.0"
+dotenvy = "=0.15.7"
2 changes: 1 addition & 1 deletion build.rs
@@ -7,7 +7,7 @@ fn main() {
     println!("cargo:rerun-if-changed=.env");
     println!("cargo:rerun-if-changed=migrations/");
     if env::var("PROFILE") == Ok("debug".into()) {
-        if let Ok(database_url) = dotenv::var("TEST_DATABASE_URL") {
+        if let Ok(database_url) = dotenvy::var("TEST_DATABASE_URL") {
             let connection = &mut PgConnection::establish(&database_url)
                 .expect("Could not connect to TEST_DATABASE_URL");
             let migrations = FileBasedMigrations::find_migrations_directory()
2 changes: 1 addition & 1 deletion cargo-registry-index/Cargo.toml
@@ -15,7 +15,7 @@ testing = []
 [dependencies]
 anyhow = "=1.0.71"
 base64 = "=0.13.1"
-dotenv = "=0.15.0"
+dotenvy = "=0.15.7"
 git2 = "=0.17.1"
 serde = { version = "=1.0.162", features = ["derive"] }
 serde_json = "=1.0.96"
10 changes: 5 additions & 5 deletions cargo-registry-index/lib.rs
@@ -230,12 +230,12 @@ pub struct RepositoryConfig {

 impl RepositoryConfig {
     pub fn from_environment() -> Self {
-        let username = dotenv::var("GIT_HTTP_USER");
-        let password = dotenv::var("GIT_HTTP_PWD");
-        let http_url = dotenv::var("GIT_REPO_URL");
+        let username = dotenvy::var("GIT_HTTP_USER");
+        let password = dotenvy::var("GIT_HTTP_PWD");
+        let http_url = dotenvy::var("GIT_REPO_URL");

-        let ssh_key = dotenv::var("GIT_SSH_KEY");
-        let ssh_url = dotenv::var("GIT_SSH_REPO_URL");
+        let ssh_key = dotenvy::var("GIT_SSH_KEY");
+        let ssh_url = dotenvy::var("GIT_SSH_REPO_URL");

         match (username, password, http_url, ssh_key, ssh_url) {
             (extra_user, extra_pass, extra_http_url, Ok(encoded_key), Ok(ssh_url)) => {
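Each `dotenvy::var` call above returns a `Result<String, _>`, and `from_environment` matches on the whole tuple at once to decide between SSH and HTTP index access. A standalone sketch of that tuple-match pattern (variable precedence and return values are assumptions for illustration, not the crate's actual logic):

```rust
// Illustrative sketch of matching on a tuple of Results, as
// RepositoryConfig::from_environment does above.
fn pick_protocol() -> &'static str {
    let ssh_key = dotenvy::var("GIT_SSH_KEY");
    let ssh_url = dotenvy::var("GIT_SSH_REPO_URL");
    let http_url = dotenvy::var("GIT_REPO_URL");

    match (ssh_key, ssh_url, http_url) {
        // Both SSH settings present: use the SSH remote.
        (Ok(_), Ok(_), _) => "ssh",
        // Otherwise fall back to the HTTP remote if configured.
        (_, _, Ok(_)) => "https",
        _ => "unconfigured",
    }
}

fn main() {
    println!("index protocol: {}", pick_protocol());
}
```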
4 changes: 2 additions & 2 deletions src/admin/on_call.rs
@@ -25,8 +25,8 @@ impl Event {
     /// If the variant is `Trigger`, this will page whoever is on call
     /// (potentially waking them up at 3 AM).
     pub fn send(self) -> Result<()> {
-        let api_token = dotenv::var("PAGERDUTY_API_TOKEN")?;
-        let service_key = dotenv::var("PAGERDUTY_INTEGRATION_KEY")?;
+        let api_token = dotenvy::var("PAGERDUTY_API_TOKEN")?;
+        let service_key = dotenvy::var("PAGERDUTY_INTEGRATION_KEY")?;

         let response = Client::new()
             .post("https://events.pagerduty.com/generic/2010-04-15/create_event.json")
6 changes: 3 additions & 3 deletions src/app.rs
@@ -92,14 +92,14 @@ impl App {
             ),
         );

-        let db_helper_threads = match (dotenv::var("DB_HELPER_THREADS"), config.env()) {
+        let db_helper_threads = match (dotenvy::var("DB_HELPER_THREADS"), config.env()) {
             (Ok(num), _) => num.parse().expect("couldn't parse DB_HELPER_THREADS"),
             (_, Env::Production) => 3,
             _ => 1,
         };

         // Used as the connection and statement timeout value for the database pool(s)
-        let db_connection_timeout = match (dotenv::var("DB_TIMEOUT"), config.env()) {
+        let db_connection_timeout = match (dotenvy::var("DB_TIMEOUT"), config.env()) {
             (Ok(num), _) => num.parse().expect("couldn't parse DB_TIMEOUT"),
             (_, Env::Production) => 10,
             (_, Env::Test) => 1,
@@ -170,7 +170,7 @@ impl App {
             .time_to_live(config.version_id_cache_ttl)
             .build();

-        let fastboot_client = match dotenv::var("USE_FASTBOOT") {
+        let fastboot_client = match dotenvy::var("USE_FASTBOOT") {
             Ok(val) if val == "staging-experimental" => Some(reqwest::Client::new()),
             _ => None,
         };
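The two hunks above show the recurring shape this PR touches throughout `src/app.rs`: read an optional variable through dotenvy, parse it if set, otherwise fall back to an environment-specific default. A hypothetical helper capturing that shape (not code from this repository):

```rust
use std::str::FromStr;

// Hypothetical helper, not from this PR: parse an optional
// environment variable via dotenvy, or fall back to a default.
fn var_parsed_or<T: FromStr>(key: &str, default: T) -> T {
    match dotenvy::var(key) {
        Ok(raw) => raw
            .parse()
            .unwrap_or_else(|_| panic!("couldn't parse {key}")),
        Err(_) => default,
    }
}

fn main() {
    let db_helper_threads: u32 = var_parsed_or("DB_HELPER_THREADS", 1);
    println!("db helper threads: {db_helper_threads}");
}
```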
4 changes: 2 additions & 2 deletions src/bin/background-worker.rs
@@ -51,14 +51,14 @@ fn main() {

     let db_url = db::connection_url(&config.db, &config.db.primary.url);

-    let job_start_timeout = dotenv::var("BACKGROUND_JOB_TIMEOUT")
+    let job_start_timeout = dotenvy::var("BACKGROUND_JOB_TIMEOUT")
         .unwrap_or_else(|_| "30".into())
         .parse()
         .expect("Invalid value for `BACKGROUND_JOB_TIMEOUT`");

     info!("Cloning index");

-    if dotenv::var("HEROKU").is_ok() {
+    if dotenvy::var("HEROKU").is_ok() {
         ssh::write_known_hosts_file().unwrap();
     }
6 changes: 3 additions & 3 deletions src/bin/monitor.rs
@@ -37,7 +37,7 @@ fn check_failing_background_jobs(conn: &mut PgConnection) -> Result<()> {
     println!("Checking for failed background jobs");

     // Max job execution time in minutes
-    let max_job_time = dotenv::var("MAX_JOB_TIME")
+    let max_job_time = dotenvy::var("MAX_JOB_TIME")
         .map(|s| s.parse::<i32>().unwrap())
         .unwrap_or(15);

@@ -78,7 +78,7 @@ fn check_stalled_update_downloads(conn: &mut PgConnection) -> Result<()> {
     println!("Checking for stalled background jobs");

     // Max job execution time in minutes
-    let max_job_time = dotenv::var("MONITOR_MAX_UPDATE_DOWNLOADS_TIME")
+    let max_job_time = dotenvy::var("MONITOR_MAX_UPDATE_DOWNLOADS_TIME")
         .map(|s| s.parse::<u32>().unwrap() as i64)
         .unwrap_or(120);

@@ -113,7 +113,7 @@ fn check_spam_attack(conn: &mut PgConnection) -> Result<()> {

     println!("Checking for crates indicating someone is spamming us");

-    let bad_crate_names = dotenv::var("SPAM_CRATE_NAMES");
+    let bad_crate_names = dotenvy::var("SPAM_CRATE_NAMES");
     let bad_crate_names: Vec<_> = bad_crate_names
         .as_ref()
         .map(|s| s.split(',').collect())
8 changes: 4 additions & 4 deletions src/bin/server.rs
@@ -43,9 +43,9 @@ fn main() -> Result<(), Box<dyn std::error::Error>> {
     let normalize_path = axum::middleware::from_fn(normalize_path);
     let axum_router = normalize_path.layer(axum_router);

-    let heroku = dotenv::var("HEROKU").is_ok();
-    let fastboot = dotenv::var("USE_FASTBOOT").is_ok();
-    let dev_docker = dotenv::var("DEV_DOCKER").is_ok();
+    let heroku = dotenvy::var("HEROKU").is_ok();
+    let fastboot = dotenvy::var("USE_FASTBOOT").is_ok();
+    let dev_docker = dotenvy::var("DEV_DOCKER").is_ok();

     let ip = if dev_docker {
         [0, 0, 0, 0]
@@ -57,7 +57,7 @@
         _ => 8888,
     };

-    let threads = dotenv::var("SERVER_THREADS")
+    let threads = dotenvy::var("SERVER_THREADS")
         .map(|s| s.parse().expect("SERVER_THREADS was not a valid number"))
         .unwrap_or_else(|_| match env {
             Env::Development => 5,
14 changes: 7 additions & 7 deletions src/config.rs
@@ -128,18 +128,18 @@ impl Default for Server {
             excluded_crate_names,
             domain_name: domain_name(),
             allowed_origins,
-            downloads_persist_interval_ms: dotenv::var("DOWNLOADS_PERSIST_INTERVAL_MS")
+            downloads_persist_interval_ms: dotenvy::var("DOWNLOADS_PERSIST_INTERVAL_MS")
                 .map(|interval| {
                     interval
                         .parse()
                         .expect("invalid DOWNLOADS_PERSIST_INTERVAL_MS")
                 })
                 .unwrap_or(60_000), // 1 minute
             ownership_invitations_expiration_days: 30,
-            metrics_authorization_token: dotenv::var("METRICS_AUTHORIZATION_TOKEN").ok(),
+            metrics_authorization_token: dotenvy::var("METRICS_AUTHORIZATION_TOKEN").ok(),
             use_test_database_pool: false,
             instance_metrics_log_every_seconds: env_optional("INSTANCE_METRICS_LOG_EVERY_SECONDS"),
-            force_unconditional_redirects: dotenv::var("FORCE_UNCONDITIONAL_REDIRECTS").is_ok(),
+            force_unconditional_redirects: dotenvy::var("FORCE_UNCONDITIONAL_REDIRECTS").is_ok(),
             blocked_routes: env_optional("BLOCKED_ROUTES")
                 .map(|routes: String| routes.split(',').map(|s| s.into()).collect())
                 .unwrap_or_else(HashSet::new),
@@ -148,7 +148,7 @@ impl Default for Server {
             version_id_cache_ttl: Duration::from_secs(
                 env_optional("VERSION_ID_CACHE_TTL").unwrap_or(DEFAULT_VERSION_ID_CACHE_TTL),
             ),
-            cdn_user_agent: dotenv::var("WEB_CDN_USER_AGENT")
+            cdn_user_agent: dotenvy::var("WEB_CDN_USER_AGENT")
                 .unwrap_or_else(|_| "Amazon CloudFront".into()),
             balance_capacity: BalanceCapacityConfig::from_environment(),
         }
@@ -166,7 +166,7 @@ impl Server {
 }

 pub(crate) fn domain_name() -> String {
-    dotenv::var("DOMAIN_NAME").unwrap_or_else(|_| "crates.io".into())
+    dotenvy::var("DOMAIN_NAME").unwrap_or_else(|_| "crates.io".into())
 }

 /// Parses a CIDR block string to a valid `IpNetwork` struct.
@@ -196,10 +196,10 @@ fn parse_cidr_block(block: &str) -> anyhow::Result<IpNetwork> {
 }

 fn blocked_traffic() -> Vec<(String, Vec<String>)> {
-    let pattern_list = dotenv::var("BLOCKED_TRAFFIC").unwrap_or_default();
+    let pattern_list = dotenvy::var("BLOCKED_TRAFFIC").unwrap_or_default();
     parse_traffic_patterns(&pattern_list)
         .map(|(header, value_env_var)| {
-            let value_list = dotenv::var(value_env_var).unwrap_or_default();
+            let value_list = dotenvy::var(value_env_var).unwrap_or_default();
             let values = value_list.split(',').map(String::from).collect();
             (header.into(), values)
         })
36 changes: 18 additions & 18 deletions src/config/base.rs
@@ -17,7 +17,7 @@ pub struct Base {

 impl Base {
     pub fn from_environment() -> Self {
-        let heroku = dotenv::var("HEROKU").is_ok();
+        let heroku = dotenvy::var("HEROKU").is_ok();
         let env = if heroku {
             Env::Production
         } else {
@@ -33,7 +33,7 @@ impl Base {
             }
             // In Development mode, either running as a primary instance or a read-only mirror
             _ => {
-                if dotenv::var("S3_BUCKET").is_ok() {
+                if dotenvy::var("S3_BUCKET").is_ok() {
                     // If we've set the `S3_BUCKET` variable to any value, use all of the values
                     // for the related S3 environment variables and configure the app to upload to
                     // and read from S3 like production does. All values except for bucket are
@@ -60,17 +60,17 @@ impl Base {
                     bucket: Box::new(s3::Bucket::new(
                         String::from("alexcrichton-test"),
                         None,
-                        dotenv::var("AWS_ACCESS_KEY").unwrap_or_default(),
-                        dotenv::var("AWS_SECRET_KEY").unwrap_or_default(),
+                        dotenvy::var("AWS_ACCESS_KEY").unwrap_or_default(),
+                        dotenvy::var("AWS_SECRET_KEY").unwrap_or_default(),
                         // When testing we route all API traffic over HTTP so we can
                         // sniff/record it, but everywhere else we use https
                         "http",
                     )),
                     index_bucket: Some(Box::new(s3::Bucket::new(
                         String::from("alexcrichton-test"),
                         None,
-                        dotenv::var("AWS_ACCESS_KEY").unwrap_or_default(),
-                        dotenv::var("AWS_SECRET_KEY").unwrap_or_default(),
+                        dotenvy::var("AWS_ACCESS_KEY").unwrap_or_default(),
+                        dotenvy::var("AWS_SECRET_KEY").unwrap_or_default(),
                         // When testing we route all API traffic over HTTP so we can
                         // sniff/record it, but everywhere else we use https
                         "http",
@@ -88,10 +88,10 @@ impl Base {
     }

     fn s3_panic_if_missing_keys() -> Uploader {
-        let index_bucket = match dotenv::var("S3_INDEX_BUCKET") {
+        let index_bucket = match dotenvy::var("S3_INDEX_BUCKET") {
             Ok(name) => Some(Box::new(s3::Bucket::new(
                 name,
-                dotenv::var("S3_INDEX_REGION").ok(),
+                dotenvy::var("S3_INDEX_REGION").ok(),
                 env("AWS_ACCESS_KEY"),
                 env("AWS_SECRET_KEY"),
                 "https",
@@ -101,37 +101,37 @@ impl Base {
         Uploader::S3 {
             bucket: Box::new(s3::Bucket::new(
                 env("S3_BUCKET"),
-                dotenv::var("S3_REGION").ok(),
+                dotenvy::var("S3_REGION").ok(),
                 env("AWS_ACCESS_KEY"),
                 env("AWS_SECRET_KEY"),
                 "https",
             )),
             index_bucket,
-            cdn: dotenv::var("S3_CDN").ok(),
+            cdn: dotenvy::var("S3_CDN").ok(),
         }
     }

     fn s3_maybe_read_only() -> Uploader {
-        let index_bucket = match dotenv::var("S3_INDEX_BUCKET") {
+        let index_bucket = match dotenvy::var("S3_INDEX_BUCKET") {
             Ok(name) => Some(Box::new(s3::Bucket::new(
                 name,
-                dotenv::var("S3_INDEX_REGION").ok(),
-                dotenv::var("AWS_ACCESS_KEY").unwrap_or_default(),
-                dotenv::var("AWS_SECRET_KEY").unwrap_or_default(),
+                dotenvy::var("S3_INDEX_REGION").ok(),
+                dotenvy::var("AWS_ACCESS_KEY").unwrap_or_default(),
+                dotenvy::var("AWS_SECRET_KEY").unwrap_or_default(),
                 "https",
             ))),
             Err(_) => None,
         };
         Uploader::S3 {
             bucket: Box::new(s3::Bucket::new(
                 env("S3_BUCKET"),
-                dotenv::var("S3_REGION").ok(),
-                dotenv::var("AWS_ACCESS_KEY").unwrap_or_default(),
-                dotenv::var("AWS_SECRET_KEY").unwrap_or_default(),
+                dotenvy::var("S3_REGION").ok(),
+                dotenvy::var("AWS_ACCESS_KEY").unwrap_or_default(),
+                dotenvy::var("AWS_SECRET_KEY").unwrap_or_default(),
                 "https",
             )),
             index_bucket,
-            cdn: dotenv::var("S3_CDN").ok(),
+            cdn: dotenvy::var("S3_CDN").ok(),
         }
     }
 }
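`Base::from_environment` above keys the whole uploader choice off whether `S3_BUCKET` is set. A simplified sketch of that presence check (the `Storage` enum here is invented for illustration; the real code builds `Uploader` values around `s3::Bucket`):

```rust
// Simplified sketch of the selection in base.rs; Storage is a
// hypothetical stand-in for the real Uploader type.
enum Storage {
    S3 { bucket: String, cdn: Option<String> },
    Local,
}

fn storage_from_environment() -> Storage {
    match dotenvy::var("S3_BUCKET") {
        // Any value for S3_BUCKET switches development to S3-backed storage.
        Ok(bucket) => Storage::S3 {
            bucket,
            cdn: dotenvy::var("S3_CDN").ok(),
        },
        Err(_) => Storage::Local,
    }
}

fn main() {
    match storage_from_environment() {
        Storage::S3 { bucket, .. } => println!("uploading to S3 bucket {bucket}"),
        Storage::Local => println!("using local storage"),
    }
}
```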
16 changes: 8 additions & 8 deletions src/config/database_pools.rs
@@ -54,37 +54,37 @@ impl DatabasePools {
     /// This function panics if `DB_OFFLINE=leader` but `READ_ONLY_REPLICA_URL` is unset.
     pub fn full_from_environment(base: &Base) -> Self {
         let leader_url = env("DATABASE_URL");
-        let follower_url = dotenv::var("READ_ONLY_REPLICA_URL").ok();
-        let read_only_mode = dotenv::var("READ_ONLY_MODE").is_ok();
+        let follower_url = dotenvy::var("READ_ONLY_REPLICA_URL").ok();
+        let read_only_mode = dotenvy::var("READ_ONLY_MODE").is_ok();

-        let primary_pool_size = match dotenv::var("DB_PRIMARY_POOL_SIZE") {
+        let primary_pool_size = match dotenvy::var("DB_PRIMARY_POOL_SIZE") {
             Ok(num) => num.parse().expect("couldn't parse DB_PRIMARY_POOL_SIZE"),
             _ => Self::DEFAULT_POOL_SIZE,
         };

-        let replica_pool_size = match dotenv::var("DB_REPLICA_POOL_SIZE") {
+        let replica_pool_size = match dotenvy::var("DB_REPLICA_POOL_SIZE") {
             Ok(num) => num.parse().expect("couldn't parse DB_REPLICA_POOL_SIZE"),
             _ => Self::DEFAULT_POOL_SIZE,
         };

-        let primary_min_idle = match dotenv::var("DB_PRIMARY_MIN_IDLE") {
+        let primary_min_idle = match dotenvy::var("DB_PRIMARY_MIN_IDLE") {
             Ok(num) => Some(num.parse().expect("couldn't parse DB_PRIMARY_MIN_IDLE")),
             _ => None,
         };

-        let replica_min_idle = match dotenv::var("DB_REPLICA_MIN_IDLE") {
+        let replica_min_idle = match dotenvy::var("DB_REPLICA_MIN_IDLE") {
             Ok(num) => Some(num.parse().expect("couldn't parse DB_REPLICA_MIN_IDLE")),
             _ => None,
         };

-        let tcp_timeout_ms = match dotenv::var("DB_TCP_TIMEOUT_MS") {
+        let tcp_timeout_ms = match dotenvy::var("DB_TCP_TIMEOUT_MS") {
             Ok(num) => num.parse().expect("couldn't parse DB_TCP_TIMEOUT_MS"),
             Err(_) => 15 * 1000, // 15 seconds
         };

         let enforce_tls = base.env == Env::Production;

-        match dotenv::var("DB_OFFLINE").as_deref() {
+        match dotenvy::var("DB_OFFLINE").as_deref() {
             // The actual leader is down, use the follower in read-only mode as the primary and
             // don't configure a replica.
             Ok("leader") => Self {
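The final hunk matches on `DB_OFFLINE` through `.as_deref()`, which turns the `Result<String, _>` into a `Result<&str, _>` so string literals can be matched directly. A minimal sketch of that idiom (the `"follower"` arm is an assumption for illustration; only the `"leader"` arm is visible in this diff):

```rust
// Minimal sketch of matching a string-valued env var via as_deref();
// the "follower" arm is assumed, not shown in the diff above.
fn describe_db_offline() -> &'static str {
    match dotenvy::var("DB_OFFLINE").as_deref() {
        Ok("leader") => "leader down: use the read-only replica as primary",
        Ok("follower") => "follower down: keep the leader, skip the replica",
        _ => "normal operation",
    }
}

fn main() {
    println!("{}", describe_db_offline());
}
```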
2 changes: 1 addition & 1 deletion src/controllers/site_metadata.rs
@@ -10,7 +10,7 @@ pub async fn show_deployed_sha(state: AppState) -> impl IntoResponse {
     let read_only = state.config.db.are_all_read_only();

     let deployed_sha =
-        dotenv::var("HEROKU_SLUG_COMMIT").unwrap_or_else(|_| String::from("unknown"));
+        dotenvy::var("HEROKU_SLUG_COMMIT").unwrap_or_else(|_| String::from("unknown"));

     Json(json!({
         "deployed_sha": &deployed_sha[..],