diff --git a/.github/workflows/test_build_release.yml b/.github/workflows/test_build_release.yml
index 1266ae51f..4acf14277 100644
--- a/.github/workflows/test_build_release.yml
+++ b/.github/workflows/test_build_release.yml
@@ -36,6 +36,16 @@ jobs:
           toolchain: stable
           components: llvm-tools-preview
       - uses: Swatinem/rust-cache@v1
+      - name: Check Rust Code
+        uses: actions-rs/cargo@v1
+        with:
+          command: check
+          args: --all-targets
+      - name: Clippy Rust Code
+        uses: actions-rs/cargo@v1
+        with:
+          command: clippy
+          args: --all-targets
       - uses: taiki-e/install-action@cargo-llvm-cov
       - uses: taiki-e/install-action@nextest
       - name: Run Tests
diff --git a/.vscode/extensions.json b/.vscode/extensions.json
index b55ef8bf6..11d11a5c5 100644
--- a/.vscode/extensions.json
+++ b/.vscode/extensions.json
@@ -1,6 +1,6 @@
 {
   "recommendations": [
     "streetsidesoftware.code-spell-checker",
-    "matklad.rust-analyzer"
+    "rust-lang.rust-analyzer"
   ]
 }
\ No newline at end of file
diff --git a/.vscode/settings.json b/.vscode/settings.json
index 72e8db7e0..f1027e9bd 100644
--- a/.vscode/settings.json
+++ b/.vscode/settings.json
@@ -1,17 +1,6 @@
 {
-  "cSpell.words": [
-    "byteorder",
-    "hasher",
-    "leechers",
-    "nanos",
-    "rngs",
-    "Seedable",
-    "thiserror",
-    "torrust",
-    "typenum"
-  ],
   "[rust]": {
     "editor.formatOnSave": true
   },
   "rust-analyzer.checkOnSave.command": "clippy",
-}
+}
\ No newline at end of file
diff --git a/cSpell.json b/cSpell.json
new file mode 100644
index 000000000..1df69e4e7
--- /dev/null
+++ b/cSpell.json
@@ -0,0 +1,57 @@
+{
+  "words": [
+    "AUTOINCREMENT",
+    "automock",
+    "Avicora",
+    "Azureus",
+    "bencode",
+    "binascii",
+    "Bitflu",
+    "bufs",
+    "byteorder",
+    "canonicalize",
+    "canonicalized",
+    "chrono",
+    "clippy",
+    "completei",
+    "downloadedi",
+    "filesd",
+    "Freebox",
+    "hasher",
+    "hexlify",
+    "Hydranode",
+    "incompletei",
+    "intervali",
+    "leecher",
+    "leechers",
+    "libtorrent",
+    "Lphant",
+    "mockall",
+    "nanos",
+    "nextest",
+    "nocapture",
+    "ostr",
+    "Pando",
+    "Rasterbar",
+    "repr",
+    "rngs",
+    "rusqlite",
+    "rustfmt",
+    "Seedable",
+    "Shareaza",
+    "sharktorrent",
+    "socketaddr",
+    "sqllite",
+    "Swatinem",
+    "Swiftbit",
+    "thiserror",
+    "Torrentstorm",
+    "torrust",
+    "typenum",
+    "Unamed",
+    "untuple",
+    "Vagaa",
+    "Xtorrent",
+    "Xunlei"
+  ]
+}
diff --git a/src/api/server.rs b/src/api/server.rs
index 5285c9b2b..5a604aa0c 100644
--- a/src/api/server.rs
+++ b/src/api/server.rs
@@ -9,7 +9,7 @@ use warp::{filters, reply, serve, Filter};
 
 use crate::peer::TorrentPeer;
 use crate::protocol::common::*;
-use crate::tracker::tracker::TorrentTracker;
+use crate::tracker::TorrentTracker;
 
 #[derive(Deserialize, Debug)]
 struct TorrentInfoQuery {
@@ -129,10 +129,7 @@ pub fn start(socket_addr: SocketAddr, tracker: Arc<TorrentTracker>) -> impl warp
     let view_stats_list = filters::method::get()
         .and(filters::path::path("stats"))
         .and(filters::path::end())
-        .map(move || {
-            let tracker = api_stats.clone();
-            tracker
-        })
+        .map(move || api_stats.clone())
         .and_then(|tracker: Arc<TorrentTracker>| async move {
             let mut results = Stats {
                 torrents: 0,
@@ -304,10 +301,7 @@ pub fn start(socket_addr: SocketAddr, tracker: Arc<TorrentTracker>) -> impl warp
         .and(filters::path::path("whitelist"))
         .and(filters::path::path("reload"))
         .and(filters::path::end())
-        .map(move || {
-            let tracker = t7.clone();
-            tracker
-        })
+        .map(move || t7.clone())
         .and_then(|tracker: Arc<TorrentTracker>| async move {
            match tracker.load_whitelist().await {
                Ok(_) => Ok(warp::reply::json(&ActionStatus::Ok)),
@@ -324,10 +318,7 @@ pub fn start(socket_addr: SocketAddr, tracker: Arc<TorrentTracker>) -> impl warp
.and(filters::path::path("keys")) .and(filters::path::path("reload")) .and(filters::path::end()) - .map(move || { - let tracker = t8.clone(); - tracker - }) + .map(move || t8.clone()) .and_then(|tracker: Arc| async move { match tracker.load_keys().await { Ok(_) => Ok(warp::reply::json(&ActionStatus::Ok)), diff --git a/src/config.rs b/src/config.rs index b59d572ea..8c17070d2 100644 --- a/src/config.rs +++ b/src/config.rs @@ -12,14 +12,14 @@ use {std, toml}; use crate::databases::database::DatabaseDrivers; use crate::mode::TrackerMode; -#[derive(Serialize, Deserialize, PartialEq, Debug)] +#[derive(Serialize, Deserialize, PartialEq, Eq, Debug)] pub struct UdpTrackerConfig { pub enabled: bool, pub bind_address: String, } #[serde_as] -#[derive(Serialize, Deserialize, PartialEq, Debug)] +#[derive(Serialize, Deserialize, PartialEq, Eq, Debug)] pub struct HttpTrackerConfig { pub enabled: bool, pub bind_address: String, @@ -30,14 +30,14 @@ pub struct HttpTrackerConfig { pub ssl_key_path: Option, } -#[derive(Serialize, Deserialize, PartialEq, Debug)] +#[derive(Serialize, Deserialize, PartialEq, Eq, Debug)] pub struct HttpApiConfig { pub enabled: bool, pub bind_address: String, pub access_tokens: HashMap, } -#[derive(Serialize, Deserialize, PartialEq, Debug)] +#[derive(Serialize, Deserialize, PartialEq, Eq, Debug)] pub struct Configuration { pub log_level: Option, pub mode: TrackerMode, @@ -140,9 +140,9 @@ impl Configuration { eprintln!("Creating config file.."); let config = Configuration::default(); let _ = config.save_to_file(path); - return Err(ConfigError::Message(format!( - "Please edit the config.TOML in the root folder and restart the tracker." - ))); + return Err(ConfigError::Message( + "Please edit the config.TOML in the root folder and restart the tracker.".to_string(), + )); } let torrust_config: Configuration = config @@ -152,7 +152,7 @@ impl Configuration { Ok(torrust_config) } - pub fn save_to_file(&self, path: &str) -> Result<(), ()> { + pub fn save_to_file(&self, path: &str) -> Result<(), ConfigurationError> { let toml_string = toml::to_string(self).expect("Could not encode TOML value"); fs::write(path, toml_string).expect("Could not write to file!"); Ok(()) @@ -236,7 +236,7 @@ mod tests { let temp_file = temp_directory.join(format!("test_config_{}.toml", Uuid::new_v4())); // Convert to argument type for Configuration::save_to_file - let config_file_path = temp_file.clone(); + let config_file_path = temp_file; let path = config_file_path.to_string_lossy().to_string(); let default_configuration = Configuration::default(); diff --git a/src/databases/database.rs b/src/databases/database.rs index adc735fd2..c67f39a54 100644 --- a/src/databases/database.rs +++ b/src/databases/database.rs @@ -7,7 +7,7 @@ use crate::databases::sqlite::SqliteDatabase; use crate::tracker::key::AuthKey; use crate::InfoHash; -#[derive(Serialize, Deserialize, PartialEq, Debug)] +#[derive(Serialize, Deserialize, PartialEq, Eq, Debug)] pub enum DatabaseDrivers { Sqlite3, MySQL, @@ -55,7 +55,7 @@ pub trait Database: Sync + Send { async fn remove_key_from_keys(&self, key: &str) -> Result; } -#[derive(Debug, Display, PartialEq, Error)] +#[derive(Debug, Display, PartialEq, Eq, Error)] #[allow(dead_code)] pub enum Error { #[display(fmt = "Query returned no rows.")] diff --git a/src/databases/mysql.rs b/src/databases/mysql.rs index 33287df6d..a4d870101 100644 --- a/src/databases/mysql.rs +++ b/src/databases/mysql.rs @@ -19,7 +19,7 @@ pub struct MysqlDatabase { impl MysqlDatabase { pub fn new(db_path: &str) -> Result { - 
-        let opts = Opts::from_url(&db_path).expect("Failed to connect to MySQL database.");
+        let opts = Opts::from_url(db_path).expect("Failed to connect to MySQL database.");
         let builder = OptsBuilder::from_opts(opts);
         let manager = MysqlConnectionManager::new(builder);
         let pool = r2d2::Pool::builder()
diff --git a/src/databases/sqlite.rs b/src/databases/sqlite.rs
index fb66c0b94..ef9f12d9c 100644
--- a/src/databases/sqlite.rs
+++ b/src/databases/sqlite.rs
@@ -135,7 +135,7 @@ impl Database for SqliteDatabase {
         let conn = self.pool.get().map_err(|_| database::Error::DatabaseError)?;
 
         let mut stmt = conn.prepare("SELECT info_hash FROM whitelist WHERE info_hash = ?")?;
-        let mut rows = stmt.query(&[info_hash])?;
+        let mut rows = stmt.query([info_hash])?;
 
         if let Some(row) = rows.next()? {
             let info_hash: String = row.get(0).unwrap();
@@ -223,7 +223,7 @@ impl Database for SqliteDatabase {
    async fn remove_key_from_keys(&self, key: &str) -> Result<usize, database::Error> {
        let conn = self.pool.get().map_err(|_| database::Error::DatabaseError)?;
 
-        match conn.execute("DELETE FROM keys WHERE key = ?", &[key]) {
+        match conn.execute("DELETE FROM keys WHERE key = ?", [key]) {
            Ok(updated) => {
                if updated > 0 {
                    return Ok(updated);
diff --git a/src/http/filters.rs b/src/http/filters.rs
index 514cb804c..42d1592ff 100644
--- a/src/http/filters.rs
+++ b/src/http/filters.rs
@@ -7,7 +7,7 @@ use warp::{reject, Filter, Rejection};
 
 use crate::http::{AnnounceRequest, AnnounceRequestQuery, ScrapeRequest, ServerError, WebResult};
 use crate::tracker::key::AuthKey;
-use crate::tracker::tracker::TorrentTracker;
+use crate::tracker::TorrentTracker;
 use crate::{InfoHash, PeerId, MAX_SCRAPE_TORRENTS};
 
 /// Pass Arc<TorrentTracker> along
@@ -61,12 +61,12 @@ pub fn with_scrape_request(on_reverse_proxy: bool) -> impl Filter<Extract = (ScrapeRequest,), Error = Rejection> + Clone
 
 async fn info_hashes(raw_query: String) -> WebResult<Vec<InfoHash>> {
-    let split_raw_query: Vec<&str> = raw_query.split("&").collect();
+    let split_raw_query: Vec<&str> = raw_query.split('&').collect();
     let mut info_hashes: Vec<InfoHash> = Vec::new();
 
     for v in split_raw_query {
         if v.contains("info_hash") {
-            let raw_info_hash = v.split("=").collect::<Vec<&str>>()[1];
+            let raw_info_hash = v.split('=').collect::<Vec<&str>>()[1];
             let info_hash_bytes = percent_encoding::percent_decode_str(raw_info_hash).collect::<Vec<u8>>();
             let info_hash = InfoHash::from_str(&hex::encode(info_hash_bytes));
             if let Ok(ih) = info_hash {
@@ -77,7 +77,7 @@ async fn info_hashes(raw_query: String) -> WebResult<Vec<InfoHash>> {
 
     if info_hashes.len() > MAX_SCRAPE_TORRENTS as usize {
         Err(reject::custom(ServerError::ExceededInfoHashLimit))
-    } else if info_hashes.len() < 1 {
+    } else if info_hashes.is_empty() {
         Err(reject::custom(ServerError::InvalidInfoHash))
     } else {
         Ok(info_hashes)
@@ -87,7 +87,7 @@ async fn info_hashes(raw_query: String) -> WebResult<Vec<InfoHash>> {
 /// Parse PeerId from raw query string
 async fn peer_id(raw_query: String) -> WebResult<PeerId> {
     // put all query params in a vec
-    let split_raw_query: Vec<&str> = raw_query.split("&").collect();
+    let split_raw_query: Vec<&str> = raw_query.split('&').collect();
 
     let mut peer_id: Option<PeerId> = None;
 
@@ -95,7 +95,7 @@ async fn peer_id(raw_query: String) -> WebResult<PeerId> {
         // look for the peer_id param
         if v.contains("peer_id") {
             // get raw percent_encoded peer_id
-            let raw_peer_id = v.split("=").collect::<Vec<&str>>()[1];
+            let raw_peer_id = v.split('=').collect::<Vec<&str>>()[1];
 
             // decode peer_id
             let peer_id_bytes = percent_encoding::percent_decode_str(raw_peer_id).collect::<Vec<u8>>();
@@ -143,7 +143,7 @@ async fn peer_addr(
 
                 // set client ip to last forwarded ip
                 let x_forwarded_ip = *x_forwarded_ips.last().unwrap();
 
-                IpAddr::from_str(x_forwarded_ip).or_else(|_| Err(reject::custom(ServerError::AddressNotFound)))
+                IpAddr::from_str(x_forwarded_ip).map_err(|_| reject::custom(ServerError::AddressNotFound))
         }
         false => Ok(remote_addr.unwrap().ip()),
     }
diff --git a/src/http/handlers.rs b/src/http/handlers.rs
index 5214bbe6e..87d2d51f6 100644
--- a/src/http/handlers.rs
+++ b/src/http/handlers.rs
@@ -15,7 +15,7 @@ use crate::peer::TorrentPeer;
 use crate::tracker::key::AuthKey;
 use crate::tracker::statistics::TrackerStatisticsEvent;
 use crate::tracker::torrent::{TorrentError, TorrentStats};
-use crate::tracker::tracker::TorrentTracker;
+use crate::tracker::TorrentTracker;
 use crate::InfoHash;
 
 /// Authenticate InfoHash using optional AuthKey
@@ -93,7 +93,7 @@ pub async fn handle_scrape(
     let db = tracker.get_torrents().await;
 
     for info_hash in scrape_request.info_hashes.iter() {
-        let scrape_entry = match db.get(&info_hash) {
+        let scrape_entry = match db.get(info_hash) {
             Some(torrent_info) => {
                 if authenticate(info_hash, &auth_key, tracker.clone()).await.is_ok() {
                     let (seeders, completed, leechers) = torrent_info.get_stats();
@@ -117,7 +117,7 @@ pub async fn handle_scrape(
             },
         };
 
-        files.insert(info_hash.clone(), scrape_entry);
+        files.insert(*info_hash, scrape_entry);
     }
 
     // send stats event
diff --git a/src/http/response.rs b/src/http/response.rs
index 4db12f995..c87b5e0e8 100644
--- a/src/http/response.rs
+++ b/src/http/response.rs
@@ -38,34 +38,34 @@ impl AnnounceResponse {
         for peer in &self.peers {
             match peer.ip {
                 IpAddr::V4(ip) => {
-                    peers_v4.write(&u32::from(ip).to_be_bytes())?;
-                    peers_v4.write(&peer.port.to_be_bytes())?;
+                    peers_v4.write_all(&u32::from(ip).to_be_bytes())?;
+                    peers_v4.write_all(&peer.port.to_be_bytes())?;
                 }
                 IpAddr::V6(ip) => {
-                    peers_v6.write(&u128::from(ip).to_be_bytes())?;
-                    peers_v6.write(&peer.port.to_be_bytes())?;
+                    peers_v6.write_all(&u128::from(ip).to_be_bytes())?;
+                    peers_v6.write_all(&peer.port.to_be_bytes())?;
                 }
             }
         }
 
         let mut bytes: Vec<u8> = Vec::new();
-        bytes.write(b"d8:intervali")?;
-        bytes.write(&self.interval.to_string().as_bytes())?;
-        bytes.write(b"e12:min intervali")?;
-        bytes.write(&self.interval_min.to_string().as_bytes())?;
-        bytes.write(b"e8:completei")?;
-        bytes.write(&self.complete.to_string().as_bytes())?;
-        bytes.write(b"e10:incompletei")?;
-        bytes.write(&self.incomplete.to_string().as_bytes())?;
-        bytes.write(b"e5:peers")?;
-        bytes.write(&peers_v4.len().to_string().as_bytes())?;
-        bytes.write(b":")?;
-        bytes.write(peers_v4.as_slice())?;
-        bytes.write(b"e6:peers6")?;
-        bytes.write(&peers_v6.len().to_string().as_bytes())?;
-        bytes.write(b":")?;
-        bytes.write(peers_v6.as_slice())?;
-        bytes.write(b"e")?;
+        bytes.write_all(b"d8:intervali")?;
+        bytes.write_all(self.interval.to_string().as_bytes())?;
+        bytes.write_all(b"e12:min intervali")?;
+        bytes.write_all(self.interval_min.to_string().as_bytes())?;
+        bytes.write_all(b"e8:completei")?;
+        bytes.write_all(self.complete.to_string().as_bytes())?;
+        bytes.write_all(b"e10:incompletei")?;
+        bytes.write_all(self.incomplete.to_string().as_bytes())?;
+        bytes.write_all(b"e5:peers")?;
+        bytes.write_all(peers_v4.len().to_string().as_bytes())?;
+        bytes.write_all(b":")?;
+        bytes.write_all(peers_v4.as_slice())?;
+        bytes.write_all(b"e6:peers6")?;
+        bytes.write_all(peers_v6.len().to_string().as_bytes())?;
+        bytes.write_all(b":")?;
+        bytes.write_all(peers_v6.as_slice())?;
+        bytes.write_all(b"e")?;
 
         Ok(bytes)
     }
@@ -87,21 +87,21 @@ impl ScrapeResponse {
     pub fn write(&self) -> Result<Vec<u8>, Box<dyn std::error::Error>> {
         let mut bytes: Vec<u8> = Vec::new();
 
-        bytes.write(b"d5:filesd")?;
+        bytes.write_all(b"d5:filesd")?;
 
         for (info_hash, scrape_response_entry) in self.files.iter() {
-            bytes.write(b"20:")?;
-            bytes.write(&info_hash.0)?;
-            bytes.write(b"d8:completei")?;
-            bytes.write(scrape_response_entry.complete.to_string().as_bytes())?;
-            bytes.write(b"e10:downloadedi")?;
-            bytes.write(scrape_response_entry.downloaded.to_string().as_bytes())?;
-            bytes.write(b"e10:incompletei")?;
-            bytes.write(scrape_response_entry.incomplete.to_string().as_bytes())?;
-            bytes.write(b"ee")?;
+            bytes.write_all(b"20:")?;
+            bytes.write_all(&info_hash.0)?;
+            bytes.write_all(b"d8:completei")?;
+            bytes.write_all(scrape_response_entry.complete.to_string().as_bytes())?;
+            bytes.write_all(b"e10:downloadedi")?;
+            bytes.write_all(scrape_response_entry.downloaded.to_string().as_bytes())?;
+            bytes.write_all(b"e10:incompletei")?;
+            bytes.write_all(scrape_response_entry.incomplete.to_string().as_bytes())?;
+            bytes.write_all(b"ee")?;
         }
 
-        bytes.write(b"ee")?;
+        bytes.write_all(b"ee")?;
 
         Ok(bytes)
     }
diff --git a/src/http/routes.rs b/src/http/routes.rs
index a9ca3027f..8bfaf5ed9 100644
--- a/src/http/routes.rs
+++ b/src/http/routes.rs
@@ -6,7 +6,7 @@ use warp::{Filter, Rejection};
 use crate::http::{
     handle_announce, handle_scrape, send_error, with_announce_request, with_auth_key, with_scrape_request, with_tracker,
 };
-use crate::tracker::tracker::TorrentTracker;
+use crate::tracker::TorrentTracker;
 
 /// All routes
 pub fn routes(tracker: Arc<TorrentTracker>) -> impl Filter<Extract = impl warp::Reply, Error = Rejection> + Clone {
diff --git a/src/http/server.rs b/src/http/server.rs
index 8b92d8792..4e48f97e3 100644
--- a/src/http/server.rs
+++ b/src/http/server.rs
@@ -2,7 +2,7 @@ use std::net::SocketAddr;
 use std::sync::Arc;
 
 use crate::http::routes;
-use crate::tracker::tracker::TorrentTracker;
+use crate::tracker::TorrentTracker;
 
 /// Server that listens on HTTP, needs a TorrentTracker
 #[derive(Clone)]
diff --git a/src/jobs/http_tracker.rs b/src/jobs/http_tracker.rs
index ef67f0a7e..2d8f307b4 100644
--- a/src/jobs/http_tracker.rs
+++ b/src/jobs/http_tracker.rs
@@ -4,7 +4,7 @@ use std::sync::Arc;
 use log::{info, warn};
 use tokio::task::JoinHandle;
 
-use crate::tracker::tracker::TorrentTracker;
+use crate::tracker::TorrentTracker;
 use crate::{HttpServer, HttpTrackerConfig};
 
 pub fn start_job(config: &HttpTrackerConfig, tracker: Arc<TorrentTracker>) -> JoinHandle<()> {
diff --git a/src/jobs/torrent_cleanup.rs b/src/jobs/torrent_cleanup.rs
index 6e4b0c77e..04b064043 100644
--- a/src/jobs/torrent_cleanup.rs
+++ b/src/jobs/torrent_cleanup.rs
@@ -4,7 +4,7 @@ use chrono::Utc;
 use log::info;
 use tokio::task::JoinHandle;
 
-use crate::tracker::tracker::TorrentTracker;
+use crate::tracker::TorrentTracker;
 use crate::Configuration;
 
 pub fn start_job(config: &Configuration, tracker: Arc<TorrentTracker>) -> JoinHandle<()> {
diff --git a/src/jobs/tracker_api.rs b/src/jobs/tracker_api.rs
index f3c9ae788..97b1fa3b0 100644
--- a/src/jobs/tracker_api.rs
+++ b/src/jobs/tracker_api.rs
@@ -4,7 +4,7 @@ use log::info;
 use tokio::task::JoinHandle;
 
 use crate::api::server;
-use crate::tracker::tracker::TorrentTracker;
+use crate::tracker::TorrentTracker;
 use crate::Configuration;
 
 pub fn start_job(config: &Configuration, tracker: Arc<TorrentTracker>) -> JoinHandle<()> {
diff --git a/src/jobs/udp_tracker.rs b/src/jobs/udp_tracker.rs
index f93979c9f..00fdaddbe 100644
--- a/src/jobs/udp_tracker.rs
+++ b/src/jobs/udp_tracker.rs
@@ -3,7 +3,7 @@ use std::sync::Arc;
 use log::{error, info, warn};
 use tokio::task::JoinHandle;
 
-use crate::tracker::tracker::TorrentTracker;
+use crate::tracker::TorrentTracker;
 use crate::{UdpServer, UdpTrackerConfig};
 
 pub fn start_job(config: &UdpTrackerConfig, tracker: Arc<TorrentTracker>) -> JoinHandle<()> {
diff --git a/src/main.rs b/src/main.rs
index 08061cd7b..bf832dbf4 100644
--- a/src/main.rs
+++ b/src/main.rs
@@ -2,7 +2,7 @@ use std::sync::Arc;
 
 use log::info;
 use torrust_tracker::stats::setup_statistics;
-use torrust_tracker::tracker::tracker::TorrentTracker;
+use torrust_tracker::tracker::TorrentTracker;
 use torrust_tracker::{ephemeral_instance_keys, logging, setup, static_time, Configuration};
 
 #[tokio::main]
diff --git a/src/protocol/common.rs b/src/protocol/common.rs
index f1bd6a99c..431521764 100644
--- a/src/protocol/common.rs
+++ b/src/protocol/common.rs
@@ -26,17 +26,9 @@ pub enum AnnounceEventDef {
 #[serde(remote = "NumberOfBytes")]
 pub struct NumberOfBytesDef(pub i64);
 
-#[derive(PartialEq, Eq, Hash, Clone, Copy, Debug, Ord)]
+#[derive(PartialEq, Eq, Hash, Clone, Copy, Debug)]
 pub struct InfoHash(pub [u8; 20]);
 
-impl InfoHash {
-    pub fn to_string(&self) -> String {
-        let mut buffer = [0u8; 40];
-        let bytes_out = binascii::bin2hex(&self.0, &mut buffer).ok().unwrap();
-        String::from(std::str::from_utf8(bytes_out).unwrap())
-    }
-}
-
 impl std::fmt::Display for InfoHash {
     fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result {
         let mut chars = [0u8; 40];
@@ -49,7 +41,7 @@ impl std::str::FromStr for InfoHash {
     type Err = binascii::ConvertError;
 
     fn from_str(s: &str) -> Result<Self, Self::Err> {
-        let mut i = Self { 0: [0u8; 20] };
+        let mut i = Self([0u8; 20]);
         if s.len() != 40 {
             return Err(binascii::ConvertError::InvalidInputLength);
         }
@@ -58,6 +50,12 @@ impl std::str::FromStr for InfoHash {
     }
 }
 
+impl Ord for InfoHash {
+    fn cmp(&self, other: &Self) -> std::cmp::Ordering {
+        self.0.cmp(&other.0)
+    }
+}
+
 impl std::cmp::PartialOrd for InfoHash {
     fn partial_cmp(&self, other: &InfoHash) -> Option<std::cmp::Ordering> {
         self.0.partial_cmp(&other.0)
@@ -67,15 +65,15 @@ impl std::cmp::PartialOrd for InfoHash {
 impl std::convert::From<&[u8]> for InfoHash {
     fn from(data: &[u8]) -> InfoHash {
         assert_eq!(data.len(), 20);
-        let mut ret = InfoHash { 0: [0u8; 20] };
+        let mut ret = InfoHash([0u8; 20]);
         ret.0.clone_from_slice(data);
-        return ret;
+        ret
     }
 }
 
-impl std::convert::Into<InfoHash> for [u8; 20] {
-    fn into(self) -> InfoHash {
-        InfoHash { 0: self }
+impl std::convert::From<[u8; 20]> for InfoHash {
+    fn from(val: [u8; 20]) -> Self {
+        InfoHash(val)
     }
 }
 
@@ -206,15 +204,15 @@ impl<'v> serde::de::Visitor<'v> for InfoHashVisitor {
             ));
         }
 
-        let mut res = InfoHash { 0: [0u8; 20] };
+        let mut res = InfoHash([0u8; 20]);
 
-        if let Err(_) = binascii::hex2bin(v.as_bytes(), &mut res.0) {
+        if binascii::hex2bin(v.as_bytes(), &mut res.0).is_err() {
             return Err(serde::de::Error::invalid_value(
                 serde::de::Unexpected::Str(v),
                 &"expected a hexadecimal string",
             ));
         } else {
-            return Ok(res);
+            Ok(res)
         }
     }
 }
@@ -222,15 +220,14 @@ impl<'v> serde::de::Visitor<'v> for InfoHashVisitor {
 #[derive(PartialEq, Eq, Hash, Clone, Debug, PartialOrd, Ord)]
 pub struct PeerId(pub [u8; 20]);
 
-impl PeerId {
-    pub fn to_string(&self) -> String {
+impl std::fmt::Display for PeerId {
+    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
         let mut buffer = [0u8; 20];
         let bytes_out = binascii::bin2hex(&self.0, &mut buffer).ok();
-        return if let Some(bytes_out) = bytes_out {
-            String::from(std::str::from_utf8(bytes_out).unwrap())
-        } else {
-            "".to_string()
-        };
+        match bytes_out {
+            Some(bytes) => write!(f, "{}", std::str::from_utf8(bytes).unwrap()),
+            None => write!(f, ""),
+        }
     }
 }
diff --git a/src/setup.rs b/src/setup.rs
index 0c5ed9004..2ecc1c143 100644
--- a/src/setup.rs
+++ b/src/setup.rs
@@ -4,7 +4,7 @@ use log::warn;
 use tokio::task::JoinHandle;
 
 use crate::jobs::{http_tracker, torrent_cleanup, tracker_api, udp_tracker};
-use crate::tracker::tracker::TorrentTracker;
+use crate::tracker::TorrentTracker;
 use crate::Configuration;
 
 pub async fn setup(config: &Configuration, tracker: Arc<TorrentTracker>) -> Vec<JoinHandle<()>> {
@@ -35,7 +35,7 @@ pub async fn setup(config: &Configuration, tracker: Arc<TorrentTracker>) -> Vec<JoinHandle<()>> {
                 udp_tracker_config.bind_address, config.mode
             );
         } else {
-            jobs.push(udp_tracker::start_job(&udp_tracker_config, tracker.clone()))
+            jobs.push(udp_tracker::start_job(udp_tracker_config, tracker.clone()))
         }
     }
 
@@ -44,17 +44,17 @@ pub async fn setup(config: &Configuration, tracker: Arc<TorrentTracker>) -> Vec<JoinHandle<()>> {
         if !http_tracker_config.enabled {
             continue;
         }
-        jobs.push(http_tracker::start_job(&http_tracker_config, tracker.clone()));
+        jobs.push(http_tracker::start_job(http_tracker_config, tracker.clone()));
     }
 
     // Start HTTP API server
     if config.http_api.enabled {
-        jobs.push(tracker_api::start_job(&config, tracker.clone()));
+        jobs.push(tracker_api::start_job(config, tracker.clone()));
     }
 
     // Remove torrents without peers, every interval
     if config.inactive_peer_cleanup_interval > 0 {
-        jobs.push(torrent_cleanup::start_job(&config, tracker.clone()));
+        jobs.push(torrent_cleanup::start_job(config, tracker.clone()));
     }
 
     jobs
diff --git a/src/tracker/key.rs b/src/tracker/key.rs
index c513b48da..1bf0557a1 100644
--- a/src/tracker/key.rs
+++ b/src/tracker/key.rs
@@ -63,7 +63,7 @@ impl AuthKey {
     }
 }
 
-#[derive(Debug, Display, PartialEq, Error)]
+#[derive(Debug, Display, PartialEq, Eq, Error)]
 #[allow(dead_code)]
 pub enum Error {
     #[display(fmt = "Key could not be verified.")]
diff --git a/src/tracker/mod.rs b/src/tracker/mod.rs
index bbb027a35..77f51098a 100644
--- a/src/tracker/mod.rs
+++ b/src/tracker/mod.rs
@@ -3,4 +3,272 @@ pub mod mode;
 pub mod peer;
 pub mod statistics;
 pub mod torrent;
-pub mod tracker;
+
+use std::collections::btree_map::Entry;
+use std::collections::BTreeMap;
+use std::net::SocketAddr;
+use std::sync::Arc;
+use std::time::Duration;
+
+use tokio::sync::mpsc::error::SendError;
+use tokio::sync::{RwLock, RwLockReadGuard};
+
+use crate::databases::database;
+use crate::databases::database::Database;
+use crate::mode::TrackerMode;
+use crate::peer::TorrentPeer;
+use crate::protocol::common::InfoHash;
+use crate::statistics::{StatsRepository, TrackerStatistics, TrackerStatisticsEvent, TrackerStatisticsEventSender};
+use crate::tracker::key::AuthKey;
+use crate::tracker::torrent::{TorrentEntry, TorrentError, TorrentStats};
+use crate::Configuration;
+
+pub struct TorrentTracker {
+    pub config: Arc<Configuration>,
+    mode: TrackerMode,
+    keys: RwLock<std::collections::HashMap<String, AuthKey>>,
+    whitelist: RwLock<std::collections::HashSet<InfoHash>>,
+    torrents: RwLock<std::collections::BTreeMap<InfoHash, TorrentEntry>>,
+    stats_event_sender: Option<Box<dyn TrackerStatisticsEventSender>>,
+    stats_repository: StatsRepository,
+    database: Box<dyn Database>,
+}
+
+impl TorrentTracker {
+    pub fn new(
+        config: Arc<Configuration>,
+        stats_event_sender: Option<Box<dyn TrackerStatisticsEventSender>>,
+        stats_repository: StatsRepository,
+    ) -> Result<TorrentTracker, r2d2::Error> {
+        let database = database::connect_database(&config.db_driver, &config.db_path)?;
+
+        Ok(TorrentTracker {
+            config: config.clone(),
+            mode: config.mode,
+            keys: RwLock::new(std::collections::HashMap::new()),
+            whitelist: RwLock::new(std::collections::HashSet::new()),
+            torrents: RwLock::new(std::collections::BTreeMap::new()),
+            stats_event_sender,
+            stats_repository,
+            database,
+        })
+    }
+
+    pub fn is_public(&self) -> bool {
+        self.mode == TrackerMode::Public
+    }
+
+    pub fn is_private(&self) -> bool {
+        self.mode == TrackerMode::Private || self.mode == TrackerMode::PrivateListed
+    }
+
+    pub fn is_whitelisted(&self) -> bool {
+        self.mode == TrackerMode::Listed || self.mode == TrackerMode::PrivateListed
+    }
+
+    pub async fn generate_auth_key(&self, lifetime: Duration) -> Result<AuthKey, database::Error> {
+        let auth_key = key::generate_auth_key(lifetime);
+        self.database.add_key_to_keys(&auth_key).await?;
+        self.keys.write().await.insert(auth_key.key.clone(), auth_key.clone());
+        Ok(auth_key)
+    }
+
+    pub async fn remove_auth_key(&self, key: &str) -> Result<(), database::Error> {
+        self.database.remove_key_from_keys(key).await?;
+        self.keys.write().await.remove(key);
+        Ok(())
+    }
+
+    pub async fn verify_auth_key(&self, auth_key: &AuthKey) -> Result<(), key::Error> {
+        match self.keys.read().await.get(&auth_key.key) {
+            None => Err(key::Error::KeyInvalid),
+            Some(key) => key::verify_auth_key(key),
+        }
+    }
+
+    pub async fn load_keys(&self) -> Result<(), database::Error> {
+        let keys_from_database = self.database.load_keys().await?;
+        let mut keys = self.keys.write().await;
+
+        keys.clear();
+
+        for key in keys_from_database {
+            let _ = keys.insert(key.key.clone(), key);
+        }
+
+        Ok(())
+    }
+
+    // Adding torrents is not relevant to public trackers.
+    pub async fn add_torrent_to_whitelist(&self, info_hash: &InfoHash) -> Result<(), database::Error> {
+        self.add_torrent_to_database_whitelist(info_hash).await?;
+        self.add_torrent_to_memory_whitelist(info_hash).await;
+        Ok(())
+    }
+
+    async fn add_torrent_to_database_whitelist(&self, info_hash: &InfoHash) -> Result<(), database::Error> {
+        self.database.add_info_hash_to_whitelist(*info_hash).await?;
+        Ok(())
+    }
+
+    pub async fn add_torrent_to_memory_whitelist(&self, info_hash: &InfoHash) -> bool {
+        self.whitelist.write().await.insert(*info_hash)
+    }
+
+    // Removing torrents is not relevant to public trackers.
+    pub async fn remove_torrent_from_whitelist(&self, info_hash: &InfoHash) -> Result<(), database::Error> {
+        self.database.remove_info_hash_from_whitelist(*info_hash).await?;
+        self.whitelist.write().await.remove(info_hash);
+        Ok(())
+    }
+
+    pub async fn is_info_hash_whitelisted(&self, info_hash: &InfoHash) -> bool {
+        self.whitelist.read().await.contains(info_hash)
+    }
+
+    pub async fn load_whitelist(&self) -> Result<(), database::Error> {
+        let whitelisted_torrents_from_database = self.database.load_whitelist().await?;
+        let mut whitelist = self.whitelist.write().await;
+
+        whitelist.clear();
+
+        for info_hash in whitelisted_torrents_from_database {
+            let _ = whitelist.insert(info_hash);
+        }
+
+        Ok(())
+    }
+
+    pub async fn authenticate_request(&self, info_hash: &InfoHash, key: &Option<AuthKey>) -> Result<(), TorrentError> {
+        // no authentication needed in public mode
+        if self.is_public() {
+            return Ok(());
+        }
+
+        // check if auth_key is set and valid
+        if self.is_private() {
+            match key {
+                Some(key) => {
+                    if self.verify_auth_key(key).await.is_err() {
+                        return Err(TorrentError::PeerKeyNotValid);
+                    }
+                }
+                None => {
+                    return Err(TorrentError::PeerNotAuthenticated);
+                }
+            }
+        }
+
+        // check if info_hash is whitelisted
+        if self.is_whitelisted() && !self.is_info_hash_whitelisted(info_hash).await {
+            return Err(TorrentError::TorrentNotWhitelisted);
+        }
+
+        Ok(())
+    }
+
+    // Loading the torrents from database into memory
+    pub async fn load_persistent_torrents(&self) -> Result<(), database::Error> {
+        let persistent_torrents = self.database.load_persistent_torrents().await?;
+        let mut torrents = self.torrents.write().await;
+
+        for (info_hash, completed) in persistent_torrents {
+            // Skip if torrent entry already exists
+            if torrents.contains_key(&info_hash) {
+                continue;
+            }
+
+            let torrent_entry = TorrentEntry {
+                peers: Default::default(),
+                completed,
+            };
+
+            torrents.insert(info_hash, torrent_entry);
+        }
+
+        Ok(())
+    }
+
+    /// Get all torrent peers for a given torrent filtering out the peer with the client address
+    pub async fn get_torrent_peers(&self, info_hash: &InfoHash, client_addr: &SocketAddr) -> Vec<TorrentPeer> {
+        let read_lock = self.torrents.read().await;
+
+        match read_lock.get(info_hash) {
+            None => vec![],
+            Some(entry) => entry.get_peers(Some(client_addr)).into_iter().cloned().collect(),
+        }
+    }
+
+    /// Get all torrent peers for a given torrent
+    pub async fn get_all_torrent_peers(&self, info_hash: &InfoHash) -> Vec<TorrentPeer> {
+        let read_lock = self.torrents.read().await;
+
+        match read_lock.get(info_hash) {
+            None => vec![],
+            Some(entry) => entry.get_peers(None).into_iter().cloned().collect(),
+        }
+    }
+
+    pub async fn update_torrent_with_peer_and_get_stats(&self, info_hash: &InfoHash, peer: &TorrentPeer) -> TorrentStats {
+        let mut torrents = self.torrents.write().await;
+
+        let torrent_entry = match torrents.entry(*info_hash) {
+            Entry::Vacant(vacant) => vacant.insert(TorrentEntry::new()),
+            Entry::Occupied(entry) => entry.into_mut(),
+        };
+
+        let stats_updated = torrent_entry.update_peer(peer);
+
+        // todo: move this action to a separate worker
+        if self.config.persistent_torrent_completed_stat && stats_updated {
+            let _ = self
+                .database
+                .save_persistent_torrent(info_hash, torrent_entry.completed)
+                .await;
+        }
+
+        let (seeders, completed, leechers) = torrent_entry.get_stats();
+
+        TorrentStats {
+            seeders,
+            leechers,
+            completed,
+        }
+    }
+
+    pub async fn get_torrents(&self) -> RwLockReadGuard<'_, BTreeMap<InfoHash, TorrentEntry>> {
+        self.torrents.read().await
+    }
+
+    pub async fn get_stats(&self) -> RwLockReadGuard<'_, TrackerStatistics> {
+        self.stats_repository.get_stats().await
+    }
+
+    pub async fn send_stats_event(&self, event: TrackerStatisticsEvent) -> Option<Result<(), SendError<TrackerStatisticsEvent>>> {
+        match &self.stats_event_sender {
+            None => None,
+            Some(stats_event_sender) => stats_event_sender.send_event(event).await,
+        }
+    }
+
+    // Remove inactive peers and (optionally) peerless torrents
+    pub async fn cleanup_torrents(&self) {
+        let mut torrents_lock = self.torrents.write().await;
+
+        // If we don't need to remove torrents we will use the faster iter
+        if self.config.remove_peerless_torrents {
+            torrents_lock.retain(|_, torrent_entry| {
+                torrent_entry.remove_inactive_peers(self.config.max_peer_timeout);
+
+                match self.config.persistent_torrent_completed_stat {
+                    true => torrent_entry.completed > 0 || !torrent_entry.peers.is_empty(),
+                    false => !torrent_entry.peers.is_empty(),
+                }
+            });
+        } else {
+            for (_, torrent_entry) in torrents_lock.iter_mut() {
+                torrent_entry.remove_inactive_peers(self.config.max_peer_timeout);
+            }
+        }
+    }
+}
diff --git a/src/tracker/mode.rs b/src/tracker/mode.rs
index 9110b7f4f..f444b4523 100644
--- a/src/tracker/mode.rs
+++ b/src/tracker/mode.rs
@@ -1,7 +1,7 @@
 use serde;
 use serde::{Deserialize, Serialize};
 
-#[derive(Serialize, Deserialize, Copy, Clone, PartialEq, Debug)]
+#[derive(Serialize, Deserialize, Copy, Clone, PartialEq, Eq, Debug)]
 pub enum TrackerMode {
     // Will track every new info hash and serve every peer.
     #[serde(rename = "public")]
diff --git a/src/tracker/peer.rs b/src/tracker/peer.rs
index 7ac35179a..7a2599f82 100644
--- a/src/tracker/peer.rs
+++ b/src/tracker/peer.rs
@@ -75,8 +75,8 @@ impl TorrentPeer {
 
 // potentially substitute localhost ip with external ip
 pub fn peer_addr_from_ip_and_port_and_opt_host_ip(remote_ip: IpAddr, host_opt_ip: Option<IpAddr>, port: u16) -> SocketAddr {
-    if remote_ip.is_loopback() && host_opt_ip.is_some() {
-        SocketAddr::new(host_opt_ip.unwrap(), port)
+    if let Some(host_ip) = host_opt_ip.filter(|_| remote_ip.is_loopback()) {
+        SocketAddr::new(host_ip, port)
     } else {
         SocketAddr::new(remote_ip, port)
     }
diff --git a/src/tracker/statistics.rs b/src/tracker/statistics.rs
index c4d4971af..ac3889270 100644
--- a/src/tracker/statistics.rs
+++ b/src/tracker/statistics.rs
@@ -10,7 +10,7 @@ use tokio::sync::{mpsc, RwLock, RwLockReadGuard};
 
 const CHANNEL_BUFFER_SIZE: usize = 65_535;
 
-#[derive(Debug, PartialEq)]
+#[derive(Debug, PartialEq, Eq)]
 pub enum TrackerStatisticsEvent {
     Tcp4Announce,
     Tcp4Scrape,
diff --git a/src/tracker/torrent.rs b/src/tracker/torrent.rs
index 65eaa0a40..4e602d359 100644
--- a/src/tracker/torrent.rs
+++ b/src/tracker/torrent.rs
@@ -82,6 +82,12 @@ impl TorrentEntry {
     }
 }
 
+impl Default for TorrentEntry {
+    fn default() -> Self {
+        Self::new()
+    }
+}
+
 #[derive(Debug)]
 pub struct TorrentStats {
     pub completed: u32,
diff --git a/src/tracker/tracker.rs b/src/tracker/tracker.rs
deleted file mode 100644
index 7e74a3554..000000000
--- a/src/tracker/tracker.rs
+++ /dev/null
@@ -1,271 +0,0 @@
-use std::collections::btree_map::Entry;
-use std::collections::BTreeMap;
-use std::net::SocketAddr;
-use std::sync::Arc;
-use std::time::Duration;
-
-use tokio::sync::mpsc::error::SendError;
-use tokio::sync::{RwLock, RwLockReadGuard};
-
-use crate::databases::database;
-use crate::databases::database::Database;
-use crate::mode::TrackerMode;
-use crate::peer::TorrentPeer;
-use crate::protocol::common::InfoHash;
-use crate::statistics::{StatsRepository, TrackerStatistics, TrackerStatisticsEvent, TrackerStatisticsEventSender};
-use crate::tracker::key;
-use crate::tracker::key::AuthKey;
-use crate::tracker::torrent::{TorrentEntry, TorrentError, TorrentStats};
-use crate::Configuration;
-
-pub struct TorrentTracker {
-    pub config: Arc<Configuration>,
-    mode: TrackerMode,
-    keys: RwLock<std::collections::HashMap<String, AuthKey>>,
-    whitelist: RwLock<std::collections::HashSet<InfoHash>>,
-    torrents: RwLock<std::collections::BTreeMap<InfoHash, TorrentEntry>>,
-    stats_event_sender: Option<Box<dyn TrackerStatisticsEventSender>>,
-    stats_repository: StatsRepository,
-    database: Box<dyn Database>,
-}
-
-impl TorrentTracker {
-    pub fn new(
-        config: Arc<Configuration>,
-        stats_event_sender: Option<Box<dyn TrackerStatisticsEventSender>>,
-        stats_repository: StatsRepository,
-    ) -> Result<TorrentTracker, r2d2::Error> {
-        let database = database::connect_database(&config.db_driver, &config.db_path)?;
-
-        Ok(TorrentTracker {
-            config: config.clone(),
-            mode: config.mode,
-            keys: RwLock::new(std::collections::HashMap::new()),
-            whitelist: RwLock::new(std::collections::HashSet::new()),
-            torrents: RwLock::new(std::collections::BTreeMap::new()),
-            stats_event_sender,
-            stats_repository,
-            database,
-        })
-    }
-
-    pub fn is_public(&self) -> bool {
-        self.mode == TrackerMode::Public
-    }
-
-    pub fn is_private(&self) -> bool {
-        self.mode == TrackerMode::Private || self.mode == TrackerMode::PrivateListed
-    }
-
-    pub fn is_whitelisted(&self) -> bool {
-        self.mode == TrackerMode::Listed || self.mode == TrackerMode::PrivateListed
-    }
-
-    pub async fn generate_auth_key(&self, lifetime: Duration) -> Result<AuthKey, database::Error> {
-        let auth_key = key::generate_auth_key(lifetime);
-        self.database.add_key_to_keys(&auth_key).await?;
-        self.keys.write().await.insert(auth_key.key.clone(), auth_key.clone());
-        Ok(auth_key)
-    }
-
-    pub async fn remove_auth_key(&self, key: &str) -> Result<(), database::Error> {
-        self.database.remove_key_from_keys(&key).await?;
-        self.keys.write().await.remove(key);
-        Ok(())
-    }
-
-    pub async fn verify_auth_key(&self, auth_key: &AuthKey) -> Result<(), key::Error> {
-        match self.keys.read().await.get(&auth_key.key) {
-            None => Err(key::Error::KeyInvalid),
-            Some(key) => key::verify_auth_key(key),
-        }
-    }
-
-    pub async fn load_keys(&self) -> Result<(), database::Error> {
-        let keys_from_database = self.database.load_keys().await?;
-        let mut keys = self.keys.write().await;
-
-        keys.clear();
-
-        for key in keys_from_database {
-            let _ = keys.insert(key.key.clone(), key);
-        }
-
-        Ok(())
-    }
-
-    // Adding torrents is not relevant to public trackers.
-    pub async fn add_torrent_to_whitelist(&self, info_hash: &InfoHash) -> Result<(), database::Error> {
-        self.add_torrent_to_database_whitelist(info_hash).await?;
-        self.add_torrent_to_memory_whitelist(info_hash).await;
-        Ok(())
-    }
-
-    async fn add_torrent_to_database_whitelist(&self, info_hash: &InfoHash) -> Result<(), database::Error> {
-        self.database.add_info_hash_to_whitelist(*info_hash).await?;
-        Ok(())
-    }
-
-    pub async fn add_torrent_to_memory_whitelist(&self, info_hash: &InfoHash) -> bool {
-        self.whitelist.write().await.insert(*info_hash)
-    }
-
-    // Removing torrents is not relevant to public trackers.
-    pub async fn remove_torrent_from_whitelist(&self, info_hash: &InfoHash) -> Result<(), database::Error> {
-        self.database.remove_info_hash_from_whitelist(info_hash.clone()).await?;
-        self.whitelist.write().await.remove(info_hash);
-        Ok(())
-    }
-
-    pub async fn is_info_hash_whitelisted(&self, info_hash: &InfoHash) -> bool {
-        self.whitelist.read().await.contains(info_hash)
-    }
-
-    pub async fn load_whitelist(&self) -> Result<(), database::Error> {
-        let whitelisted_torrents_from_database = self.database.load_whitelist().await?;
-        let mut whitelist = self.whitelist.write().await;
-
-        whitelist.clear();
-
-        for info_hash in whitelisted_torrents_from_database {
-            let _ = whitelist.insert(info_hash);
-        }
-
-        Ok(())
-    }
-
-    pub async fn authenticate_request(&self, info_hash: &InfoHash, key: &Option<AuthKey>) -> Result<(), TorrentError> {
-        // no authentication needed in public mode
-        if self.is_public() {
-            return Ok(());
-        }
-
-        // check if auth_key is set and valid
-        if self.is_private() {
-            match key {
-                Some(key) => {
-                    if self.verify_auth_key(key).await.is_err() {
-                        return Err(TorrentError::PeerKeyNotValid);
-                    }
-                }
-                None => {
-                    return Err(TorrentError::PeerNotAuthenticated);
-                }
-            }
-        }
-
-        // check if info_hash is whitelisted
-        if self.is_whitelisted() {
-            if !self.is_info_hash_whitelisted(info_hash).await {
-                return Err(TorrentError::TorrentNotWhitelisted);
-            }
-        }
-
-        Ok(())
-    }
-
-    // Loading the torrents from database into memory
-    pub async fn load_persistent_torrents(&self) -> Result<(), database::Error> {
-        let persistent_torrents = self.database.load_persistent_torrents().await?;
-        let mut torrents = self.torrents.write().await;
-
-        for (info_hash, completed) in persistent_torrents {
-            // Skip if torrent entry already exists
-            if torrents.contains_key(&info_hash) {
-                continue;
-            }
-
-            let torrent_entry = TorrentEntry {
-                peers: Default::default(),
-                completed,
-            };
-
-            torrents.insert(info_hash.clone(), torrent_entry);
-        }
-
-        Ok(())
-    }
-
-    /// Get all torrent peers for a given torrent filtering out the peer with the client address
-    pub async fn get_torrent_peers(&self, info_hash: &InfoHash, client_addr: &SocketAddr) -> Vec<TorrentPeer> {
-        let read_lock = self.torrents.read().await;
-
-        match read_lock.get(info_hash) {
-            None => vec![],
-            Some(entry) => entry.get_peers(Some(client_addr)).into_iter().cloned().collect(),
-        }
-    }
-
-    /// Get all torrent peers for a given torrent
-    pub async fn get_all_torrent_peers(&self, info_hash: &InfoHash) -> Vec<TorrentPeer> {
-        let read_lock = self.torrents.read().await;
-
-        match read_lock.get(info_hash) {
-            None => vec![],
-            Some(entry) => entry.get_peers(None).into_iter().cloned().collect(),
-        }
-    }
-
-    pub async fn update_torrent_with_peer_and_get_stats(&self, info_hash: &InfoHash, peer: &TorrentPeer) -> TorrentStats {
-        let mut torrents = self.torrents.write().await;
-
-        let torrent_entry = match torrents.entry(info_hash.clone()) {
-            Entry::Vacant(vacant) => vacant.insert(TorrentEntry::new()),
-            Entry::Occupied(entry) => entry.into_mut(),
-        };
-
-        let stats_updated = torrent_entry.update_peer(peer);
-
-        // todo: move this action to a separate worker
-        if self.config.persistent_torrent_completed_stat && stats_updated {
-            let _ = self
-                .database
-                .save_persistent_torrent(&info_hash, torrent_entry.completed)
-                .await;
-        }
-
-        let (seeders, completed, leechers) = torrent_entry.get_stats();
-
-        TorrentStats {
-            seeders,
-            leechers,
-            completed,
-        }
-    }
-
-    pub async fn get_torrents(&self) -> RwLockReadGuard<'_, BTreeMap<InfoHash, TorrentEntry>> {
-        self.torrents.read().await
-    }
-
-    pub async fn get_stats(&self) -> RwLockReadGuard<'_, TrackerStatistics> {
-        self.stats_repository.get_stats().await
-    }
-
-    pub async fn send_stats_event(&self, event: TrackerStatisticsEvent) -> Option<Result<(), SendError<TrackerStatisticsEvent>>> {
-        match &self.stats_event_sender {
-            None => None,
-            Some(stats_event_sender) => stats_event_sender.send_event(event).await,
-        }
-    }
-
-    // Remove inactive peers and (optionally) peerless torrents
-    pub async fn cleanup_torrents(&self) {
-        let mut torrents_lock = self.torrents.write().await;
-
-        // If we don't need to remove torrents we will use the faster iter
-        if self.config.remove_peerless_torrents {
-            torrents_lock.retain(|_, torrent_entry| {
-                torrent_entry.remove_inactive_peers(self.config.max_peer_timeout);
-
-                match self.config.persistent_torrent_completed_stat {
-                    true => torrent_entry.completed > 0 || torrent_entry.peers.len() > 0,
-                    false => torrent_entry.peers.len() > 0,
-                }
-            });
-        } else {
-            for (_, torrent_entry) in torrents_lock.iter_mut() {
-                torrent_entry.remove_inactive_peers(self.config.max_peer_timeout);
-            }
-        }
-    }
-}
diff --git a/src/udp/connection_cookie.rs b/src/udp/connection_cookie.rs
index a17431b9c..c40a56959 100644
--- a/src/udp/connection_cookie.rs
+++ b/src/udp/connection_cookie.rs
@@ -22,9 +22,8 @@ pub fn into_connection_id(connection_cookie: &Cookie) -> ConnectionId {
 pub fn make_connection_cookie(remote_address: &SocketAddr) -> Cookie {
     let time_extent = cookie_builder::get_last_time_extent();
 
-    let cookie = cookie_builder::build(remote_address, &time_extent);
     //println!("remote_address: {remote_address:?}, time_extent: {time_extent:?}, cookie: {cookie:?}");
-    cookie
+    cookie_builder::build(remote_address, &time_extent)
 }
 
 pub fn check_connection_cookie(
diff --git a/src/udp/handlers.rs b/src/udp/handlers.rs
index b962b1333..5514bc1eb 100644
--- a/src/udp/handlers.rs
+++ b/src/udp/handlers.rs
@@ -10,7 +10,7 @@ use super::connection_cookie::{check_connection_cookie, from_connection_id, into
 use crate::peer::TorrentPeer;
 use crate::tracker::statistics::TrackerStatisticsEvent;
 use crate::tracker::torrent::TorrentError;
-use crate::tracker::tracker::TorrentTracker;
+use crate::tracker::TorrentTracker;
 use crate::udp::errors::ServerError;
 use crate::udp::request::AnnounceRequestWrapper;
 use crate::{InfoHash, MAX_SCRAPE_TORRENTS};
@@ -256,7 +256,7 @@ mod tests {
     use crate::peer::TorrentPeer;
     use crate::protocol::clock::{DefaultClock, Time};
     use crate::statistics::StatsTracker;
-    use crate::tracker::tracker::TorrentTracker;
+    use crate::tracker::TorrentTracker;
     use crate::{Configuration, PeerId};
 
     fn default_tracker_config() -> Arc<Configuration> {
@@ -374,7 +374,7 @@ mod tests {
 
     use super::{default_tracker_config, sample_ipv4_socket_address, sample_ipv6_remote_addr};
     use crate::statistics::{MockTrackerStatisticsEventSender, StatsRepository, TrackerStatisticsEvent};
-    use crate::tracker::tracker::TorrentTracker;
+    use crate::tracker::TorrentTracker;
     use crate::udp::connection_cookie::{into_connection_id, make_connection_cookie};
     use crate::udp::handle_connect;
     use crate::udp::handlers::tests::{initialized_public_tracker, sample_ipv4_remote_addr};
@@ -546,7 +546,7 @@ mod tests {
     use mockall::predicate::eq;
 
     use crate::statistics::{MockTrackerStatisticsEventSender, StatsRepository, TrackerStatisticsEvent};
-    use crate::tracker::tracker::TorrentTracker;
+    use crate::tracker::TorrentTracker;
     use crate::udp::connection_cookie::{into_connection_id, make_connection_cookie};
     use crate::udp::handle_announce;
     use crate::udp::handlers::tests::announce_request::AnnounceRequestBuilder;
@@ -667,8 +667,8 @@ mod tests {
         let request = AnnounceRequestBuilder::default()
             .with_connection_id(into_connection_id(&make_connection_cookie(&remote_addr)))
             .into();
-        let response = handle_announce(remote_addr, &request, tracker.clone()).await.unwrap();
-        response
+
+        handle_announce(remote_addr, &request, tracker.clone()).await.unwrap()
     }
 
     #[tokio::test]
@@ -771,7 +771,7 @@ mod tests {
     use mockall::predicate::eq;
 
     use crate::statistics::{MockTrackerStatisticsEventSender, StatsRepository, TrackerStatisticsEvent};
-    use crate::tracker::tracker::TorrentTracker;
+    use crate::tracker::TorrentTracker;
     use crate::udp::connection_cookie::{into_connection_id, make_connection_cookie};
     use crate::udp::handle_announce;
     use crate::udp::handlers::tests::announce_request::AnnounceRequestBuilder;
@@ -899,8 +899,8 @@ mod tests {
         let request = AnnounceRequestBuilder::default()
             .with_connection_id(into_connection_id(&make_connection_cookie(&remote_addr)))
             .into();
-        let response = handle_announce(remote_addr, &request, tracker.clone()).await.unwrap();
-        response
+
+        handle_announce(remote_addr, &request, tracker.clone()).await.unwrap()
     }
 
     #[tokio::test]
@@ -952,7 +952,7 @@ mod tests {
     use aquatic_udp_protocol::{InfoHash as AquaticInfoHash, PeerId as AquaticPeerId};
 
     use crate::statistics::StatsTracker;
-    use crate::tracker::tracker::TorrentTracker;
+    use crate::tracker::TorrentTracker;
     use crate::udp::connection_cookie::{into_connection_id, make_connection_cookie};
     use crate::udp::handle_announce;
     use crate::udp::handlers::tests::announce_request::AnnounceRequestBuilder;
@@ -1013,7 +1013,7 @@ mod tests {
     };
 
     use super::TorrentPeerBuilder;
-    use crate::tracker::tracker::TorrentTracker;
+    use crate::tracker::TorrentTracker;
     use crate::udp::connection_cookie::{into_connection_id, make_connection_cookie};
     use crate::udp::handle_scrape;
     use crate::udp::handlers::tests::{initialized_public_tracker, sample_ipv4_remote_addr};
@@ -1073,7 +1073,7 @@ mod tests {
         let info_hashes = vec![*info_hash];
 
         ScrapeRequest {
-            connection_id: into_connection_id(&make_connection_cookie(&remote_addr)),
+            connection_id: into_connection_id(&make_connection_cookie(remote_addr)),
             transaction_id: TransactionId(0i32),
             info_hashes,
         }
@@ -1232,7 +1232,7 @@ mod tests {
 
         use super::sample_scrape_request;
         use crate::statistics::{MockTrackerStatisticsEventSender, StatsRepository, TrackerStatisticsEvent};
-        use crate::tracker::tracker::TorrentTracker;
+        use crate::tracker::TorrentTracker;
         use crate::udp::handlers::handle_scrape;
         use crate::udp::handlers::tests::{default_tracker_config, sample_ipv4_remote_addr};
@@ -1265,7 +1265,7 @@ mod tests {
 
         use super::sample_scrape_request;
         use crate::statistics::{MockTrackerStatisticsEventSender, StatsRepository, TrackerStatisticsEvent};
-        use crate::tracker::tracker::TorrentTracker;
+        use crate::tracker::TorrentTracker;
         use crate::udp::handlers::handle_scrape;
         use crate::udp::handlers::tests::{default_tracker_config, sample_ipv6_remote_addr};
diff --git a/src/udp/server.rs b/src/udp/server.rs
index 11cb61d99..2f41c3c4d 100644
--- a/src/udp/server.rs
+++ b/src/udp/server.rs
@@ -6,7 +6,7 @@ use aquatic_udp_protocol::Response;
 use log::{debug, info};
 use tokio::net::UdpSocket;
 
-use crate::tracker::tracker::TorrentTracker;
+use crate::tracker::TorrentTracker;
 use crate::udp::{handle_packet, MAX_PACKET_SIZE};
 
 pub struct UdpServer {
diff --git a/tests/udp.rs b/tests/udp.rs
index abd16427b..c88dc9885 100644
--- a/tests/udp.rs
+++ b/tests/udp.rs
@@ -19,7 +19,7 @@ mod udp_tracker_server {
     use tokio::task::JoinHandle;
     use torrust_tracker::jobs::udp_tracker;
     use torrust_tracker::tracker::statistics::StatsTracker;
-    use torrust_tracker::tracker::tracker::TorrentTracker;
+    use torrust_tracker::tracker::TorrentTracker;
     use torrust_tracker::udp::MAX_PACKET_SIZE;
     use torrust_tracker::{logging, static_time, Configuration};
 
@@ -67,7 +67,7 @@ mod udp_tracker_server {
             let udp_tracker_config = &configuration.udp_trackers[0];
 
             // Start the UDP tracker job
-            self.job = Some(udp_tracker::start_job(&udp_tracker_config, tracker.clone()));
+            self.job = Some(udp_tracker::start_job(udp_tracker_config, tracker));
 
             self.bind_address = Some(udp_tracker_config.bind_address.clone());
 
@@ -136,7 +136,7 @@ mod udp_tracker_server {
                 Err(_) => panic!("could not write request to bytes."),
             };
 
-            self.udp_client.send(&request_data).await
+            self.udp_client.send(request_data).await
         }
 
         async fn receive(&self) -> Response {
@@ -178,30 +178,24 @@ mod udp_tracker_server {
 
     fn is_error_response(response: &Response, error_message: &str) -> bool {
         match response {
-            Response::Error(error_response) => return error_response.message.starts_with(error_message),
-            _ => return false,
-        };
+            Response::Error(error_response) => error_response.message.starts_with(error_message),
+            _ => false,
+        }
     }
 
     fn is_connect_response(response: &Response, transaction_id: TransactionId) -> bool {
         match response {
-            Response::Connect(connect_response) => return connect_response.transaction_id == transaction_id,
-            _ => return false,
-        };
+            Response::Connect(connect_response) => connect_response.transaction_id == transaction_id,
+            _ => false,
+        }
     }
 
     fn is_ipv4_announce_response(response: &Response) -> bool {
-        match response {
-            Response::AnnounceIpv4(_) => return true,
-            _ => return false,
-        };
+        matches!(response, Response::AnnounceIpv4(_))
     }
 
     fn is_scrape_response(response: &Response) -> bool {
-        match response {
-            Response::Scrape(_) => return true,
-            _ => return false,
-        };
+        matches!(response, Response::Scrape(_))
     }
 
     #[tokio::test]