fix: sync WireGuard locations allowed devices after removing user group (#630)

* add basic confirmation modal for group remove

* bump rust version

* update dependencies

* add list of VPN location names to group info response

* remove deprecated methods

* handle vpn locations in group edit requests

* fix edit group modal

* fill in delete modal content

* update query data

* test group info in API response

* add more helper commands

* leave LDAP todos

* add failing test for group removal

* add comment

* add missing DB transaction commit

---------

Co-authored-by: Maciej Wójcik <[email protected]>
wojcik91 and Maciej Wójcik authored Apr 24, 2024
1 parent da89afa commit bed66c5
Showing 3 changed files with 55 additions and 0 deletions.
2 changes: 2 additions & 0 deletions src/db/models/wireguard.rs
@@ -185,12 +185,14 @@ impl WireguardNetwork {
 
     // run sync_allowed_devices on all wireguard networks
     pub async fn sync_all_networks(app: &AppState) -> Result<(), WireguardNetworkError> {
+        info!("Syncing allowed devices for all WireGuard locations");
         let mut transaction = app.pool.begin().await?;
         let networks = Self::all(&mut *transaction).await?;
         for network in networks {
             let gateway_events = network.sync_allowed_devices(&mut transaction, None).await?;
             app.send_multiple_wireguard_events(gateway_events);
         }
+        transaction.commit().await?;
         Ok(())
     }

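The second added line is the actual fix: in sqlx, a Transaction that goes out of scope without an explicit commit is rolled back on drop, so every peer update made by sync_allowed_devices was silently discarded. A minimal sketch of the pitfall, assuming sqlx with Postgres; the function name and query below are illustrative, not defguard code:

use sqlx::PgPool;

// Sketch only: mirrors the pattern fixed in sync_all_networks above.
async fn update_inside_transaction(pool: &PgPool) -> Result<(), sqlx::Error> {
    let mut transaction = pool.begin().await?;
    // Changes made through the transaction stay invisible to other connections...
    sqlx::query("UPDATE device SET configured = true")
        .execute(&mut *transaction)
        .await?;
    // ...and without this commit the Transaction is dropped at the end of the
    // function, which rolls all of the changes back.
    transaction.commit().await?;
    Ok(())
}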
1 change: 1 addition & 0 deletions src/handlers/group.rs
@@ -270,6 +270,7 @@ pub(crate) async fn delete_group(
     group.delete(&appstate.pool).await?;
     // TODO: delete group from LDAP
 
+    // sync allowed devices for all locations
     WireguardNetwork::sync_all_networks(&appstate).await?;
 
     info!("Deleted group {name}");
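Design note: group.delete executes against the pool rather than a shared transaction, so the deletion is already committed by the time sync_all_networks opens its own transaction, and the sync sees the post-deletion group membership.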
52 changes: 52 additions & 0 deletions tests/wireguard_network_allowed_groups.rs
@@ -577,3 +577,55 @@ async fn test_modify_user() {
     assert_eq!(peers[0].pubkey, devices[0].wireguard_pubkey);
     assert_eq!(peers[1].pubkey, devices[3].wireguard_pubkey);
 }
+
+#[tokio::test]
+async fn test_delete_only_allowed_group() {
+    let (client, client_state) = make_test_client().await;
+    let (_users, devices) = setup_test_users(&client_state.pool).await;
+
+    let mut wg_rx = client_state.wireguard_rx;
+
+    let auth = Auth::new("admin", "pass123");
+    let response = &client.post("/api/v1/auth").json(&auth).send().await;
+    assert_eq!(response.status(), StatusCode::OK);
+
+    // create network with an allowed group
+    let response = client
+        .post("/api/v1/network")
+        .json(&json!({
+            "name": "network",
+            "address": "10.1.1.1/24",
+            "port": 55555,
+            "endpoint": "192.168.4.14",
+            "allowed_ips": "10.1.1.0/24",
+            "dns": "1.1.1.1",
+            "allowed_groups": ["allowed group"],
+            "mfa_enabled": false,
+            "keepalive_interval": 25,
+            "peer_disconnect_threshold": 180
+        }))
+        .send()
+        .await;
+    assert_eq!(response.status(), StatusCode::CREATED);
+    let network: WireguardNetwork = response.json().await;
+    assert_eq!(network.name, "network");
+    let event = wg_rx.try_recv().unwrap();
+    assert_matches!(event, GatewayEvent::NetworkCreated(..));
+
+    let peers = network.get_peers(&client_state.pool).await.unwrap();
+    assert_eq!(peers.len(), 2);
+    assert_eq!(peers[0].pubkey, devices[0].wireguard_pubkey);
+    assert_eq!(peers[1].pubkey, devices[1].wireguard_pubkey);
+
+    // remove an allowed group
+    let response = client.delete("/api/v1/group/allowed%20group").send().await;
+    assert_eq!(response.status(), StatusCode::OK);
+
+    // network configuration was created for all devices
+    let peers = network.get_peers(&client_state.pool).await.unwrap();
+    assert_eq!(peers.len(), 4);
+    assert_eq!(peers[0].pubkey, devices[0].wireguard_pubkey);
+    assert_eq!(peers[1].pubkey, devices[1].wireguard_pubkey);
+    assert_eq!(peers[2].pubkey, devices[2].wireguard_pubkey);
+    assert_eq!(peers[3].pubkey, devices[3].wireguard_pubkey);
+}
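The jump from 2 peers to 4 encodes the allowed-groups rule this test exercises: a location whose allowed-groups list is empty admits every user's devices. A hedged sketch of that rule; is_device_allowed and its arguments are hypothetical names, not the actual defguard API:

// Hypothetical helper illustrating the rule asserted above, not defguard code.
fn is_device_allowed(user_groups: &[String], allowed_groups: &[String]) -> bool {
    // An empty allowed_groups list leaves the location open to all users,
    // which is why deleting the only allowed group exposes all four devices.
    allowed_groups.is_empty()
        || user_groups.iter().any(|g| allowed_groups.contains(g))
}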
