Skip to content

Commit 2da7142

Browse files
Jon Paul Maloy authored and davem330 committed
tipc: drop tunneled packet duplicates at reception
In commit 8b4ed86 ("tipc: eliminate race condition at dual link establishment") we introduced a parallel link synchronization mechanism that guarantees sequential delivery even for users switching from an old to a newly established link. The new mechanism makes it unnecessary to deliver the tunneled duplicate packets back to the old link, as we are currently doing. It is now sufficient to use the last tunneled packet's inner sequence number as synchronization point between the two parallel links, whereafter it can be dropped. In this commit, we drop the duplicate packets arriving on the new link, after updating the synchronization point at each new arrival. Although it would now have been sufficient for the other endpoint to only tunnel the last packet in its send queue, and not the entire queue, we must still do this to maintain compatibility with older nodes. This commit makes it possible to get rid of some complex interaction between the two parallel links. Reviewed-by: Ying Xue <[email protected]> Signed-off-by: Jon Maloy <[email protected]> Signed-off-by: David S. Miller <[email protected]>
1 parent c5531ca commit 2da7142

File tree

1 file changed

+47
-85
lines changed

1 file changed

+47
-85
lines changed

net/tipc/link.c

Lines changed: 47 additions & 85 deletions
Original file line numberDiff line numberDiff line change
@@ -105,8 +105,6 @@ static void link_handle_out_of_seq_msg(struct tipc_link *link,
105105
struct sk_buff *skb);
106106
static void tipc_link_proto_rcv(struct tipc_link *link,
107107
struct sk_buff *skb);
108-
static int tipc_link_tunnel_rcv(struct tipc_node *node,
109-
struct sk_buff **skb);
110108
static void link_set_supervision_props(struct tipc_link *l_ptr, u32 tol);
111109
static void link_state_event(struct tipc_link *l_ptr, u32 event);
112110
static void link_reset_statistics(struct tipc_link *l_ptr);
@@ -115,7 +113,8 @@ static void tipc_link_sync_xmit(struct tipc_link *l);
115113
static void tipc_link_sync_rcv(struct tipc_node *n, struct sk_buff *buf);
116114
static void tipc_link_input(struct tipc_link *l, struct sk_buff *skb);
117115
static bool tipc_data_input(struct tipc_link *l, struct sk_buff *skb);
118-
116+
static bool tipc_link_failover_rcv(struct tipc_node *node,
117+
struct sk_buff **skb);
119118
/*
120119
* Simple link routines
121120
*/
@@ -1274,8 +1273,10 @@ static void tipc_link_input(struct tipc_link *link, struct sk_buff *skb)
12741273
if (msg_dup(msg)) {
12751274
link->flags |= LINK_SYNCHING;
12761275
link->synch_point = msg_seqno(msg_get_wrapped(msg));
1276+
kfree_skb(skb);
1277+
break;
12771278
}
1278-
if (!tipc_link_tunnel_rcv(node, &skb))
1279+
if (!tipc_link_failover_rcv(node, &skb))
12791280
break;
12801281
if (msg_user(buf_msg(skb)) != MSG_BUNDLER) {
12811282
tipc_data_input(link, skb);
@@ -1755,101 +1756,62 @@ void tipc_link_dup_queue_xmit(struct tipc_link *link,
17551756
goto tunnel_queue;
17561757
}
17571758

1758-
/* tipc_link_dup_rcv(): Receive a tunnelled DUPLICATE_MSG packet.
1759-
* Owner node is locked.
1760-
*/
1761-
static void tipc_link_dup_rcv(struct tipc_link *link,
1762-
struct sk_buff *skb)
1763-
{
1764-
struct sk_buff *iskb;
1765-
int pos = 0;
1766-
1767-
if (!tipc_link_is_up(link))
1768-
return;
1769-
1770-
if (!tipc_msg_extract(skb, &iskb, &pos)) {
1771-
pr_warn("%sfailed to extract inner dup pkt\n", link_co_err);
1772-
return;
1773-
}
1774-
/* Append buffer to deferred queue, if applicable: */
1775-
link_handle_out_of_seq_msg(link, iskb);
1776-
}
1777-
17781759
/* tipc_link_failover_rcv(): Receive a tunnelled ORIGINAL_MSG packet
17791760
* Owner node is locked.
17801761
*/
1781-
static struct sk_buff *tipc_link_failover_rcv(struct tipc_link *l_ptr,
1782-
struct sk_buff *t_buf)
1762+
static bool tipc_link_failover_rcv(struct tipc_node *node,
1763+
struct sk_buff **skb)
17831764
{
1784-
struct tipc_msg *t_msg = buf_msg(t_buf);
1785-
struct sk_buff *buf = NULL;
1786-
struct tipc_msg *msg;
1765+
struct tipc_msg *msg = buf_msg(*skb);
1766+
struct sk_buff *iskb = NULL;
1767+
struct tipc_link *link = NULL;
1768+
int bearer_id = msg_bearer_id(msg);
17871769
int pos = 0;
17881770

1789-
if (tipc_link_is_up(l_ptr))
1790-
tipc_link_reset(l_ptr);
1791-
1792-
/* First failover packet? */
1793-
if (l_ptr->exp_msg_count == START_CHANGEOVER)
1794-
l_ptr->exp_msg_count = msg_msgcnt(t_msg);
1795-
1796-
/* Should there be an inner packet? */
1797-
if (l_ptr->exp_msg_count) {
1798-
l_ptr->exp_msg_count--;
1799-
if (!tipc_msg_extract(t_buf, &buf, &pos)) {
1800-
pr_warn("%sno inner failover pkt\n", link_co_err);
1801-
goto exit;
1802-
}
1803-
msg = buf_msg(buf);
1804-
1805-
if (less(msg_seqno(msg), l_ptr->reset_checkpoint)) {
1806-
kfree_skb(buf);
1807-
buf = NULL;
1808-
goto exit;
1809-
}
1810-
if (msg_user(msg) == MSG_FRAGMENTER) {
1811-
l_ptr->stats.recv_fragments++;
1812-
tipc_buf_append(&l_ptr->reasm_buf, &buf);
1813-
}
1771+
if (msg_type(msg) != ORIGINAL_MSG) {
1772+
pr_warn("%sunknown tunnel pkt received\n", link_co_err);
1773+
goto exit;
18141774
}
1815-
exit:
1816-
if ((!l_ptr->exp_msg_count) && (l_ptr->flags & LINK_STOPPED))
1817-
tipc_link_delete(l_ptr);
1818-
return buf;
1819-
}
1820-
1821-
/* tipc_link_tunnel_rcv(): Receive a tunnelled packet, sent
1822-
* via other link as result of a failover (ORIGINAL_MSG) or
1823-
* a new active link (DUPLICATE_MSG). Failover packets are
1824-
* returned to the active link for delivery upwards.
1825-
* Owner node is locked.
1826-
*/
1827-
static int tipc_link_tunnel_rcv(struct tipc_node *n_ptr,
1828-
struct sk_buff **buf)
1829-
{
1830-
struct sk_buff *t_buf = *buf;
1831-
struct tipc_link *l_ptr;
1832-
struct tipc_msg *t_msg = buf_msg(t_buf);
1833-
u32 bearer_id = msg_bearer_id(t_msg);
1775+
if (bearer_id >= MAX_BEARERS)
1776+
goto exit;
1777+
link = node->links[bearer_id];
1778+
if (!link)
1779+
goto exit;
1780+
if (tipc_link_is_up(link))
1781+
tipc_link_reset(link);
18341782

1835-
*buf = NULL;
1783+
/* First failover packet? */
1784+
if (link->exp_msg_count == START_CHANGEOVER)
1785+
link->exp_msg_count = msg_msgcnt(msg);
18361786

1837-
if (bearer_id >= MAX_BEARERS)
1787+
/* Should we expect an inner packet? */
1788+
if (!link->exp_msg_count)
18381789
goto exit;
18391790

1840-
l_ptr = n_ptr->links[bearer_id];
1841-
if (!l_ptr)
1791+
if (!tipc_msg_extract(*skb, &iskb, &pos)) {
1792+
pr_warn("%sno inner failover pkt\n", link_co_err);
1793+
*skb = NULL;
18421794
goto exit;
1795+
}
1796+
link->exp_msg_count--;
1797+
*skb = NULL;
18431798

1844-
if (msg_type(t_msg) == DUPLICATE_MSG)
1845-
tipc_link_dup_rcv(l_ptr, t_buf);
1846-
else if (msg_type(t_msg) == ORIGINAL_MSG)
1847-
*buf = tipc_link_failover_rcv(l_ptr, t_buf);
1848-
else
1849-
pr_warn("%sunknown tunnel pkt received\n", link_co_err);
1799+
/* Was packet already delivered? */
1800+
if (less(buf_seqno(iskb), link->reset_checkpoint)) {
1801+
kfree_skb(iskb);
1802+
iskb = NULL;
1803+
goto exit;
1804+
}
1805+
if (msg_user(buf_msg(iskb)) == MSG_FRAGMENTER) {
1806+
link->stats.recv_fragments++;
1807+
tipc_buf_append(&link->reasm_buf, &iskb);
1808+
}
18501809
exit:
1851-
kfree_skb(t_buf);
1852-
return *buf != NULL;
1810+
if (link && (!link->exp_msg_count) && (link->flags & LINK_STOPPED))
1811+
tipc_link_delete(link);
1812+
kfree_skb(*skb);
1813+
*skb = iskb;
1814+
return *skb;
18531815
}
18541816

18551817
static void link_set_supervision_props(struct tipc_link *l_ptr, u32 tol)

0 commit comments

Comments
 (0)