Revert "mptcp: add get_subflow wrappers" - fix divide error in mptcp_…
Browse files Browse the repository at this point in the history
…subflow_get_send

This reverts commit 8ae8437.

The wrapper mptcp_sched_get_send() will be added in the later patch
"mptcp: use get_send wrapper", and the wrapper mptcp_sched_get_retrans()
will be added in the later patch "mptcp: use get_retrans wrapper".

Fix this divide error:

----
divide error: 0000 [#1] PREEMPT SMP KASAN NOPTI
CPU: 0 PID: 14336 Comm: syz-executor.6 Not tainted 6.1.0-rc1-00215-g47aa7f23f440 #1
Hardware name: QEMU Standard PC (i440FX + PIIX, 1996), BIOS 1.15.0-1 04/01/2014
RIP: 0010:div_u64_rem include/linux/math64.h:29 [inline]
RIP: 0010:div_u64 include/linux/math64.h:128 [inline]
RIP: 0010:mptcp_subflow_get_send+0xa87/0x1200 net/mptcp/protocol.c:1486
----
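
For reference, the div_u64() calls in mptcp_subflow_get_send() are part of the
packet-scheduler math: the function derives a per-subflow "linger time" from the
queued bytes and the pacing rate, and updates a burst-averaged pacing rate. The
userspace sketch below (hypothetical helper, not kernel code) models the
linger-time estimate and shows why an unguarded zero divisor produces exactly
this kind of divide error; it is an illustration of the failure mode only, the
fix itself is the revert below.

----
/* Hypothetical userspace sketch (not kernel code): models the per-subflow
 * linger-time estimate. On x86 a division by 0 raises #DE, which the kernel
 * reports as the "divide error" oops above, so the divisor must be checked
 * before dividing.
 */
#include <stdint.h>
#include <stdio.h>

static uint64_t linger_time(uint32_t wmem_queued, uint32_t pacing_rate)
{
	if (!pacing_rate)		/* guard: never divide by a zero rate */
		return UINT64_MAX;	/* rank such a subflow last instead of crashing */
	return ((uint64_t)wmem_queued << 32) / pacing_rate;
}

int main(void)
{
	/* plausible values: 4 KiB queued at ~1 Mbit/s pacing */
	printf("%llu\n", (unsigned long long)linger_time(4096, 125000));
	/* a zero pacing rate would trap without the guard above */
	printf("%llu\n", (unsigned long long)linger_time(4096, 0));
	return 0;
}
----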

Closes: #314
Reported-by: Mat Martineau <[email protected]>
Signed-off-by: Geliang Tang <[email protected]>
Link: https://lore.kernel.org/r/95f77f38e54f9564608e844f507701c04745475b.1666668425.git.geliang.tang@suse.com
Signed-off-by: Matthieu Baerts <[email protected]>
geliangtang authored and matttbe committed Oct 31, 2022
1 parent 6d193ac commit dce423f
Showing 3 changed files with 16 additions and 88 deletions.
18 changes: 16 additions & 2 deletions net/mptcp/protocol.c
@@ -1406,7 +1406,7 @@ bool mptcp_subflow_active(struct mptcp_subflow_context *subflow)
  * returns the subflow that will transmit the next DSS
  * additionally updates the rtx timeout
  */
-struct sock *mptcp_subflow_get_send(struct mptcp_sock *msk)
+static struct sock *mptcp_subflow_get_send(struct mptcp_sock *msk)
 {
 	struct subflow_send_info send_info[SSK_MODE_MAX];
 	struct mptcp_subflow_context *subflow;
@@ -1417,6 +1417,15 @@ struct sock *mptcp_subflow_get_send(struct mptcp_sock *msk)
 	u64 linger_time;
 	long tout = 0;
 
+	sock_owned_by_me(sk);
+
+	if (__mptcp_check_fallback(msk)) {
+		if (!msk->first)
+			return NULL;
+		return __tcp_can_send(msk->first) &&
+		       sk_stream_memory_free(msk->first) ? msk->first : NULL;
+	}
+
 	/* re-use last subflow, if the burst allow that */
 	if (msk->last_snd && msk->snd_burst > 0 &&
 	    sk_stream_memory_free(msk->last_snd) &&
@@ -2210,12 +2219,17 @@ static void mptcp_timeout_timer(struct timer_list *t)
  *
  * A backup subflow is returned only if that is the only kind available.
  */
-struct sock *mptcp_subflow_get_retrans(struct mptcp_sock *msk)
+static struct sock *mptcp_subflow_get_retrans(struct mptcp_sock *msk)
 {
 	struct sock *backup = NULL, *pick = NULL;
 	struct mptcp_subflow_context *subflow;
 	int min_stale_count = INT_MAX;
 
+	sock_owned_by_me((const struct sock *)msk);
+
+	if (__mptcp_check_fallback(msk))
+		return NULL;
+
 	mptcp_for_each_subflow(msk, subflow) {
 		struct sock *ssk = mptcp_subflow_tcp_sock(subflow);
 
4 changes: 0 additions & 4 deletions net/mptcp/protocol.h
@@ -641,10 +641,6 @@ int mptcp_init_sched(struct mptcp_sock *msk,
 void mptcp_release_sched(struct mptcp_sock *msk);
 void mptcp_subflow_set_scheduled(struct mptcp_subflow_context *subflow,
 				 bool scheduled);
-struct sock *mptcp_subflow_get_send(struct mptcp_sock *msk);
-struct sock *mptcp_subflow_get_retrans(struct mptcp_sock *msk);
-int mptcp_sched_get_send(struct mptcp_sock *msk);
-int mptcp_sched_get_retrans(struct mptcp_sock *msk);
 
 static inline bool __tcp_can_send(const struct sock *ssk)
 {
82 changes: 0 additions & 82 deletions net/mptcp/sched.c
@@ -93,85 +93,3 @@ void mptcp_subflow_set_scheduled(struct mptcp_subflow_context *subflow,
 {
 	WRITE_ONCE(subflow->scheduled, scheduled);
 }
-
-static int mptcp_sched_data_init(struct mptcp_sock *msk, bool reinject,
-				 struct mptcp_sched_data *data)
-{
-	struct mptcp_subflow_context *subflow;
-	int i = 0;
-
-	data->reinject = reinject;
-
-	mptcp_for_each_subflow(msk, subflow) {
-		if (i == MPTCP_SUBFLOWS_MAX) {
-			pr_warn_once("too many subflows");
-			break;
-		}
-		mptcp_subflow_set_scheduled(subflow, false);
-		data->contexts[i++] = subflow;
-	}
-
-	for (; i < MPTCP_SUBFLOWS_MAX; i++)
-		data->contexts[i] = NULL;
-
-	msk->snd_burst = 0;
-
-	return 0;
-}
-
-int mptcp_sched_get_send(struct mptcp_sock *msk)
-{
-	struct mptcp_sched_data data;
-	struct sock *ssk = NULL;
-
-	sock_owned_by_me((const struct sock *)msk);
-
-	/* the following check is moved out of mptcp_subflow_get_send */
-	if (__mptcp_check_fallback(msk)) {
-		if (msk->first &&
-		    __tcp_can_send(msk->first) &&
-		    sk_stream_memory_free(msk->first)) {
-			mptcp_subflow_set_scheduled(mptcp_subflow_ctx(msk->first), true);
-			return 0;
-		}
-		return -EINVAL;
-	}
-
-	if (!msk->sched) {
-		ssk = mptcp_subflow_get_send(msk);
-		if (!ssk)
-			return -EINVAL;
-		mptcp_subflow_set_scheduled(mptcp_subflow_ctx(ssk), true);
-		return 0;
-	}
-
-	mptcp_sched_data_init(msk, false, &data);
-	msk->sched->get_subflow(msk, &data);
-
-	return 0;
-}
-
-int mptcp_sched_get_retrans(struct mptcp_sock *msk)
-{
-	struct mptcp_sched_data data;
-	struct sock *ssk = NULL;
-
-	sock_owned_by_me((const struct sock *)msk);
-
-	/* the following check is moved out of mptcp_subflow_get_retrans */
-	if (__mptcp_check_fallback(msk))
-		return -EINVAL;
-
-	if (!msk->sched) {
-		ssk = mptcp_subflow_get_retrans(msk);
-		if (!ssk)
-			return -EINVAL;
-		mptcp_subflow_set_scheduled(mptcp_subflow_ctx(ssk), true);
-		return 0;
-	}
-
-	mptcp_sched_data_init(msk, true, &data);
-	msk->sched->get_subflow(msk, &data);
-
-	return 0;
-}
