Skip to content

Commit

Permalink
mptcp: add get_subflow wrappers
Browse files Browse the repository at this point in the history
This patch defines two new wrappers, mptcp_sched_get_send() and
mptcp_sched_get_retrans(), which invoke get_subflow() of msk->sched.

Set the subflow pointers array in struct mptcp_sched_data before invoking
get_subflow(), then it can be used in get_subflow() in the BPF contexts.

Check the subflow scheduled flags to test which subflow or subflows are
picked by the scheduler.

Move sock_owned_by_me() and the fallback check code from
mptcp_subflow_get_send/retrans() into the wrappers.

Acked-by: Paolo Abeni <[email protected]>
Reviewed-by: Mat Martineau <[email protected]>
Signed-off-by: Geliang Tang <[email protected]>
  • Loading branch information
geliangtang authored and jenkins-tessares committed Oct 24, 2022
1 parent 8db63fc commit 8ae8437
Show file tree
Hide file tree
Showing 3 changed files with 88 additions and 16 deletions.
18 changes: 2 additions & 16 deletions net/mptcp/protocol.c
Original file line number Diff line number Diff line change
Expand Up @@ -1406,7 +1406,7 @@ bool mptcp_subflow_active(struct mptcp_subflow_context *subflow)
* returns the subflow that will transmit the next DSS
* additionally updates the rtx timeout
*/
static struct sock *mptcp_subflow_get_send(struct mptcp_sock *msk)
struct sock *mptcp_subflow_get_send(struct mptcp_sock *msk)
{
struct subflow_send_info send_info[SSK_MODE_MAX];
struct mptcp_subflow_context *subflow;
Expand All @@ -1417,15 +1417,6 @@ static struct sock *mptcp_subflow_get_send(struct mptcp_sock *msk)
u64 linger_time;
long tout = 0;

sock_owned_by_me(sk);

if (__mptcp_check_fallback(msk)) {
if (!msk->first)
return NULL;
return __tcp_can_send(msk->first) &&
sk_stream_memory_free(msk->first) ? msk->first : NULL;
}

/* re-use last subflow, if the burst allow that */
if (msk->last_snd && msk->snd_burst > 0 &&
sk_stream_memory_free(msk->last_snd) &&
Expand Down Expand Up @@ -2219,17 +2210,12 @@ static void mptcp_timeout_timer(struct timer_list *t)
*
* A backup subflow is returned only if that is the only kind available.
*/
static struct sock *mptcp_subflow_get_retrans(struct mptcp_sock *msk)
struct sock *mptcp_subflow_get_retrans(struct mptcp_sock *msk)
{
struct sock *backup = NULL, *pick = NULL;
struct mptcp_subflow_context *subflow;
int min_stale_count = INT_MAX;

sock_owned_by_me((const struct sock *)msk);

if (__mptcp_check_fallback(msk))
return NULL;

mptcp_for_each_subflow(msk, subflow) {
struct sock *ssk = mptcp_subflow_tcp_sock(subflow);

Expand Down
4 changes: 4 additions & 0 deletions net/mptcp/protocol.h
Original file line number Diff line number Diff line change
Expand Up @@ -641,6 +641,10 @@ int mptcp_init_sched(struct mptcp_sock *msk,
void mptcp_release_sched(struct mptcp_sock *msk);
void mptcp_subflow_set_scheduled(struct mptcp_subflow_context *subflow,
bool scheduled);
struct sock *mptcp_subflow_get_send(struct mptcp_sock *msk);
struct sock *mptcp_subflow_get_retrans(struct mptcp_sock *msk);
int mptcp_sched_get_send(struct mptcp_sock *msk);
int mptcp_sched_get_retrans(struct mptcp_sock *msk);

static inline bool __tcp_can_send(const struct sock *ssk)
{
Expand Down
82 changes: 82 additions & 0 deletions net/mptcp/sched.c
Original file line number Diff line number Diff line change
Expand Up @@ -93,3 +93,85 @@ void mptcp_subflow_set_scheduled(struct mptcp_subflow_context *subflow,
{
WRITE_ONCE(subflow->scheduled, scheduled);
}

/* Populate @data with msk's current subflow list so a scheduler's
 * get_subflow() callback can inspect it (e.g. from a BPF context).
 * Every subflow's scheduled flag is cleared first, and unused slots in
 * the contexts array are set to NULL. Always returns 0.
 */
static int mptcp_sched_data_init(struct mptcp_sock *msk, bool reinject,
				 struct mptcp_sched_data *data)
{
	struct mptcp_subflow_context *subflow;
	int idx = 0;

	data->reinject = reinject;

	mptcp_for_each_subflow(msk, subflow) {
		if (idx >= MPTCP_SUBFLOWS_MAX) {
			pr_warn_once("too many subflows");
			break;
		}
		mptcp_subflow_set_scheduled(subflow, false);
		data->contexts[idx] = subflow;
		idx++;
	}

	while (idx < MPTCP_SUBFLOWS_MAX)
		data->contexts[idx++] = NULL;

	/* a fresh scheduling round starts with an empty burst budget */
	msk->snd_burst = 0;

	return 0;
}

/* Pick the subflow(s) to transmit on, marking them via the scheduled
 * flag. Uses msk->sched->get_subflow() when a scheduler is attached,
 * otherwise falls back to the in-kernel default picker.
 * Returns 0 when at least one subflow was scheduled, -EINVAL otherwise.
 * Caller must hold the msk socket lock.
 */
int mptcp_sched_get_send(struct mptcp_sock *msk)
{
	struct sock *ssk;

	sock_owned_by_me((const struct sock *)msk);

	/* fallback check, hoisted out of mptcp_subflow_get_send() */
	if (__mptcp_check_fallback(msk)) {
		ssk = msk->first;
		if (!ssk || !__tcp_can_send(ssk) || !sk_stream_memory_free(ssk))
			return -EINVAL;
		mptcp_subflow_set_scheduled(mptcp_subflow_ctx(ssk), true);
		return 0;
	}

	if (msk->sched) {
		struct mptcp_sched_data data;

		mptcp_sched_data_init(msk, false, &data);
		msk->sched->get_subflow(msk, &data);
		return 0;
	}

	/* no scheduler attached: use the default in-kernel picker */
	ssk = mptcp_subflow_get_send(msk);
	if (!ssk)
		return -EINVAL;
	mptcp_subflow_set_scheduled(mptcp_subflow_ctx(ssk), true);
	return 0;
}

/* Pick the subflow(s) to retransmit on, marking them via the scheduled
 * flag. Uses msk->sched->get_subflow() when a scheduler is attached,
 * otherwise falls back to the in-kernel default picker.
 * Returns 0 when at least one subflow was scheduled, -EINVAL otherwise.
 * Caller must hold the msk socket lock.
 */
int mptcp_sched_get_retrans(struct mptcp_sock *msk)
{
	struct sock *ssk;

	sock_owned_by_me((const struct sock *)msk);

	/* fallback check, hoisted out of mptcp_subflow_get_retrans() */
	if (__mptcp_check_fallback(msk))
		return -EINVAL;

	if (msk->sched) {
		struct mptcp_sched_data data;

		mptcp_sched_data_init(msk, true, &data);
		msk->sched->get_subflow(msk, &data);
		return 0;
	}

	/* no scheduler attached: use the default in-kernel picker */
	ssk = mptcp_subflow_get_retrans(msk);
	if (!ssk)
		return -EINVAL;
	mptcp_subflow_set_scheduled(mptcp_subflow_ctx(ssk), true);
	return 0;
}

0 comments on commit 8ae8437

Please sign in to comment.