mptcp: use mptcp_schedule_work instead of open-coding it
Beyond reducing code duplication, this also avoids scheduling the
mptcp_worker on a closed socket in some edge scenarios.

The addressed issue is actually older than the blamed commit
below, but this fix needs it as a prerequisite.

Fixes: ba8f48f7a4 ("mptcp: introduce mptcp_schedule_work")
Cc: stable@vger.kernel.org
Signed-off-by: Paolo Abeni <pabeni@redhat.com>
Reviewed-by: Matthieu Baerts <matthieu.baerts@tessares.net>
Signed-off-by: Matthieu Baerts <matthieu.baerts@tessares.net>
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
commit a5cb752b12
parent 5b7be2d4fd
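For reference, mptcp_schedule_work() (added by the blamed commit) centralizes the
schedule-plus-refcount pattern that the hunks below replace. The following is a
minimal sketch of that helper, written to illustrate the contract the callers rely
on; it is an approximation, not a verbatim copy of the upstream source:

    /* Sketch: schedule the mptcp worker only for sockets that are not
     * already closed, and take the extra reference only when the work
     * was actually queued.
     */
    bool mptcp_schedule_work(struct sock *sk)
    {
    	if (inet_sk_state_load(sk) != TCP_CLOSE &&
    	    schedule_work(&mptcp_sk(sk)->work)) {
    		/* hold a reference for the worker; the worker drops it
    		 * once it has run
    		 */
    		sock_hold(sk);
    		return true;
    	}
    	return false;
    }

With this helper, callers no longer need the open-coded schedule_work()/sock_hold()
pairs, and the TCP_CLOSE check is what avoids scheduling the worker on a closed
socket in the edge scenarios mentioned above.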
@@ -1192,9 +1192,8 @@ bool mptcp_incoming_options(struct sock *sk, struct sk_buff *skb)
 		 */
 		if (TCP_SKB_CB(skb)->seq == TCP_SKB_CB(skb)->end_seq) {
 			if (mp_opt.data_fin && mp_opt.data_len == 1 &&
-			    mptcp_update_rcv_data_fin(msk, mp_opt.data_seq, mp_opt.dsn64) &&
-			    schedule_work(&msk->work))
-				sock_hold(subflow->conn);
+			    mptcp_update_rcv_data_fin(msk, mp_opt.data_seq, mp_opt.dsn64))
+				mptcp_schedule_work((struct sock *)msk);
 
 			return true;
 		}
@@ -408,9 +408,8 @@ void mptcp_subflow_reset(struct sock *ssk)
 
 	tcp_send_active_reset(ssk, GFP_ATOMIC);
 	tcp_done(ssk);
-	if (!test_and_set_bit(MPTCP_WORK_CLOSE_SUBFLOW, &mptcp_sk(sk)->flags) &&
-	    schedule_work(&mptcp_sk(sk)->work))
-		return; /* worker will put sk for us */
+	if (!test_and_set_bit(MPTCP_WORK_CLOSE_SUBFLOW, &mptcp_sk(sk)->flags))
+		mptcp_schedule_work(sk);
 
 	sock_put(sk);
 }
@@ -1118,8 +1117,8 @@ static enum mapping_status get_mapping_status(struct sock *ssk,
 			skb_ext_del(skb, SKB_EXT_MPTCP);
 			return MAPPING_OK;
 		} else {
-			if (updated && schedule_work(&msk->work))
-				sock_hold((struct sock *)msk);
+			if (updated)
+				mptcp_schedule_work((struct sock *)msk);
 
 			return MAPPING_DATA_FIN;
 		}
@@ -1222,17 +1221,12 @@ static void mptcp_subflow_discard_data(struct sock *ssk, struct sk_buff *skb,
 /* sched mptcp worker to remove the subflow if no more data is pending */
 static void subflow_sched_work_if_closed(struct mptcp_sock *msk, struct sock *ssk)
 {
-	struct sock *sk = (struct sock *)msk;
-
 	if (likely(ssk->sk_state != TCP_CLOSE))
 		return;
 
 	if (skb_queue_empty(&ssk->sk_receive_queue) &&
-	    !test_and_set_bit(MPTCP_WORK_CLOSE_SUBFLOW, &msk->flags)) {
-		sock_hold(sk);
-		if (!schedule_work(&msk->work))
-			sock_put(sk);
-	}
+	    !test_and_set_bit(MPTCP_WORK_CLOSE_SUBFLOW, &msk->flags))
+		mptcp_schedule_work((struct sock *)msk);
 }
 
 static bool subflow_can_fallback(struct mptcp_subflow_context *subflow)