Subject: [PATCH net 1/3] rxrpc: Unlock new call in rxrpc_new_incoming_call() rather than the caller
From: David Howells <dhowells@redhat.com>
Date: Fri, 20 Dec 2019
Move the unlock and the ping transmission for a new incoming call into
rxrpc_new_incoming_call() rather than doing them in the caller,
rxrpc_input_packet(). This makes it easier to see what's going on.
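
To make the change easier to review, here is the effect on the caller,
condensed from the diff below (not the complete functions):

	/* Before: rxrpc_input_packet() pings and unlocks the new call itself. */
	call = rxrpc_new_incoming_call(local, rx, skb);
	if (!call)
		goto reject_packet;
	rxrpc_send_ping(call, skb);
	mutex_unlock(&call->user_mutex);

	/* After: the call comes back already pinged and unlocked. */
	call = rxrpc_new_incoming_call(local, rx, skb);
	if (!call)
		goto reject_packet;

rxrpc_new_incoming_call() now drops rx->incoming_lock on both the success
and failure paths and, on success, sends the ping and releases
call->user_mutex before returning.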

Suggested-by: Peter Zijlstra <peterz@infradead.org>
Signed-off-by: David Howells <dhowells@redhat.com>
Acked-by: Peter Zijlstra (Intel) <peterz@infradead.org>
cc: Ingo Molnar <mingo@redhat.com>
cc: Will Deacon <will@kernel.org>
cc: Davidlohr Bueso <dave@stgolabs.net>
---

 net/rxrpc/call_accept.c | 36 ++++++++++++++++++++++++++++--------
 net/rxrpc/input.c       | 18 ------------------
 2 files changed, 28 insertions(+), 26 deletions(-)

diff --git a/net/rxrpc/call_accept.c b/net/rxrpc/call_accept.c
index 135bf5cd8dd5..3685b1732f65 100644
--- a/net/rxrpc/call_accept.c
+++ b/net/rxrpc/call_accept.c
@@ -239,6 +239,22 @@ void rxrpc_discard_prealloc(struct rxrpc_sock *rx)
 	kfree(b);
 }
 
+/*
+ * Ping the other end to fill our RTT cache and to retrieve the rwind
+ * and MTU parameters.
+ */
+static void rxrpc_send_ping(struct rxrpc_call *call, struct sk_buff *skb)
+{
+	struct rxrpc_skb_priv *sp = rxrpc_skb(skb);
+	ktime_t now = skb->tstamp;
+
+	if (call->peer->rtt_usage < 3 ||
+	    ktime_before(ktime_add_ms(call->peer->rtt_last_req, 1000), now))
+		rxrpc_propose_ACK(call, RXRPC_ACK_PING, sp->hdr.serial,
+				  true, true,
+				  rxrpc_propose_ack_ping_for_params);
+}
+
 /*
  * Allocate a new incoming call from the prealloc pool, along with a connection
  * and a peer as necessary.
@@ -346,9 +362,7 @@ struct rxrpc_call *rxrpc_new_incoming_call(struct rxrpc_local *local,
 				  sp->hdr.seq, RX_INVALID_OPERATION, ESHUTDOWN);
 		skb->mark = RXRPC_SKB_MARK_REJECT_ABORT;
 		skb->priority = RX_INVALID_OPERATION;
-		_leave(" = NULL [close]");
-		call = NULL;
-		goto out;
+		goto no_call;
 	}
 
 	/* The peer, connection and call may all have sprung into existence due
@@ -361,9 +375,7 @@ struct rxrpc_call *rxrpc_new_incoming_call(struct rxrpc_local *local,
 	call = rxrpc_alloc_incoming_call(rx, local, peer, conn, skb);
 	if (!call) {
 		skb->mark = RXRPC_SKB_MARK_REJECT_BUSY;
-		_leave(" = NULL [busy]");
-		call = NULL;
-		goto out;
+		goto no_call;
 	}
 
 	trace_rxrpc_receive(call, rxrpc_receive_incoming,
@@ -432,10 +444,18 @@ struct rxrpc_call *rxrpc_new_incoming_call(struct rxrpc_local *local,
 	 */
 	rxrpc_put_call(call, rxrpc_call_put);
 
-	_leave(" = %p{%d}", call, call->debug_id);
-out:
 	spin_unlock(&rx->incoming_lock);
+
+	rxrpc_send_ping(call, skb);
+	mutex_unlock(&call->user_mutex);
+
+	_leave(" = %p{%d}", call, call->debug_id);
 	return call;
+
+no_call:
+	spin_unlock(&rx->incoming_lock);
+	_leave(" = NULL [%u]", skb->mark);
+	return NULL;
 }
 
 /*
diff --git a/net/rxrpc/input.c b/net/rxrpc/input.c
index 157be1ff8697..86bd133b4fa0 100644
--- a/net/rxrpc/input.c
+++ b/net/rxrpc/input.c
@@ -192,22 +192,6 @@ static void rxrpc_congestion_management(struct rxrpc_call *call,
 	goto out_no_clear_ca;
 }
 
-/*
- * Ping the other end to fill our RTT cache and to retrieve the rwind
- * and MTU parameters.
- */
-static void rxrpc_send_ping(struct rxrpc_call *call, struct sk_buff *skb)
-{
-	struct rxrpc_skb_priv *sp = rxrpc_skb(skb);
-	ktime_t now = skb->tstamp;
-
-	if (call->peer->rtt_usage < 3 ||
-	    ktime_before(ktime_add_ms(call->peer->rtt_last_req, 1000), now))
-		rxrpc_propose_ACK(call, RXRPC_ACK_PING, sp->hdr.serial,
-				  true, true,
-				  rxrpc_propose_ack_ping_for_params);
-}
-
 /*
  * Apply a hard ACK by advancing the Tx window.
  */
@@ -1396,8 +1380,6 @@ int rxrpc_input_packet(struct sock *udp_sk, struct sk_buff *skb)
 		call = rxrpc_new_incoming_call(local, rx, skb);
 		if (!call)
 			goto reject_packet;
-		rxrpc_send_ping(call, skb);
-		mutex_unlock(&call->user_mutex);
 	}
 
 	/* Process a call packet; this either discards or passes on the ref