Commit 369f3c7

Eric Dumazet authored and gregkh committed
tcp: introduce tcp_skb_timestamp_us() helper
[ Upstream commit 2fd66ff ]

There are a few places where TCP reads skb->skb_mstamp expecting a value in usec units.

skb->tstamp (aka skb->skb_mstamp) will soon store a CLOCK_TAI nsec value.

Add tcp_skb_timestamp_us() to provide the proper conversion where needed.

Signed-off-by: Eric Dumazet <[email protected]>
Signed-off-by: David S. Miller <[email protected]>
Stable-dep-of: c8770db ("tcp: check skb is non-NULL in tcp_rto_delta_us()")
Signed-off-by: Sasha Levin <[email protected]>
1 parent 4be585f commit 369f3c7
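
Before the per-file diffs, a minimal standalone sketch of the idea behind the helper may help: callers want the skb departure time in microseconds, so the unit handling is centralized in one function instead of every caller reading skb->skb_mstamp directly. The nsec-backed variant below is only an assumption about what the conversion could look like once the field stores CLOCK_TAI nanoseconds; in this commit the field still holds usec, so the real helper simply returns it. The struct and function names in the sketch are made up for illustration and are not kernel APIs.

/*
 * Standalone sketch (not kernel code) of the unit-conversion concern
 * that tcp_skb_timestamp_us() centralizes.
 */
#include <stdint.h>
#include <stdio.h>

#define NSEC_PER_USEC 1000ULL

struct fake_skb {
	uint64_t skb_mstamp;	/* departure time, usec in this commit */
};

/* Mirrors the helper added in include/net/tcp.h: field already in usec. */
static inline uint64_t skb_timestamp_us(const struct fake_skb *skb)
{
	return skb->skb_mstamp;
}

/* Hypothetical nsec-backed variant the helper makes room for later. */
static inline uint64_t skb_timestamp_us_from_ns(uint64_t skb_mstamp_ns)
{
	return skb_mstamp_ns / NSEC_PER_USEC;
}

int main(void)
{
	struct fake_skb skb = { .skb_mstamp = 1234567 };	/* 1.234567 s */

	printf("usec-backed helper:  %llu us\n",
	       (unsigned long long)skb_timestamp_us(&skb));
	printf("nsec-backed variant: %llu us\n",
	       (unsigned long long)skb_timestamp_us_from_ns(1234567000ULL));
	return 0;
}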

6 files changed, +26 -17 lines changed

include/net/tcp.h (+7 -1)

@@ -794,6 +794,12 @@ static inline u32 tcp_skb_timestamp(const struct sk_buff *skb)
 	return div_u64(skb->skb_mstamp, USEC_PER_SEC / TCP_TS_HZ);
 }
 
+/* provide the departure time in us unit */
+static inline u64 tcp_skb_timestamp_us(const struct sk_buff *skb)
+{
+	return skb->skb_mstamp;
+}
+
 
 #define tcp_flag_byte(th)	(((u_int8_t *)th)[13])
 
@@ -2003,7 +2009,7 @@ static inline s64 tcp_rto_delta_us(const struct sock *sk)
 {
 	const struct sk_buff *skb = tcp_rtx_queue_head(sk);
 	u32 rto = inet_csk(sk)->icsk_rto;
-	u64 rto_time_stamp_us = skb->skb_mstamp + jiffies_to_usecs(rto);
+	u64 rto_time_stamp_us = tcp_skb_timestamp_us(skb) + jiffies_to_usecs(rto);
 
 	return rto_time_stamp_us - tcp_sk(sk)->tcp_mstamp;
 }
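
As a side note on tcp_rto_delta_us() above, here is a small user-space sketch of the same arithmetic: the remaining time before the RTO fires is the send time of the rtx-queue head plus the RTO, minus the current time, all in usec. The HZ value, the function names, and the sample numbers are assumptions for illustration; this is not the kernel implementation.

/*
 * Standalone sketch (not kernel code) of the arithmetic in
 * tcp_rto_delta_us(): "head send time + RTO" minus "now", in usec.
 */
#include <stdint.h>
#include <stdio.h>

#define USEC_PER_JIFFY 1000ULL	/* assumes HZ=1000 */

static int64_t rto_delta_us(uint64_t head_sent_us, uint64_t now_us,
			    uint32_t rto_jiffies)
{
	uint64_t rto_time_stamp_us = head_sent_us + rto_jiffies * USEC_PER_JIFFY;

	return (int64_t)(rto_time_stamp_us - now_us);
}

int main(void)
{
	/* Head was sent 150 ms ago; RTO is 200 jiffies (200 ms at HZ=1000). */
	uint64_t now_us = 1000000;
	uint64_t head_sent_us = now_us - 150000;

	/* Positive result: 50 ms left before the retransmit timer should fire. */
	printf("delta = %lld us\n",
	       (long long)rto_delta_us(head_sent_us, now_us, 200));
	return 0;
}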

net/ipv4/tcp_input.c (+6 -5)

@@ -1301,7 +1301,7 @@ static bool tcp_shifted_skb(struct sock *sk, struct sk_buff *prev,
 	 */
 	tcp_sacktag_one(sk, state, TCP_SKB_CB(skb)->sacked,
 			start_seq, end_seq, dup_sack, pcount,
-			skb->skb_mstamp);
+			tcp_skb_timestamp_us(skb));
 	tcp_rate_skb_delivered(sk, skb, state->rate);
 
 	if (skb == tp->lost_skb_hint)
@@ -1590,7 +1590,7 @@ static struct sk_buff *tcp_sacktag_walk(struct sk_buff *skb, struct sock *sk,
 						TCP_SKB_CB(skb)->end_seq,
 						dup_sack,
 						tcp_skb_pcount(skb),
-						skb->skb_mstamp);
+						tcp_skb_timestamp_us(skb));
 			tcp_rate_skb_delivered(sk, skb, state->rate);
 			if (TCP_SKB_CB(skb)->sacked & TCPCB_SACKED_ACKED)
 				list_del_init(&skb->tcp_tsorted_anchor);
@@ -3140,7 +3140,7 @@ static int tcp_clean_rtx_queue(struct sock *sk, u32 prior_fack,
 			tp->retrans_out -= acked_pcount;
 			flag |= FLAG_RETRANS_DATA_ACKED;
 		} else if (!(sacked & TCPCB_SACKED_ACKED)) {
-			last_ackt = skb->skb_mstamp;
+			last_ackt = tcp_skb_timestamp_us(skb);
 			WARN_ON_ONCE(last_ackt == 0);
 			if (!first_ackt)
 				first_ackt = last_ackt;
@@ -3158,7 +3158,7 @@ static int tcp_clean_rtx_queue(struct sock *sk, u32 prior_fack,
 			tp->delivered += acked_pcount;
 			if (!tcp_skb_spurious_retrans(tp, skb))
 				tcp_rack_advance(tp, sacked, scb->end_seq,
-						 skb->skb_mstamp);
+						 tcp_skb_timestamp_us(skb));
 		}
 		if (sacked & TCPCB_LOST)
 			tp->lost_out -= acked_pcount;
@@ -3253,7 +3253,8 @@ static int tcp_clean_rtx_queue(struct sock *sk, u32 prior_fack,
 			tp->lost_cnt_hint -= min(tp->lost_cnt_hint, delta);
 		}
 	} else if (skb && rtt_update && sack_rtt_us >= 0 &&
-		   sack_rtt_us > tcp_stamp_us_delta(tp->tcp_mstamp, skb->skb_mstamp)) {
+		   sack_rtt_us > tcp_stamp_us_delta(tp->tcp_mstamp,
+						    tcp_skb_timestamp_us(skb))) {
 		/* Do not re-arm RTO if the sack RTT is measured from data sent
 		 * after when the head was last (re)transmitted. Otherwise the
 		 * timeout may continue to extend in loss recovery.

net/ipv4/tcp_ipv4.c (+1 -1)

@@ -556,7 +556,7 @@ void tcp_v4_err(struct sk_buff *icmp_skb, u32 info)
 		icsk->icsk_rto = inet_csk_rto_backoff(icsk, TCP_RTO_MAX);
 
 		tcp_mstamp_refresh(tp);
-		delta_us = (u32)(tp->tcp_mstamp - skb->skb_mstamp);
+		delta_us = (u32)(tp->tcp_mstamp - tcp_skb_timestamp_us(skb));
 		remaining = icsk->icsk_rto -
 			    usecs_to_jiffies(delta_us);
 

net/ipv4/tcp_output.c (+1 -1)

@@ -1993,7 +1993,7 @@ static bool tcp_tso_should_defer(struct sock *sk, struct sk_buff *skb,
 	head = tcp_rtx_queue_head(sk);
 	if (!head)
 		goto send_now;
-	age = tcp_stamp_us_delta(tp->tcp_mstamp, head->skb_mstamp);
+	age = tcp_stamp_us_delta(tp->tcp_mstamp, tcp_skb_timestamp_us(head));
 	/* If next ACK is likely to come too late (half srtt), do not defer */
 	if (age < (tp->srtt_us >> 4))
 		goto send_now;

net/ipv4/tcp_rate.c (+8 -7)

@@ -55,8 +55,10 @@ void tcp_rate_skb_sent(struct sock *sk, struct sk_buff *skb)
 	 * bandwidth estimate.
 	 */
 	if (!tp->packets_out) {
-		tp->first_tx_mstamp  = skb->skb_mstamp;
-		tp->delivered_mstamp = skb->skb_mstamp;
+		u64 tstamp_us = tcp_skb_timestamp_us(skb);
+
+		tp->first_tx_mstamp  = tstamp_us;
+		tp->delivered_mstamp = tstamp_us;
 	}
 
 	TCP_SKB_CB(skb)->tx.first_tx_mstamp	= tp->first_tx_mstamp;
@@ -88,13 +90,12 @@ void tcp_rate_skb_delivered(struct sock *sk, struct sk_buff *skb,
 		rs->is_app_limited   = scb->tx.is_app_limited;
 		rs->is_retrans	     = scb->sacked & TCPCB_RETRANS;
 
+		/* Record send time of most recently ACKed packet: */
+		tp->first_tx_mstamp  = tcp_skb_timestamp_us(skb);
 		/* Find the duration of the "send phase" of this window: */
-		rs->interval_us      = tcp_stamp_us_delta(
-						skb->skb_mstamp,
-						scb->tx.first_tx_mstamp);
+		rs->interval_us = tcp_stamp_us_delta(tp->first_tx_mstamp,
+						     scb->tx.first_tx_mstamp);
 
-		/* Record send time of most recently ACKed packet: */
-		tp->first_tx_mstamp  = skb->skb_mstamp;
 	}
 	/* Mark off the skb delivered once it's sacked to avoid being
 	 * used again when it's cumulatively acked. For acked packets

net/ipv4/tcp_recovery.c (+3 -2)

@@ -51,7 +51,7 @@ static u32 tcp_rack_reo_wnd(const struct sock *sk)
 s32 tcp_rack_skb_timeout(struct tcp_sock *tp, struct sk_buff *skb, u32 reo_wnd)
 {
 	return tp->rack.rtt_us + reo_wnd -
-	       tcp_stamp_us_delta(tp->tcp_mstamp, skb->skb_mstamp);
+	       tcp_stamp_us_delta(tp->tcp_mstamp, tcp_skb_timestamp_us(skb));
 }
 
 /* RACK loss detection (IETF draft draft-ietf-tcpm-rack-01):
@@ -92,7 +92,8 @@ static void tcp_rack_detect_loss(struct sock *sk, u32 *reo_timeout)
 		    !(scb->sacked & TCPCB_SACKED_RETRANS))
 			continue;
 
-		if (!tcp_rack_sent_after(tp->rack.mstamp, skb->skb_mstamp,
+		if (!tcp_rack_sent_after(tp->rack.mstamp,
+					 tcp_skb_timestamp_us(skb),
 					 tp->rack.end_seq, scb->end_seq))
 			break;
 