Browse source code

Adds ECN handling managed by ROCCET: srRTT is used as a hard condition after an ECN signal is received.

master
Lukas Prause, 21 hours ago
parent
commit 0d299edd5f
2 changed files with 48 additions and 9 deletions
  1. tcp_roccet.c  +45 -8
  2. tcp_roccet.h  +3 -1

tcp_roccet.c  +45 -8

  * CUBIC's window growth function and adds, based on RTT
  * and ACK rate, congestion events.
  *
- * NOTE: A paper for TCP ROCCET is currently under review.
- * A draft of this paper can be found here:
+ * A peer-reviewed paper on TCP ROCCET will be presented at the WONS 2026 conference.
+ * A draft of the paper is available here:
  * https://arxiv.org/abs/2510.25281
  *
  *
  * Further information about CUBIC:

 	ca->bw_limit.next_check = 0;
 	ca->curr_min_rtt_timed.rtt = ~0U;
 	ca->curr_min_rtt_timed.time = ~0U;
+	ca->last_rtt = 0;
+	ca->ece_srrtt = 0;
+	ca->ece_cwnd = 2;
 }


 static inline void update_min_rtt(struct sock *sk)

 	update_min_rtt(sk);
 	update_srrtt(sk);


+	/* Reset ECE handling if we already have more bandwidth
+	 * than when we received the last ECE.
+	 */
+	if (ca->ece_srrtt > 0) {
+		if (tcp_snd_cwnd(tp) >= ca->ece_cwnd)
+			ca->ece_srrtt = 0;
+	}

 	/* ROCCET drain.
 	 * Do not increase the cwnd for 100ms after a ROCCET congestion event.
 	 */
 	if (now - ca->roccet_last_event_time_us <= 100 * USEC_PER_MSEC)
 		return;


-	/* Lift off: Detect an exit point for tcp slow start
+	/* LAUNCH: Detect an exit point for tcp slow start
 	 * in networks with large buffers of multiple BDP
 	 * Like in cellular networks (5G, ...).
+	 * Or exit LAUNCH if cwnd is too large for the application layer
+	 * data rate.
 	 */
-	if (tcp_in_slow_start(tp) && ca->curr_srRTT > sr_rtt_upper_bound &&
-	    get_ack_rate_diff(ca) >= ack_rate_diff_ss) {
+	if ((tcp_in_slow_start(tp) && ca->curr_srRTT > sr_rtt_upper_bound &&
+	     get_ack_rate_diff(ca) >= ack_rate_diff_ss) ||
+	    (!tcp_is_cwnd_limited(sk) && tcp_in_slow_start(tp))) {
 		ca->epoch_start = 0;


 	/* Handle initial slow start. Here we observe the most problems. */
 	if (roccet_xj < sr_rtt_upper_bound)
 		roccet_xj = sr_rtt_upper_bound;


-	if (ca->curr_srRTT > roccet_xj && bw_limit_detect) {
+	/* This is true if we recently received an ECE bit.
+	 * Therefore we should respect the srRTT at this point.
+	 */
+	if (ca->ece_srrtt < roccet_xj && ca->ece_srrtt > 0)
+		roccet_xj = ca->ece_srrtt;
+
+	if (ca->curr_srRTT > roccet_xj && (bw_limit_detect || ca->ece_srrtt > 0)) {
 		ca->epoch_start = 0;
 		ca->roccet_last_event_time_us = now;
 		ca->cnt = 100 * tcp_snd_cwnd(tp);
 		/* Don't exit slow start if loss occurs. */
 		if (tcp_in_slow_start(tp))
 			return tcp_snd_cwnd(tp);
 		ca->epoch_start = 0;	/* end of epoch */


 	/* Wmax and fast convergence */
 	}
 }


+__bpf_kfunc static void roccet_in_ack_event(struct sock *sk, u32 flags)
+{
+	struct tcp_sock *tp = tcp_sk(sk);
+	struct roccettcp *ca = inet_csk_ca(sk);
+
+	/* Handle the ECE bit.
+	 * Processing of ECE events is done in roccettcp_cong_avoid().
+	 */
+	if (flags & CA_ACK_ECE) {
+		ca->ece_srrtt = ca->curr_srRTT;
+		ca->ece_cwnd = tcp_snd_cwnd(tp);
+	}
+}
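Taken together, the hunks above form a small ECE state machine: roccet_in_ack_event() latches the smoothed RTT and cwnd seen when an ECE-marked ACK arrives, the congestion-avoidance path then treats that latched srRTT as a hard upper bound on roccet_xj, and the latch is cleared once cwnd has grown back to its value at the time of the ECE. Below is a minimal standalone C sketch of that logic for illustration only; it is not the kernel module, and the names roccet_ece_state, on_ece_ack, ece_congestion_event and the numbers in main() are invented for the example.

#include <stdio.h>
#include <stdint.h>
#include <stdbool.h>

/* Illustrative userspace model of the ECE handling in this commit (not kernel code). */
struct roccet_ece_state {
	uint32_t ece_srrtt;	/* srRTT latched when the last ECE-marked ACK arrived (0 = none) */
	uint32_t ece_cwnd;	/* cwnd latched at the same moment */
};

/* Mirrors roccet_in_ack_event(): latch srRTT and cwnd when an ECE bit is seen. */
static void on_ece_ack(struct roccet_ece_state *s, uint32_t curr_srrtt, uint32_t cwnd)
{
	s->ece_srrtt = curr_srrtt;
	s->ece_cwnd = cwnd;
}

/* Mirrors the congestion-avoidance changes: clear the latch once cwnd has
 * recovered past ece_cwnd, otherwise let the latched srRTT tighten roccet_xj,
 * and report whether a ROCCET congestion event should fire.
 */
static bool ece_congestion_event(struct roccet_ece_state *s, uint32_t cwnd,
				 uint32_t curr_srrtt, uint32_t roccet_xj,
				 bool bw_limit_detect)
{
	if (s->ece_srrtt > 0 && cwnd >= s->ece_cwnd)
		s->ece_srrtt = 0;		/* back at pre-ECE bandwidth: forget the ECE */

	if (s->ece_srrtt > 0 && s->ece_srrtt < roccet_xj)
		roccet_xj = s->ece_srrtt;	/* srRTT at ECE time becomes the hard bound */

	return curr_srrtt > roccet_xj && (bw_limit_detect || s->ece_srrtt > 0);
}

int main(void)
{
	struct roccet_ece_state s = { 0, 2 };

	on_ece_ack(&s, 12000, 40);	/* ECE seen at srRTT 12 ms, cwnd 40 */
	printf("event while below ece_cwnd: %d\n",
	       ece_congestion_event(&s, 30, 15000, 20000, false));
	printf("event after cwnd recovered: %d\n",
	       ece_congestion_event(&s, 45, 15000, 20000, false));
	return 0;
}

Compiled on its own, this prints 1 while cwnd is still below ece_cwnd and 0 once cwnd has recovered and the latch is cleared.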

 static struct tcp_congestion_ops roccet_tcp __read_mostly = {
 	.init		= roccettcp_init,
 	.ssthresh	= roccettcp_recalc_ssthresh,
 	.undo_cwnd	= tcp_reno_undo_cwnd,
 	.cwnd_event	= roccettcp_cwnd_event,
 	.pkts_acked	= roccettcp_acked,
+	.in_ack_event	= roccet_in_ack_event,
 	.owner		= THIS_MODULE,
 	.name		= "roccet",
 };
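The ops struct registers the algorithm under the name "roccet", so once the module is built and loaded it can be selected per socket with the standard TCP_CONGESTION socket option (or system-wide via the net.ipv4.tcp_congestion_control sysctl). This is ordinary Linux socket API usage, not part of this patch; a short sketch:

#include <stdio.h>
#include <string.h>
#include <sys/socket.h>
#include <netinet/in.h>
#include <netinet/tcp.h>
#include <unistd.h>

int main(void)
{
	int fd = socket(AF_INET, SOCK_STREAM, 0);
	if (fd < 0) {
		perror("socket");
		return 1;
	}

	/* Select the congestion control registered above under .name = "roccet".
	 * Fails (e.g. ENOENT) if the tcp_roccet module is not loaded.
	 */
	if (setsockopt(fd, IPPROTO_TCP, TCP_CONGESTION, "roccet", strlen("roccet")) < 0)
		perror("setsockopt(TCP_CONGESTION)");

	char buf[16] = { 0 };
	socklen_t len = sizeof(buf);
	if (getsockopt(fd, IPPROTO_TCP, TCP_CONGESTION, buf, &len) == 0)
		printf("congestion control in use: %s\n", buf);

	close(fd);
	return 0;
}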

tcp_roccet.h  +3 -1

 	u32	tcp_cwnd;	/* estimated tcp cwnd */
 	u32	curr_rtt;	/* the minimum rtt of current round */

-	u32	roccet_last_event_time_us;	/* The last time ROCCETv2 was triggered */
+	u32	roccet_last_event_time_us;	/* The last time ROCCET was triggered */
+	u32	ece_cwnd;	/* The cwnd when an ECE bit was received */
+	u32	ece_srrtt;	/* The srRTT when the ECE was received */
 	u32	curr_min_rtt;	/* The current observed minRTT */
 	struct TimedRTT curr_min_rtt_timed;	/* The current observed minRTT with
 						   the timestamp when it was observed */
