Adds ECN handling managed by ROCCET.

This commit is contained in:
Lukas Prause
2026-01-30 13:48:31 +01:00
parent 5ebd5c0cb7
commit c24b02231a
2 changed files with 28 additions and 6 deletions

View File

@@ -130,6 +130,7 @@ static inline void roccettcp_reset(struct roccettcp *ca)
ca->curr_min_rtt_timed.rtt = ~0U;
ca->curr_min_rtt_timed.time = ~0U;
ca->last_rtt = 0;
ca->ece_received = 0;
}
static inline void update_min_rtt(struct sock *sk)
@@ -431,12 +432,17 @@ __bpf_kfunc static void roccettcp_cong_avoid(struct sock *sk, u32 ack,
if (now - ca->roccet_last_event_time_us <= 100 * USEC_PER_MSEC)
return;
/* Lift off: Detect an exit point for tcp slow start
/* LAUNCH: Detect an exit point for tcp slow start
* in networks with large buffers of multiple BDP
* Like in cellular networks (5G, ...).
* Or exit LAUNCH if cwnd is too large for application layer
* data rate.
*/
if (tcp_in_slow_start(tp) && ca->curr_srRTT > sr_rtt_upper_bound &&
get_ack_rate_diff(ca) >= ack_rate_diff_ss) {
if ((tcp_in_slow_start(tp) && ca->curr_srRTT > sr_rtt_upper_bound &&
get_ack_rate_diff(ca) >= ack_rate_diff_ss) ||
(!tcp_is_cwnd_limited(sk) && tcp_in_slow_start(tp))
) {
ca->epoch_start = 0;
/* Handle initial slow start. Here we observe the most problems */
@@ -487,7 +493,9 @@ __bpf_kfunc static void roccettcp_cong_avoid(struct sock *sk, u32 ack,
if (roccet_xj < sr_rtt_upper_bound)
roccet_xj = sr_rtt_upper_bound;
if (ca->curr_srRTT > roccet_xj && bw_limit_detect) {
if (ca->curr_srRTT > roccet_xj && (bw_limit_detect || ca->ece_received)) {
if(ca->ece_received)
ca->ece_received = 0;
ca->epoch_start = 0;
ca->roccet_last_event_time_us = now;
ca->cnt = 100 * tcp_snd_cwnd(tp);
@@ -527,7 +535,7 @@ __bpf_kfunc static u32 roccettcp_recalc_ssthresh(struct sock *sk)
/* Don't exit slow start if loss occurs. */
if (tcp_in_slow_start(tp))
return tcp_snd_cwnd(tp);
ca->epoch_start = 0; /* end of epoch */
/* Wmax and fast convergence */
@@ -576,6 +584,18 @@ __bpf_kfunc static void roccettcp_acked(struct sock *sk,
}
}
__bpf_kfunc static void roccet_in_ack_event(struct sock *sk, u32 flags)
{
	struct roccettcp *ca = inet_csk_ca(sk);

	/* Latch an incoming ECN Echo (ECE) signal on this ACK.
	 * The flag is only recorded here; the actual congestion
	 * reaction is deferred to roccettcp_cong_avoid().
	 */
	if (flags & CA_ACK_ECE)
		ca->ece_received = 1;
}
static struct tcp_congestion_ops roccet_tcp __read_mostly = {
.init = roccettcp_init,
.ssthresh = roccettcp_recalc_ssthresh,
@@ -584,6 +604,7 @@ static struct tcp_congestion_ops roccet_tcp __read_mostly = {
.undo_cwnd = tcp_reno_undo_cwnd,
.cwnd_event = roccettcp_cwnd_event,
.pkts_acked = roccettcp_acked,
.in_ack_event = roccet_in_ack_event,
.owner = THIS_MODULE,
.name = "roccet",
};