Yu Zhu
December 12, 2022 03:42AM
Hi,

Thanks for reply.

I agree that a better framework is needed to implement different congestion algorithms.
And the current implementation may have a small problem: for example, it would be better
to pass acknowledgment events (including the ACK of multiple packets and the losses)
instead of passing each packet's info to the congestion control algorithms.

As you suggested, “moved rtt and congestion control variables to ngx_quic_path_t structure” is separate patch now.
And this patch is a prerequisite for multipath QUIC.

# HG changeset patch
# User Yu Zhu <lishu.zy@alibaba-inc.com>
# Date 1670831223 -28800
# Mon Dec 12 15:47:03 2022 +0800
# Branch quic
# Node ID 8723d4282f6d6a5b67e271652f46d79ee24dfb39
# Parent b87a0dbc1150f415def5bc1e1f00d02b33519026
QUIC: moved rtt and congestion control variables to ngx_quic_path_t structure.

As per RFC 9002, Section 6, Loss Detection:

Loss detection is separate per packet number space, unlike RTT measurement
and congestion control, because RTT and congestion control are properties
of the path, whereas loss detection also relies upon key availability.

No functional changes.

diff -r b87a0dbc1150 -r 8723d4282f6d src/event/quic/ngx_event_quic.c
--- a/src/event/quic/ngx_event_quic.c Tue Oct 25 12:52:09 2022 +0400
+++ b/src/event/quic/ngx_event_quic.c Mon Dec 12 15:47:03 2022 +0800
@@ -263,15 +263,6 @@
ngx_queue_init(&qc->free_frames);
- qc->avg_rtt = NGX_QUIC_INITIAL_RTT;
- qc->rttvar = NGX_QUIC_INITIAL_RTT / 2;
- qc->min_rtt = NGX_TIMER_INFINITE;
- qc->first_rtt = NGX_TIMER_INFINITE;
-
- /*
- * qc->latest_rtt = 0
- */
-
qc->pto.log = c->log;
qc->pto.data = c;
qc->pto.handler = ngx_quic_pto_handler;
@@ -311,12 +302,6 @@
qc->streams.client_max_streams_uni = qc->tp.initial_max_streams_uni;
qc->streams.client_max_streams_bidi = qc->tp.initial_max_streams_bidi;
- qc->congestion.window = ngx_min(10 * qc->tp.max_udp_payload_size,
- ngx_max(2 * qc->tp.max_udp_payload_size,
- 14720));
- qc->congestion.ssthresh = (size_t) -1;
- qc->congestion.recovery_start = ngx_current_msec;
-
if (pkt->validated && pkt->retried) {
qc->tp.retry_scid.len = pkt->dcid.len;
qc->tp.retry_scid.data = ngx_pstrdup(c->pool, &pkt->dcid);
diff -r b87a0dbc1150 -r 8723d4282f6d src/event/quic/ngx_event_quic_ack.c
--- a/src/event/quic/ngx_event_quic_ack.c Tue Oct 25 12:52:09 2022 +0400
+++ b/src/event/quic/ngx_event_quic_ack.c Mon Dec 12 15:47:03 2022 +0800
@@ -29,7 +29,7 @@
} ngx_quic_ack_stat_t;

-static ngx_inline ngx_msec_t ngx_quic_lost_threshold(ngx_quic_connection_t *qc);
+static ngx_inline ngx_msec_t ngx_quic_lost_threshold(ngx_quic_path_t *path);
static void ngx_quic_rtt_sample(ngx_connection_t *c, ngx_quic_ack_frame_t *ack,
enum ssl_encryption_level_t level, ngx_msec_t send_time);
static ngx_int_t ngx_quic_handle_ack_frame_range(ngx_connection_t *c,
@@ -48,11 +48,11 @@
/* RFC 9002, 6.1.2. Time Threshold: kTimeThreshold, kGranularity */
static ngx_inline ngx_msec_t
-ngx_quic_lost_threshold(ngx_quic_connection_t *qc)
+ngx_quic_lost_threshold(ngx_quic_path_t *path)
{
ngx_msec_t thr;
- thr = ngx_max(qc->latest_rtt, qc->avg_rtt);
+ thr = ngx_max(path->latest_rtt, path->avg_rtt);
thr += thr >> 3;
return ngx_max(thr, NGX_QUIC_TIME_GRANULARITY);
@@ -179,21 +179,23 @@
enum ssl_encryption_level_t level, ngx_msec_t send_time)
{
ngx_msec_t latest_rtt, ack_delay, adjusted_rtt, rttvar_sample;
+ ngx_quic_path_t *path;
ngx_quic_connection_t *qc;
qc = ngx_quic_get_connection(c);
+ path = qc->path;
latest_rtt = ngx_current_msec - send_time;
- qc->latest_rtt = latest_rtt;
+ path->latest_rtt = latest_rtt;
- if (qc->min_rtt == NGX_TIMER_INFINITE) {
- qc->min_rtt = latest_rtt;
- qc->avg_rtt = latest_rtt;
- qc->rttvar = latest_rtt / 2;
- qc->first_rtt = ngx_current_msec;
+ if (path->min_rtt == NGX_TIMER_INFINITE) {
+ path->min_rtt = latest_rtt;
+ path->avg_rtt = latest_rtt;
+ path->rttvar = latest_rtt / 2;
+ path->first_rtt = ngx_current_msec;
} else {
- qc->min_rtt = ngx_min(qc->min_rtt, latest_rtt);
+ path->min_rtt = ngx_min(path->min_rtt, latest_rtt);
ack_delay = (ack->delay << qc->ctp.ack_delay_exponent) / 1000;
@@ -203,18 +205,18 @@
adjusted_rtt = latest_rtt;
- if (qc->min_rtt + ack_delay < latest_rtt) {
+ if (path->min_rtt + ack_delay < latest_rtt) {
adjusted_rtt -= ack_delay;
}
- qc->avg_rtt += (adjusted_rtt >> 3) - (qc->avg_rtt >> 3);
- rttvar_sample = ngx_abs((ngx_msec_int_t) (qc->avg_rtt - adjusted_rtt));
- qc->rttvar += (rttvar_sample >> 2) - (qc->rttvar >> 2);
+ path->avg_rtt += (adjusted_rtt >> 3) - (path->avg_rtt >> 3);
+ rttvar_sample = ngx_abs((ngx_msec_int_t) (path->avg_rtt - adjusted_rtt));
+ path->rttvar += (rttvar_sample >> 2) - (path->rttvar >> 2);
}
ngx_log_debug4(NGX_LOG_DEBUG_EVENT, c->log, 0,
"quic rtt sample latest:%M min:%M avg:%M var:%M",
- latest_rtt, qc->min_rtt, qc->avg_rtt, qc->rttvar);
+ latest_rtt, path->min_rtt, path->avg_rtt, path->rttvar);
}

@@ -317,7 +319,7 @@
}
qc = ngx_quic_get_connection(c);
- cg = &qc->congestion;
+ cg = &qc->path->congestion;
blocked = (cg->in_flight >= cg->window) ? 1 : 0;
@@ -428,13 +430,15 @@
ngx_uint_t i, nlost;
ngx_msec_t now, wait, thr, oldest, newest;
ngx_queue_t *q;
+ ngx_quic_path_t *path;
ngx_quic_frame_t *start;
ngx_quic_send_ctx_t *ctx;
ngx_quic_connection_t *qc;
qc = ngx_quic_get_connection(c);
+ path = qc->path;
now = ngx_current_msec;
- thr = ngx_quic_lost_threshold(qc);
+ thr = ngx_quic_lost_threshold(path);
/* send time of lost packets across all send contexts */
oldest = NGX_TIMER_INFINITE;
@@ -471,7 +475,7 @@
break;
}
- if (start->last > qc->first_rtt) {
+ if (start->last > path->first_rtt) {
if (oldest == NGX_TIMER_INFINITE || start->last < oldest) {
oldest = start->last;
@@ -519,8 +523,8 @@
qc = ngx_quic_get_connection(c);
- duration = qc->avg_rtt;
- duration += ngx_max(4 * qc->rttvar, NGX_QUIC_TIME_GRANULARITY);
+ duration = qc->path->avg_rtt;
+ duration += ngx_max(4 * qc->path->rttvar, NGX_QUIC_TIME_GRANULARITY);
duration += qc->ctp.max_ack_delay;
duration *= NGX_QUIC_PERSISTENT_CONGESTION_THR;
@@ -535,7 +539,7 @@
ngx_quic_connection_t *qc;
qc = ngx_quic_get_connection(c);
- cg = &qc->congestion;
+ cg = &qc->path->congestion;
cg->recovery_start = ngx_current_msec;
cg->window = qc->tp.max_udp_payload_size * 2;
@@ -656,7 +660,7 @@
}
qc = ngx_quic_get_connection(c);
- cg = &qc->congestion;
+ cg = &qc->path->congestion;
blocked = (cg->in_flight >= cg->window) ? 1 : 0;
@@ -721,7 +725,7 @@
if (ctx->largest_ack != NGX_QUIC_UNSET_PN) {
q = ngx_queue_head(&ctx->sent);
f = ngx_queue_data(q, ngx_quic_frame_t, queue);
- w = (ngx_msec_int_t) (f->last + ngx_quic_lost_threshold(qc) - now);
+ w = (ngx_msec_int_t) (f->last + ngx_quic_lost_threshold(qc->path) - now);
if (f->pnum <= ctx->largest_ack) {
if (w < 0 || ctx->largest_ack - f->pnum >= NGX_QUIC_PKT_THR) {
@@ -777,17 +781,19 @@
ngx_quic_pto(ngx_connection_t *c, ngx_quic_send_ctx_t *ctx)
{
ngx_msec_t duration;
+ ngx_quic_path_t *path;
ngx_quic_connection_t *qc;
qc = ngx_quic_get_connection(c);
+ path = qc->path;
/* RFC 9002, Appendix A.8. Setting the Loss Detection Timer */
- duration = qc->avg_rtt;
+ duration = path->avg_rtt;
- duration += ngx_max(4 * qc->rttvar, NGX_QUIC_TIME_GRANULARITY);
+ duration += ngx_max(4 * path->rttvar, NGX_QUIC_TIME_GRANULARITY);
duration <<= qc->pto_count;
- if (qc->congestion.in_flight == 0) { /* no in-flight packets */
+ if (path->congestion.in_flight == 0) { /* no in-flight packets */
return duration;
}
diff -r b87a0dbc1150 -r 8723d4282f6d src/event/quic/ngx_event_quic_connection.h
--- a/src/event/quic/ngx_event_quic_connection.h Tue Oct 25 12:52:09 2022 +0400
+++ b/src/event/quic/ngx_event_quic_connection.h Mon Dec 12 15:47:03 2022 +0800
@@ -80,6 +80,14 @@
};

+typedef struct {
+ size_t in_flight;
+ size_t window;
+ size_t ssthresh;
+ ngx_msec_t recovery_start;
+} ngx_quic_congestion_t;
+
+
struct ngx_quic_path_s {
ngx_queue_t queue;
struct sockaddr *sockaddr;
@@ -96,6 +104,15 @@
uint64_t seqnum;
ngx_str_t addr_text;
u_char text[NGX_SOCKADDR_STRLEN];
+
+ ngx_msec_t first_rtt;
+ ngx_msec_t latest_rtt;
+ ngx_msec_t avg_rtt;
+ ngx_msec_t min_rtt;
+ ngx_msec_t rttvar;
+
+ ngx_quic_congestion_t congestion;
+
unsigned validated:1;
unsigned validating:1;
unsigned limited:1;
@@ -143,14 +160,6 @@
} ngx_quic_streams_t;

-typedef struct {
- size_t in_flight;
- size_t window;
- size_t ssthresh;
- ngx_msec_t recovery_start;
-} ngx_quic_congestion_t;
-
-
/*
* RFC 9000, 12.3. Packet Numbers
*
@@ -218,12 +227,6 @@
ngx_event_t path_validation;
ngx_msec_t last_cc;
- ngx_msec_t first_rtt;
- ngx_msec_t latest_rtt;
- ngx_msec_t avg_rtt;
- ngx_msec_t min_rtt;
- ngx_msec_t rttvar;
-
ngx_uint_t pto_count;
ngx_queue_t free_frames;
@@ -237,7 +240,6 @@
#endif
ngx_quic_streams_t streams;
- ngx_quic_congestion_t congestion;
off_t received;
diff -r b87a0dbc1150 -r 8723d4282f6d src/event/quic/ngx_event_quic_migration.c
--- a/src/event/quic/ngx_event_quic_migration.c Tue Oct 25 12:52:09 2022 +0400
+++ b/src/event/quic/ngx_event_quic_migration.c Mon Dec 12 15:47:03 2022 +0800
@@ -135,17 +135,26 @@
{
/* address did not change */
rst = 0;
+
+ path->avg_rtt = prev->avg_rtt;
+ path->rttvar = prev->rttvar;
+ path->min_rtt = prev->min_rtt;
+ path->first_rtt = prev->first_rtt;
+ path->latest_rtt = prev->latest_rtt;
+
+ ngx_memcpy(&path->congestion, &prev->congestion,
+ sizeof(ngx_quic_congestion_t));
}
}
if (rst) {
- ngx_memzero(&qc->congestion, sizeof(ngx_quic_congestion_t));
+ ngx_memzero(&path->congestion, sizeof(ngx_quic_congestion_t));
- qc->congestion.window = ngx_min(10 * qc->tp.max_udp_payload_size,
+ path->congestion.window = ngx_min(10 * qc->tp.max_udp_payload_size,
ngx_max(2 * qc->tp.max_udp_payload_size,
14720));
- qc->congestion.ssthresh = (size_t) -1;
- qc->congestion.recovery_start = ngx_current_msec;
+ path->congestion.ssthresh = (size_t) -1;
+ path->congestion.recovery_start = ngx_current_msec;
}
/*
@@ -217,6 +226,21 @@
path->addr_text.len = ngx_sock_ntop(sockaddr, socklen, path->text,
NGX_SOCKADDR_STRLEN, 1);
+ path->avg_rtt = NGX_QUIC_INITIAL_RTT;
+ path->rttvar = NGX_QUIC_INITIAL_RTT / 2;
+ path->min_rtt = NGX_TIMER_INFINITE;
+ path->first_rtt = NGX_TIMER_INFINITE;
+
+ /*
+ * path->latest_rtt = 0
+ */
+
+ path->congestion.window = ngx_min(10 * qc->tp.max_udp_payload_size,
+ ngx_max(2 * qc->tp.max_udp_payload_size,
+ 14720));
+ path->congestion.ssthresh = (size_t) -1;
+ path->congestion.recovery_start = ngx_current_msec;
+
ngx_log_debug2(NGX_LOG_DEBUG_EVENT, c->log, 0,
"quic path seq:%uL created addr:%V",
path->seqnum, &path->addr_text);
diff -r b87a0dbc1150 -r 8723d4282f6d src/event/quic/ngx_event_quic_output.c
--- a/src/event/quic/ngx_event_quic_output.c Tue Oct 25 12:52:09 2022 +0400
+++ b/src/event/quic/ngx_event_quic_output.c Mon Dec 12 15:47:03 2022 +0800
@@ -87,7 +87,7 @@
c->log->action = "sending frames";
qc = ngx_quic_get_connection(c);
- cg = &qc->congestion;
+ cg = &qc->path->congestion;
in_flight = cg->in_flight;
@@ -135,8 +135,8 @@
static u_char dst[NGX_QUIC_MAX_UDP_PAYLOAD_SIZE];
qc = ngx_quic_get_connection(c);
- cg = &qc->congestion;
path = qc->path;
+ cg = &path->congestion;
while (cg->in_flight < cg->window) {
@@ -223,7 +223,7 @@
qc = ngx_quic_get_connection(c);
- cg = &qc->congestion;
+ cg = &qc->path->congestion;
while (!ngx_queue_empty(&ctx->sending)) {
@@ -336,8 +336,8 @@
static u_char dst[NGX_QUIC_MAX_UDP_SEGMENT_BUF];
qc = ngx_quic_get_connection(c);
- cg = &qc->congestion;
path = qc->path;
+ cg = &path->congestion;
ctx = ngx_quic_get_send_ctx(qc, ssl_encryption_application);


--
Yu Zhu


From: Roman Arutyunyan <arut@nginx.com>
Date: Wednesday, December 7, 2022 at 23:05
To: nginx-devel@nginx.org <nginx-devel@nginx.org>
Subject: Re: QUIC: reworked congestion control mechanism.
Hi,

Thanks for the patch.

On Tue, Dec 06, 2022 at 02:35:37PM +0000, 朱宇 wrote:
> Hi,
>
> # HG changeset patch
> # User Yu Zhu <lishu.zy@alibaba-inc.com>
> # Date 1670326031 -28800
> # Tue Dec 06 19:27:11 2022 +0800
> # Branch quic
> # Node ID 9a47ff1223bb32c8ddb146d731b395af89769a97
> # Parent 1a320805265db14904ca9deaae8330f4979619ce
> QUIC: reworked congestion control mechanism.
>
> 1. move rtt measurement and congestion control to struct ngx_quic_path_t
> because RTT and congestion control are properties of the path.

I think this part should be moved out to a separate patch.

> 2. introduced struct "ngx_quic_congestion_ops_t" to wrap callback functions
> of congestion control and extract the reno algorithm from ngx_event_quic_ack.c.

The biggest question about this part is how extensible is this approach?
We are planning to implement more congestion control algorithms in the future
and need a framework that would allow us to do that.

Even CUBIC needs more data fields than we have now, and BBR will probably
need much more than that. Not sure how we'll add those data fields considering
the proposed modular design. Also, we need to make sure the API is enough for
future algorithms.

I suggest that we finish the first part which moves congestion control
to the path object. Then, until we have at least one other congestion
control algorithm supported, it's hard to come up with a good API for it.
I think we can postpone the second part until then.

Also, I think CUBIC can be hardcoded into Reno without modular redesign of the
code.

> No functional changes.

[..]

> diff -r 1a320805265d -r 9a47ff1223bb src/event/quic/congestion/ngx_quic_reno.c
> --- /dev/null Thu Jan 01 00:00:00 1970 +0000
> +++ b/src/event/quic/congestion/ngx_quic_reno.c Tue Dec 06 19:27:11 2022 +0800
> @@ -0,0 +1,133 @@
> +
> +/*
> + * Copyright (C) Nginx, Inc.
> + */
> +
> +
> +#include <ngx_config.h>
> +#include <ngx_core.h>
> +#include <ngx_event.h>
> +#include <ngx_event_quic_connection.h>
> +
> +
> +static void ngx_quic_reno_on_init(ngx_connection_t *c, ngx_quic_congestion_t *cg);
> +static ngx_int_t ngx_quic_reno_on_ack(ngx_connection_t *c, ngx_quic_frame_t *f);
> +static ngx_int_t ngx_quic_reno_on_lost(ngx_connection_t *c, ngx_quic_frame_t *f);
> +
> +
> +ngx_quic_congestion_ops_t ngx_quic_reno = {
> + ngx_string("reno"),
> + ngx_quic_reno_on_init,
> + ngx_quic_reno_on_ack,
> + ngx_quic_reno_on_lost
> +};
> +
> +
> +static void
> +ngx_quic_reno_on_init(ngx_connection_t *c, ngx_quic_congestion_t *cg)
> +{
> + ngx_quic_connection_t *qc;
> +
> + qc = ngx_quic_get_connection(c);
> +
> + cg->window = ngx_min(10 * qc->tp.max_udp_payload_size,
> + ngx_max(2 * qc->tp.max_udp_payload_size,
> + 14720));
> + cg->ssthresh = (size_t) -1;
> + cg->recovery_start = ngx_current_msec;
> +}
> +
> +
> +static ngx_int_t
> +ngx_quic_reno_on_ack(ngx_connection_t *c, ngx_quic_frame_t *f)
> +{
> + ngx_msec_t timer;
> + ngx_quic_path_t *path;
> + ngx_quic_connection_t *qc;
> + ngx_quic_congestion_t *cg;
> +
> + qc = ngx_quic_get_connection(c);
> + path = qc->path;

What if the packet was sent on a different path?

> +
> + cg = &path->congestion;
> +
> + cg->in_flight -= f->plen;
> +
> + timer = f->last - cg->recovery_start;
> +
> + if ((ngx_msec_int_t) timer <= 0) {
> + ngx_log_debug3(NGX_LOG_DEBUG_EVENT, c->log, 0,
> + "quic congestion ack recovery win:%uz ss:%z if:%uz",
> + cg->window, cg->ssthresh, cg->in_flight);
> +
> + return NGX_DONE;
> + }
> +
> + if (cg->window < cg->ssthresh) {
> + cg->window += f->plen;
> +
> + ngx_log_debug3(NGX_LOG_DEBUG_EVENT, c->log, 0,
> + "quic congestion slow start win:%uz ss:%z if:%uz",
> + cg->window, cg->ssthresh, cg->in_flight);
> +
> + } else {
> + cg->window += qc->tp.max_udp_payload_size * f->plen / cg->window;
> +
> + ngx_log_debug3(NGX_LOG_DEBUG_EVENT, c->log, 0,
> + "quic congestion avoidance win:%uz ss:%z if:%uz",
> + cg->window, cg->ssthresh, cg->in_flight);
> + }
> +
> + /* prevent recovery_start from wrapping */
> +
> + timer = cg->recovery_start - ngx_current_msec + qc->tp.max_idle_timeout * 2;
> +
> + if ((ngx_msec_int_t) timer < 0) {
> + cg->recovery_start = ngx_current_msec - qc->tp.max_idle_timeout * 2;
> + }
> +
> + return NGX_OK;
> +}
> +
> +
> +static ngx_int_t
> +ngx_quic_reno_on_lost(ngx_connection_t *c, ngx_quic_frame_t *f)
> +{
> + ngx_msec_t timer;
> + ngx_quic_path_t *path;
> + ngx_quic_connection_t *qc;
> + ngx_quic_congestion_t *cg;
> +
> + qc = ngx_quic_get_connection(c);
> + path = qc->path;

Same here.

> +
> + cg = &path->congestion;
> +
> + cg->in_flight -= f->plen;
> + f->plen = 0;
> +
> + timer = f->last - cg->recovery_start;
> +
> + if ((ngx_msec_int_t) timer <= 0) {
> + ngx_log_debug3(NGX_LOG_DEBUG_EVENT, c->log, 0,
> + "quic congestion lost recovery win:%uz ss:%z if:%uz",
> + cg->window, cg->ssthresh, cg->in_flight);
> +
> + return NGX_DONE;
> + }
> +
> + cg->recovery_start = ngx_current_msec;
> + cg->window /= 2;
> +
> + if (cg->window < qc->tp.max_udp_payload_size * 2) {
> + cg->window = qc->tp.max_udp_payload_size * 2;
> + }
> +
> + cg->ssthresh = cg->window;
> +
> + ngx_log_debug3(NGX_LOG_DEBUG_EVENT, c->log, 0,
> + "quic congestion lost win:%uz ss:%z if:%uz",
> + cg->window, cg->ssthresh, cg->in_flight);
> +
> + return NGX_OK;
> +}

[..]

--
Roman Arutyunyan
_______________________________________________
nginx-devel mailing list -- nginx-devel@nginx.org
To unsubscribe send an email to nginx-devel-leave@nginx.org
_______________________________________________
nginx-devel mailing list -- nginx-devel@nginx.org
To unsubscribe send an email to nginx-devel-leave@nginx.org
Subject Author Views Posted

QUIC: reworked congestion control mechanism.

朱宇 439 December 06, 2022 09:38AM

Re: QUIC: reworked congestion control mechanism.

Roman Arutyunyan 107 December 07, 2022 10:04AM

Re: QUIC: reworked congestion control mechanism.

Yu Zhu 133 December 12, 2022 03:42AM



Sorry, you do not have permission to post/reply in this forum.

Online Users

Guests: 267
Record Number of Users: 8 on April 13, 2023
Record Number of Guests: 421 on December 02, 2018
Powered by nginx      Powered by FreeBSD      PHP Powered      Powered by MariaDB      ipv6 ready