[PATCH] Added keepalive_async_fails command

J Carter
April 02, 2023 01:58PM
Hello,

In addition to the patch below, I've attached an example nginx.conf and a test
script that simulates the asynchronous close events.
Two test cases can be found within it: path /1 exercises a single-peer upstream
and path /2 a multi-peer upstream.

By default you should see each upstream server's address repeated twice in a row
in the access log, as the request fails through the cached connections and then
performs the next-upstream tries.

Any feedback would be appreciated.
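
For reference, the new directive goes in an upstream block alongside keepalive
and takes a single number (the patch below defaults it to 2). A minimal sketch,
with a placeholder backend address:

upstream backend {
    server 127.0.0.1:8081;
    keepalive 32;
    keepalive_async_fails 2;    # suspected keepalive races tolerated per upstream try
}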

# HG changeset patch
# User jordanc.carter@outlook.com
# Date 1680457073 -3600
# Sun Apr 02 18:37:53 2023 +0100
# Node ID 9ec4d7a8cdf6cdab00d09dff75fa6045f6f5533f
# Parent 5f1d05a21287ba0290dd3a17ad501595b442a194
Added the keepalive_async_fails command to the keepalive load balancer module.
Its value determines the number of suspected keepalive race events
per upstream try that will be tolerated before a subsequent network connection
error is considered a true failure.

diff -r 5f1d05a21287 -r 9ec4d7a8cdf6 src/event/ngx_event_connect.h
--- a/src/event/ngx_event_connect.h Tue Mar 28 18:01:54 2023 +0300
+++ b/src/event/ngx_event_connect.h Sun Apr 02 18:37:53 2023 +0100
@@ -17,6 +17,7 @@
#define NGX_PEER_KEEPALIVE 1
#define NGX_PEER_NEXT 2
#define NGX_PEER_FAILED 4
+#define NGX_PEER_ASYNC_FAILED 8


typedef struct ngx_peer_connection_s ngx_peer_connection_t;
@@ -41,6 +42,7 @@
ngx_str_t *name;

ngx_uint_t tries;
+ ngx_uint_t async_fails;
ngx_msec_t start_time;

ngx_event_get_peer_pt get;
diff -r 5f1d05a21287 -r 9ec4d7a8cdf6 src/http/modules/ngx_http_upstream_keepalive_module.c
--- a/src/http/modules/ngx_http_upstream_keepalive_module.c Tue Mar 28 18:01:54 2023 +0300
+++ b/src/http/modules/ngx_http_upstream_keepalive_module.c Sun Apr 02 18:37:53 2023 +0100
@@ -13,6 +13,7 @@
typedef struct {
ngx_uint_t max_cached;
ngx_uint_t requests;
+ ngx_uint_t max_async_fails;
ngx_msec_t time;
ngx_msec_t timeout;

@@ -108,6 +109,13 @@
offsetof(ngx_http_upstream_keepalive_srv_conf_t, requests),
NULL },

+ { ngx_string("keepalive_async_fails"),
+ NGX_HTTP_UPS_CONF|NGX_CONF_TAKE1,
+ ngx_conf_set_num_slot,
+ NGX_HTTP_SRV_CONF_OFFSET,
+ offsetof(ngx_http_upstream_keepalive_srv_conf_t, max_async_fails),
+ NULL },
+
ngx_null_command
};

@@ -160,6 +168,7 @@
ngx_conf_init_msec_value(kcf->time, 3600000);
ngx_conf_init_msec_value(kcf->timeout, 60000);
ngx_conf_init_uint_value(kcf->requests, 1000);
+ ngx_conf_init_uint_value(kcf->max_async_fails, 2);

if (kcf->original_init_upstream(cf, us) != NGX_OK) {
return NGX_ERROR;
@@ -320,6 +329,21 @@
u = kp->upstream;
c = pc->connection;

+ if (state & NGX_PEER_ASYNC_FAILED) {
+ pc->async_fails++;
+
+ if (pc->async_fails == kp->conf->max_async_fails) {
+ pc->async_fails = 0;
+ state = NGX_PEER_FAILED;
+
+ } else {
+ pc->tries++;
+ }
+ goto invalid;
+ }
+
+ pc->async_fails = 0;
+
if (state & NGX_PEER_FAILED
|| c == NULL
|| c->read->eof
@@ -529,6 +553,8 @@
conf->time = NGX_CONF_UNSET_MSEC;
conf->timeout = NGX_CONF_UNSET_MSEC;
conf->requests = NGX_CONF_UNSET_UINT;
+ conf->max_async_fails = NGX_CONF_UNSET_UINT;
+

return conf;
}
diff -r 5f1d05a21287 -r 9ec4d7a8cdf6 src/http/ngx_http_upstream.c
--- a/src/http/ngx_http_upstream.c Tue Mar 28 18:01:54 2023 +0300
+++ b/src/http/ngx_http_upstream.c Sun Apr 02 18:37:53 2023 +0100
@@ -4317,6 +4317,8 @@
{
state = NGX_PEER_NEXT;

+ } else if (u->peer.cached && ft_type == NGX_HTTP_UPSTREAM_FT_ERROR) {
+ state = NGX_PEER_ASYNC_FAILED;
} else {
state = NGX_PEER_FAILED;
}
@@ -4330,11 +4332,6 @@
"upstream timed out");
}

- if (u->peer.cached && ft_type == NGX_HTTP_UPSTREAM_FT_ERROR) {
- /* TODO: inform balancer instead */
- u->peer.tries++;
- }
-
switch (ft_type) {

case NGX_HTTP_UPSTREAM_FT_TIMEOUT:
@@ -4421,7 +4418,6 @@
return;
}
#endif
-
ngx_http_upstream_finalize_request(r, u, status);
return;
}
diff -r 5f1d05a21287 -r 9ec4d7a8cdf6 src/http/ngx_http_upstream_round_robin.c
--- a/src/http/ngx_http_upstream_round_robin.c Tue Mar 28 18:01:54 2023 +0300
+++ b/src/http/ngx_http_upstream_round_robin.c Sun Apr 02 18:37:53 2023 +0100
@@ -297,6 +297,7 @@
r->upstream->peer.get = ngx_http_upstream_get_round_robin_peer;
r->upstream->peer.free = ngx_http_upstream_free_round_robin_peer;
r->upstream->peer.tries = ngx_http_upstream_tries(rrp->peers);
+ r->upstream->peer.async_fails = 0;
#if (NGX_HTTP_SSL)
r->upstream->peer.set_session =
ngx_http_upstream_set_round_robin_peer_session;
@@ -418,6 +419,7 @@
r->upstream->peer.get = ngx_http_upstream_get_round_robin_peer;
r->upstream->peer.free = ngx_http_upstream_free_round_robin_peer;
r->upstream->peer.tries = ngx_http_upstream_tries(rrp->peers);
+ r->upstream->peer.async_fails = 0;
#if (NGX_HTTP_SSL)
r->upstream->peer.set_session = ngx_http_upstream_empty_set_session;
r->upstream->peer.save_session = ngx_http_upstream_empty_save_session;
@@ -459,7 +461,10 @@

rrp->current = peer;

- } else {
+ } else if (pc->async_fails > 0) {
+ peer = rrp->current;
+
+ } else {

/* there are several peers */

@@ -615,18 +620,7 @@
ngx_http_upstream_rr_peers_rlock(rrp->peers);
ngx_http_upstream_rr_peer_lock(rrp->peers, peer);

- if (rrp->peers->single) {
-
- peer->conns--;
-
- ngx_http_upstream_rr_peer_unlock(rrp->peers, peer);
- ngx_http_upstream_rr_peers_unlock(rrp->peers);
-
- pc->tries = 0;
- return;
- }
-
- if (state & NGX_PEER_FAILED) {
+ if (state & NGX_PEER_FAILED && !rrp->peers->single) {
now = ngx_time();

peer->fails++;

Test script:

#!/bin/sh
# Restart nginx so the test starts with an empty keepalive cache.
sudo nginx -s stop;
sudo nginx;
sleep 1;

# Warm up: 25 parallel requests to the chosen test case ($1 is 1 or 2),
# leaving cached keepalive connections to the backend ports.
seq 25 | xargs -I{} -P 25 curl localhost/$1
sleep 10;

# Simulate the asynchronous close events: reset traffic to the backend
# ports so the cached connections fail when they are reused.
sudo iptables -A INPUT -p tcp --destination-port 8080 -j REJECT --reject-with tcp-reset
sudo iptables -A INPUT -p tcp --destination-port 8081 -j REJECT --reject-with tcp-reset
sudo iptables -A INPUT -p tcp --destination-port 8082 -j REJECT --reject-with tcp-reset
sleep 5;

# A single request that has to fail through the dead cached connections.
curl localhost/$1
sleep 3;

# Clean up the firewall rules and show the result.
sudo iptables -D INPUT -p tcp --destination-port 8080 -j REJECT --reject-with tcp-reset
sudo iptables -D INPUT -p tcp --destination-port 8081 -j REJECT --reject-with tcp-reset
sudo iptables -D INPUT -p tcp --destination-port 8082 -j REJECT --reject-with tcp-reset
tail -n 10 /usr/local/nginx/logs/access.log
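
The script takes the test case as its only argument (the /1 or /2 location from
the description above); test.sh is just a placeholder name for the attached script:

sh test.sh 1    # single-peer upstream (location /1)
sh test.sh 2    # multi-peer upstream (location /2)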

nginx.conf:

worker_processes 1;

error_log logs/error.log info;

events {
    worker_connections 1024;
}

http {
    log_format main '$time_local $http_drop $upstream_addr $upstream_status';

    upstream backend1 {
        server 127.0.0.1:8081;
        keepalive 32;
        #keepalive_async_fails 3;
    }

    upstream backend2 {
        server 127.0.0.1:8080;
        server 127.0.0.1:8081;
        server 127.0.0.1:8082;
        keepalive 32;
        #keepalive_async_fails 3;
    }

    proxy_http_version 1.1;
    proxy_set_header Connection "";

    server {
        listen 80;

        access_log logs/access.log main;

        location =/1 {
            proxy_pass http://backend1;
        }

        location =/2 {
            #proxy_next_upstream_tries 2;
            proxy_next_upstream error timeout invalid_header http_500 http_502 http_503 http_504 http_403 http_404 http_429 non_idempotent;
            proxy_bind 127.0.0.2;
            proxy_pass http://backend2;
        }
    }

    server {
        listen 8080;
        listen 8081;
        listen 8082;

        access_log off;
        error_log off;

        if ($http_drop) {
            return 444;
        }

        location / {
            #slow it down, allows keepalives to be established on 80;
            proxy_connect_timeout 5s;
            error_page 504 = @return-200;
            #blackhole ip
            proxy_pass http://198.51.100.1:9999;
        }

        location @return-200 {
            return 200 "OK";
        }
    }
}
_______________________________________________
nginx-devel mailing list
nginx-devel@nginx.org
https://mailman.nginx.org/mailman/listinfo/nginx-devel