Hi,
My setup goes as follows:
Clients -> F5 LB -> Nginx -> JBoss (Nginx reside on the same RHEL 6.5 with JBoss).
I was reading about nginx a lot and decided to implement it in my high traffic web server in order to gain performance.
I didn't have any problems until I reached 2,500 requests per minute, at which point ~50 requests fail with an nginx 502 error message.
After some investigation, I saw that the number of JBoss threads (connections from nginx to JBoss) kept increasing until JBoss couldn't serve the requests.
As a test, I disabled nginx and configured the LB to interact directly with the JBoss server; the number of ESTABLISHED connections / JBoss threads decreased dramatically and all of the requests were answered.
I then asked myself: how can it be that nginx uses keepalive, yet the number of backend ESTABLISHED connections increases until JBoss can't process requests due to a lack of threads?
Disabling keepalive is unacceptable, because TIME_WAIT connections are then created by the thousands until the limit is reached.
How is it possible that with nginx (using keepalive) the number of ESTABLISHED connections increased, while the same environment without nginx — with keepalive per request only, not shared across tens of requests — used fewer threads?
Here is my configuration; your help is appreciated:
My nginx configurations looks like that:
# --- Main nginx configuration (nginx.conf) ---
# General configuration
user nginx;
# NOTE(review): should match the number of CPU cores — confirm this host
# actually has 10 cores; otherwise reduce (extra workers add no throughput).
worker_processes 10; # per number of cores
# Define the limit of open files per worker.
worker_rlimit_nofile 30720;
events {
# use epoll event handler
use epoll;
# Maximum simultaneous connections per worker (client and upstream sides both count).
worker_connections 10240;
}
error_log /usr/local/var/log/nginx/nginx_error.log warn;
pid /var/run/nginx.pid;
http {
# Logging format
log_format main '$remote_addr - $remote_user [$time_local] ' '"$request_length" "$request_time" '
'"$request" $status $bytes_sent ' '"$body_bytes_sent" "$bytes_sent" ';
default_type application/octet-stream;
# Client-side keepalive: 300s idle timeout, 300s advertised in the Keep-Alive response header.
keepalive_timeout 300 300;
# Maximum requests served over a single client keepalive connection before it is closed.
keepalive_requests 8000;
charset utf-8;
source_charset utf-8;
# Drop requests with invalid header names instead of passing them through.
ignore_invalid_headers on;
recursive_error_pages on;
sendfile on;
# Hide the nginx version in responses and error pages.
server_tokens off;
tcp_nodelay on;
tcp_nopush off;
# Serve nginx's own error pages for upstream responses with status >= 300
# (works together with errors.conf included in the server block).
proxy_intercept_errors on;
# Connection limiting and throttling
#limit_conn_zone $binary_remote_addr zone=my:10m;
#limit_conn_log_level notice;
# HTTP reverse proxy configuration
include http_server.conf;
}
http_server.conf file:
upstream backend {
server 127.0.0.1:8778;
# Number of idle keepalive connections to the upstream cached PER WORKER process.
# `keepalive 1` keeps only a single idle connection alive per worker: every
# concurrent request beyond that opens a brand-new connection to JBoss and
# closes it afterwards, which is exactly the ESTABLISHED-connection / JBoss
# thread growth observed under load. Cache enough idle connections to cover
# typical per-worker concurrency (requires proxy_http_version 1.1 and
# proxy_set_header Connection "" — both already set in proxy.conf).
keepalive 32;
}
server {
listen 10.0.5.25:8080;
listen 10.210.5.25:8080;
#listen 10.0.15.25:8080; # For debugging purposes only!
server_name ph-perf-z-cl1;
# Access log disabled for performance; comment this line and add an access_log
# directive (with the `main` format) to re-enable logging.
access_log off;
# Clients get 413 Request Entity Too Large if the request body exceeds this size.
client_max_body_size 50m;
# Request bodies larger than this buffer are spooled to a temporary file on disk.
client_body_buffer_size 1m;
include errors.conf;
# Static error pages; read-only, GET requests only.
location ~ /errors {
limit_except GET {
deny all;
}
root html;
}
# nginx status endpoint.
# NOTE(review): the allow/deny lines are commented out, so /nginx_status is
# currently reachable by ANY client — restrict before production use.
location = /nginx_status {
#allow <MANG_NETWORK>/<NETMASK>;
#deny all;
stub_status on;
}
# Application traffic proxied to the JBoss upstream; POST only.
location /dps/ {
limit_except POST {
deny all;
}
proxy_pass http://backend;
proxy_buffering on;
include proxy.conf;
}
# Answer favicon probes with 204 No Content instead of hitting the backend.
location = /favicon.ico {
return 204;
}
}
proxy.conf file:
# --- Shared proxy settings (proxy.conf) ---
# HTTP/1.1 plus an empty Connection header are both REQUIRED for the upstream
# `keepalive` connection cache to work.
proxy_http_version 1.1;
proxy_set_header Connection "";
proxy_redirect off;
proxy_read_timeout 80s;
proxy_send_timeout 80s;
proxy_set_header X-Forwarded-Proto http;
proxy_set_header Host $http_host;
proxy_set_header X-Forwarded-Host $host;
proxy_set_header X-Forwarded-Server $host;
# Append $remote_addr to any X-Forwarded-For received from the F5, preserving
# the original client IP chain. (A second X-Forwarded-For line with plain
# $remote_addr was removed here: duplicate proxy_set_header lines for the same
# header make nginx send the header twice.)
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
proxy_set_header X-Real-IP $remote_addr;
proxy_hide_header X-Powered-By;
proxy_hide_header X-CWMP-impl;
proxy_headers_hash_max_size 1024;
proxy_headers_hash_bucket_size 128;
# NOTE(review): 320 x 32k = 10 MB of response buffers per proxied connection —
# confirm this is intentional; at high concurrency it can exhaust memory.
proxy_buffers 320 32k;
# The buffer used to store the beginning of the response (headers) - applicable even if proxy_buffering is off.
proxy_buffer_size 8k;
proxy_busy_buffers_size 128k;
# stores the name in hash table for fast access