Maxim Dounin
September 04, 2011 07:48AM
# HG changeset patch
# User Maxim Dounin <mdounin@mdounin.ru>
# Date 1315040808 -14400
# Node ID 3c397bb4808ed5a8ec21e07eb9604b1766f723b8
# Parent 85fd18d013f15b975f7dd9cabbc5acbe02d12153
Keepalive support in fastcgi.

By default the old behaviour is kept, i.e. the FASTCGI_KEEP_CONN flag isn't set
in the request and the application is responsible for closing the connection
once the request is done. To keep connections alive, fastcgi_keep_conn must be
activated.
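For context (not part of the patch): fastcgi_keep_conn on its own only asks the
FastCGI application to leave the connection open; to actually reuse connections,
nginx also needs to cache idle upstream connections, e.g. via the upstream
keepalive module from patch 15 of this series. A minimal configuration sketch,
where the upstream name, socket path and connection count are illustrative only:

    # illustrative values, not taken from the patch
    upstream fastcgi_backend {
        server unix:/var/run/php-fpm.sock;
        keepalive 8;                # cache up to 8 idle connections per worker
    }

    server {
        listen 80;

        location ~ \.php$ {
            include fastcgi_params;
            fastcgi_param SCRIPT_FILENAME $document_root$fastcgi_script_name;
            fastcgi_pass fastcgi_backend;
            fastcgi_keep_conn on;   # set FASTCGI_KEEP_CONN in the request
        }
    }

Without such a cache, enabling fastcgi_keep_conn merely shifts closing the
connection from the application to nginx.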

diff --git a/src/http/modules/ngx_http_fastcgi_module.c b/src/http/modules/ngx_http_fastcgi_module.c
--- a/src/http/modules/ngx_http_fastcgi_module.c
+++ b/src/http/modules/ngx_http_fastcgi_module.c
@@ -26,6 +26,8 @@ typedef struct {
ngx_hash_t headers_hash;
ngx_uint_t header_params;

+ ngx_flag_t keep_conn;
+
#if (NGX_HTTP_CACHE)
ngx_http_complex_value_t cache_key;
#endif
@@ -77,6 +79,8 @@ typedef struct {

#define NGX_HTTP_FASTCGI_RESPONDER 1

+#define NGX_HTTP_FASTCGI_KEEP_CONN 1
+
#define NGX_HTTP_FASTCGI_BEGIN_REQUEST 1
#define NGX_HTTP_FASTCGI_ABORT_REQUEST 2
#define NGX_HTTP_FASTCGI_END_REQUEST 3
@@ -130,6 +134,7 @@ static ngx_int_t ngx_http_fastcgi_create
static ngx_int_t ngx_http_fastcgi_create_request(ngx_http_request_t *r);
static ngx_int_t ngx_http_fastcgi_reinit_request(ngx_http_request_t *r);
static ngx_int_t ngx_http_fastcgi_process_header(ngx_http_request_t *r);
+static ngx_int_t ngx_http_fastcgi_input_filter_init(void *data);
static ngx_int_t ngx_http_fastcgi_input_filter(ngx_event_pipe_t *p,
ngx_buf_t *buf);
static ngx_int_t ngx_http_fastcgi_process_record(ngx_http_request_t *r,
@@ -437,6 +442,13 @@ static ngx_command_t ngx_http_fastcgi_c
offsetof(ngx_http_fastcgi_loc_conf_t, catch_stderr),
NULL },

+ { ngx_string("fastcgi_keep_conn"),
+ NGX_HTTP_MAIN_CONF|NGX_HTTP_SRV_CONF|NGX_HTTP_LOC_CONF|NGX_CONF_FLAG,
+ ngx_conf_set_flag_slot,
+ NGX_HTTP_LOC_CONF_OFFSET,
+ offsetof(ngx_http_fastcgi_loc_conf_t, keep_conn),
+ NULL },
+
ngx_null_command
};

@@ -600,6 +612,8 @@ ngx_http_fastcgi_handler(ngx_http_reques
u->pipe->input_filter = ngx_http_fastcgi_input_filter;
u->pipe->input_ctx = r;

+ u->input_filter_init = ngx_http_fastcgi_input_filter_init;
+
rc = ngx_http_read_client_request_body(r, ngx_http_upstream_init);

if (rc >= NGX_HTTP_SPECIAL_RESPONSE) {
@@ -841,6 +855,9 @@ ngx_http_fastcgi_create_request(ngx_http

cl->buf = b;

+ ngx_http_fastcgi_request_start.br.flags =
+ flcf->keep_conn ? NGX_HTTP_FASTCGI_KEEP_CONN : 0;
+
ngx_memcpy(b->pos, &ngx_http_fastcgi_request_start,
sizeof(ngx_http_fastcgi_request_start_t));

@@ -1574,14 +1591,30 @@ ngx_http_fastcgi_process_header(ngx_http


static ngx_int_t
+ngx_http_fastcgi_input_filter_init(void *data)
+{
+ ngx_http_request_t *r = data;
+ ngx_http_fastcgi_loc_conf_t *flcf;
+
+ flcf = ngx_http_get_module_loc_conf(r, ngx_http_fastcgi_module);
+
+ r->upstream->pipe->length = flcf->keep_conn ?
+ (off_t) sizeof(ngx_http_fastcgi_header_t) : -1;
+
+ return NGX_OK;
+}
+
+
+static ngx_int_t
ngx_http_fastcgi_input_filter(ngx_event_pipe_t *p, ngx_buf_t *buf)
{
- u_char *m, *msg;
- ngx_int_t rc;
- ngx_buf_t *b, **prev;
- ngx_chain_t *cl;
- ngx_http_request_t *r;
- ngx_http_fastcgi_ctx_t *f;
+ u_char *m, *msg;
+ ngx_int_t rc;
+ ngx_buf_t *b, **prev;
+ ngx_chain_t *cl;
+ ngx_http_request_t *r;
+ ngx_http_fastcgi_ctx_t *f;
+ ngx_http_fastcgi_loc_conf_t *flcf;

if (buf->pos == buf->last) {
return NGX_OK;
@@ -1589,6 +1622,7 @@ ngx_http_fastcgi_input_filter(ngx_event_

r = p->input_ctx;
f = ngx_http_get_module_ctx(r, ngx_http_fastcgi_module);
+ flcf = ngx_http_get_module_loc_conf(r, ngx_http_fastcgi_module);

b = NULL;
prev = &buf->shadow;
@@ -1611,7 +1645,10 @@ ngx_http_fastcgi_input_filter(ngx_event_

if (f->type == NGX_HTTP_FASTCGI_STDOUT && f->length == 0) {
f->state = ngx_http_fastcgi_st_version;
- p->upstream_done = 1;
+
+ if (!flcf->keep_conn) {
+ p->upstream_done = 1;
+ }

ngx_log_debug0(NGX_LOG_DEBUG_HTTP, p->log, 0,
"http fastcgi closed stdout");
@@ -1623,6 +1660,10 @@ ngx_http_fastcgi_input_filter(ngx_event_
f->state = ngx_http_fastcgi_st_version;
p->upstream_done = 1;

+ if (flcf->keep_conn) {
+ r->upstream->keepalive = 1;
+ }
+
ngx_log_debug0(NGX_LOG_DEBUG_HTTP, p->log, 0,
"http fastcgi sent end request");

@@ -1781,6 +1822,23 @@ ngx_http_fastcgi_input_filter(ngx_event_

}

+ if (flcf->keep_conn) {
+
+ /* set p->length, minimal amount of data we want to see */
+
+ if (f->state < ngx_http_fastcgi_st_data) {
+ p->length = 1;
+
+ } else if (f->state == ngx_http_fastcgi_st_padding) {
+ p->length = f->padding;
+
+ } else {
+ /* ngx_http_fastcgi_st_data */
+
+ p->length = f->length;
+ }
+ }
+
if (b) {
b->shadow = buf;
b->last_shadow = 1;
@@ -2011,6 +2069,8 @@ ngx_http_fastcgi_create_loc_conf(ngx_con

conf->catch_stderr = NGX_CONF_UNSET_PTR;

+ conf->keep_conn = NGX_CONF_UNSET;
+
ngx_str_set(&conf->upstream.module, "fastcgi");

return conf;
@@ -2254,6 +2314,8 @@ ngx_http_fastcgi_merge_loc_conf(ngx_conf

ngx_conf_merge_ptr_value(conf->catch_stderr, prev->catch_stderr, NULL);

+ ngx_conf_merge_value(conf->keep_conn, prev->keep_conn, 0);
+

ngx_conf_merge_str_value(conf->index, prev->index, "");
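For reference (not part of the patch): the br.flags byte written in
ngx_http_fastcgi_create_request() above is the flags field of the FastCGI
begin-request record body, and the FastCGI specification defines FCGI_KEEP_CONN
as 1, which is why NGX_HTTP_FASTCGI_KEEP_CONN is 1 as well. A sketch of that
record body per the spec; the type name below is illustrative, the module uses
its own equivalent structure:

    /* FastCGI begin-request body as laid out in the FastCGI spec */
    typedef struct {
        unsigned char  role_hi;      /* high byte of the role             */
        unsigned char  role_lo;      /* low byte, FCGI_RESPONDER == 1     */
        unsigned char  flags;        /* bit 0: FCGI_KEEP_CONN, keep the
                                        connection open after the request */
        unsigned char  reserved[5];
    } fcgi_begin_request_body_t;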


_______________________________________________
nginx-devel mailing list
nginx-devel@nginx.org
http://mailman.nginx.org/mailman/listinfo/nginx-devel