From 0d35acb323690e817ccbbff7073496e349fc901d Mon Sep 17 00:00:00 2001 From: deep011 Date: Tue, 19 Apr 2016 12:46:07 +0800 Subject: [PATCH] add tcpkeepalive, tcpkeepidle, tcpkeepcnt and tcpkeepintvl for conf file. --- README.md | 46 +++++++++-------- src/nc.c | 21 +++++--- src/nc_conf.c | 40 +++++++++++++++ src/nc_conf.h | 10 ++++ src/nc_manage.c | 12 ++++- src/nc_proxy.c | 3 +- src/nc_server.h | 4 ++ src/nc_stats.c | 131 ------------------------------------------------ src/nc_thread.c | 29 +++++++---- src/nc_util.c | 46 +++++++++++++++++ src/nc_util.h | 1 + 11 files changed, 172 insertions(+), 171 deletions(-) diff --git a/README.md b/README.md index f5eff4a..49e82dc 100644 --- a/README.md +++ b/README.md @@ -1,25 +1,25 @@ -# twemproxys (nutcrackers) [![Build Status](https://secure.travis-ci.org/twitter/twemproxys.png)](http://travis-ci.org/twitter/twemproxys) +# twemproxies (nutcrackers) [![Build Status](https://secure.travis-ci.org/twitter/twemproxies.png)](http://travis-ci.org/twitter/twemproxies) -**twemproxys** (pronounced "two-em-proxy"), aka **nutcrackers** is a fast and lightweight proxy for [memcached](http://www.memcached.org/) and [redis](http://redis.io/) protocol. It was built primarily to reduce the number of connections to the caching servers on the backend. This, together with protocol pipelining and sharding enables you to horizontally scale your distributed caching architecture. +**twemproxies** (pronounced "two-em-proxy"), aka **nutcrackers** is a multithread, fast and lightweight proxy for [memcached](http://www.memcached.org/) and [redis](http://redis.io/) protocol. It was built primarily to reduce the number of connections to the caching servers on the backend. This, together with protocol pipelining and sharding enables you to horizontally scale your distributed caching architecture. ## Build -To build twemproxys from [distribution tarball](https://drive.google.com/open?id=0B6pVMMV5F5dfMUdJV25abllhUWM&authuser=0): +To build twemproxies from [distribution tarball](https://drive.google.com/open?id=0B6pVMMV5F5dfMUdJV25abllhUWM&authuser=0): $ ./configure $ make $ sudo make install -To build twemproxys from [distribution tarball](https://drive.google.com/open?id=0B6pVMMV5F5dfMUdJV25abllhUWM&authuser=0) in _debug mode_: +To build twemproxies from [distribution tarball](https://drive.google.com/open?id=0B6pVMMV5F5dfMUdJV25abllhUWM&authuser=0) in _debug mode_: $ CFLAGS="-ggdb3 -O0" ./configure --enable-debug=full $ make $ sudo make install -To build twemproxys from source with _debug logs enabled_ and _assertions enabled_: +To build twemproxies from source with _debug logs enabled_ and _assertions enabled_: - $ git clone git@github.com:twitter/twemproxys.git - $ cd twemproxys + $ git clone git@github.com:twitter/twemproxies.git + $ cd twemproxies $ autoreconf -fvi $ ./configure --enable-debug=full $ make @@ -34,7 +34,7 @@ A quick checklist: ## Note -Try the follow command to get the twemproxys status: +Try the follow command to get the twemproxies status: printf "status\r\n" | nc manage_ip manage_port @@ -81,13 +81,13 @@ Try the follow command to get the twemproxys status: ## Zero Copy -In twemproxys, all the memory for incoming requests and outgoing responses is allocated in mbuf. Mbuf enables zero-copy because the same buffer on which a request was received from the client is used for forwarding it to the server. Similarly the same mbuf on which a response was received from the server is used for forwarding it to the client. 
+In twemproxies, all the memory for incoming requests and outgoing responses is allocated in mbuf. Mbuf enables zero-copy because the same buffer on which a request was received from the client is used for forwarding it to the server. Similarly the same mbuf on which a response was received from the server is used for forwarding it to the client.

-Furthermore, memory for mbufs is managed using a reuse pool. This means that once mbuf is allocated, it is not deallocated, but just put back into the reuse pool. By default each mbuf chunk is set to 16K bytes in size. There is a trade-off between the mbuf size and number of concurrent connections twemproxys can support. A large mbuf size reduces the number of read syscalls made by twemproxys when reading requests or responses. However, with a large mbuf size, every active connection would use up 16K bytes of buffer which might be an issue when twemproxys is handling large number of concurrent connections from clients. When twemproxys is meant to handle a large number of concurrent client connections, you should set chunk size to a small value like 512 bytes using the -m or --mbuf-size=N argument.
+Furthermore, memory for mbufs is managed using a reuse pool. This means that once mbuf is allocated, it is not deallocated, but just put back into the reuse pool. By default each mbuf chunk is set to 16K bytes in size. There is a trade-off between the mbuf size and number of concurrent connections twemproxies can support. A large mbuf size reduces the number of read syscalls made by twemproxies when reading requests or responses. However, with a large mbuf size, every active connection would use up 16K bytes of buffer which might be an issue when twemproxies is handling large number of concurrent connections from clients. When twemproxies is meant to handle a large number of concurrent client connections, you should set chunk size to a small value like 512 bytes using the -m or --mbuf-size=N argument.

## Configuration

-twemproxys can be configured through a YAML file specified by the -c or --conf-file command-line argument on process start. The configuration file is used to specify the server pools and the servers within each pool that twemproxys manages. The configuration files parses and understands the following keys:
+twemproxies can be configured through a YAML file specified by the -c or --conf-file command-line argument on process start. The configuration file is used to specify the server pools and the servers within each pool that twemproxies manages. The configuration files parses and understands the following keys:

+ **listen**: The listening address and port (name:port or ip:port) or an absolute path to sock file (e.g. /var/run/nutcrackers.sock) for this server pool.
+ **hash**: The name of the hash function. Possible values are:
+ one_at_a_time
+ md5
+ crc16
+ crc32 (crc32 implementation compatible with [libmemcached](http://libmemcached.org/))
+ crc32a (correct crc32 implementation as per the spec)
+ fnv1_64
+ fnv1a_64
+ fnv1_32
+ fnv1a_32
+ hsieh
+ murmur
+ jenkins
+ **hash_tag**: A two character string that specifies the part of the key used for hashing. Eg "{}" or "$$". [Hash tag](notes/recommendation.md#hash-tags) enable mapping different keys to the same server as long as the part of the key within the tag is the same.
+ **distribution**: The key distribution mode. Possible values are:
+ ketama
+ modula
@@ -110,10 +110,14 @@ twemproxys can be configured through a YAML file specified by the -c or --conf-f
+ random
+ **timeout**: The timeout value in msec that we wait for to establish a connection to the server or receive a response from a server. By default, we wait indefinitely.
+ **backlog**: The TCP backlog argument. Defaults to 512.
-+ **preconnect**: A boolean value that controls if twemproxys should preconnect to all the servers in this pool on process start. Defaults to false.
++ **tcpkeepalive**: A boolean value that controls whether TCP keepalive is enabled for this pool's client connections. Defaults to false.
++ **tcpkeepidle**: The time in seconds a connection must stay idle before TCP starts sending keepalive probes. Defaults to the operating system setting.
++ **tcpkeepcnt**: The maximum number of keepalive probes sent without a response from the peer before the connection is considered dead. Defaults to the operating system setting.
++ **tcpkeepintvl**: The time in seconds between two successive keepalive probes. Defaults to the operating system setting.
++ **preconnect**: A boolean value that controls if twemproxies should preconnect to all the servers in this pool on process start. Defaults to false.
+ **redis**: A boolean value that controls if a server pool speaks redis or memcached protocol. Defaults to false.
+ **redis_auth**: Authenticate to the Redis server on connect.
-+ **redis_db**: The DB number to use on the pool servers. Defaults to 0. Note: twemproxys will always present itself to clients as DB 0.
++ **redis_db**: The DB number to use on the pool servers. Defaults to 0. Note: twemproxies will always present itself to clients as DB 0.
+ **server_connections**: The maximum number of connections that can be opened to each server. By default, we open at most 1 server connection.
+ **auto_eject_hosts**: A boolean value that controls if server should be ejected temporarily when it fails consecutively server_failure_limit times. See [liveness recommendations](notes/recommendation.md#liveness) for information. Defaults to false.
+ **server_retry_timeout**: The timeout value in msec to wait for before retrying on a temporarily ejected server, when auto_eject_host is set to true. Defaults to 30000 msec.
@@ -191,13 +195,13 @@ For example, the configuration file in [conf/nutcrackers.yml](conf/nutcrackers.y
    - 127.0.0.1:11214:100000
    - 127.0.0.1:11215:1

-Finally, to make writing a syntactically correct configuration file easier, twemproxys provides a command-line argument -t or --test-conf that can be used to test the YAML configuration file for any syntax error.
+Finally, to make writing a syntactically correct configuration file easier, twemproxies provides a command-line argument -t or --test-conf that can be used to test the YAML configuration file for any syntax error.

## Observability

-Observability in twemproxys is through logs and stats.
+Observability in twemproxies is through logs and stats.

-twemproxys exposes stats at the granularity of server pool and servers per pool through the stats monitoring port. The stats are essentially JSON formatted key-value pairs, with the keys corresponding to counter names. By default stats are exposed on port 22222 and aggregated every 30 seconds. Both these values can be configured on program start using the -c or --conf-file and -i or --stats-interval command-line arguments respectively. You can print the description of all stats exported by using the -D or --describe-stats command-line argument.
+twemproxies exposes stats at the granularity of server pool and servers per pool through the stats monitoring port. The stats are essentially JSON formatted key-value pairs, with the keys corresponding to counter names. By default stats are exposed on port 22222 and aggregated every 30 seconds. Both these values can be configured on program start using the -c or --conf-file and -i or --stats-interval command-line arguments respectively. You can print the description of all stats exported by using the -D or --describe-stats command-line argument.
$ nutcrackers --describe-stats @@ -223,19 +227,19 @@ twemproxys exposes stats at the granularity of server pool and servers per pool out_queue "# requests in outgoing queue" out_queue_bytes "current request bytes in outgoing queue" -Logging in twemproxys is only available when twemproxys is built with logging enabled. By default logs are written to stderr. twemproxys can also be configured to write logs to a specific file through the -o or --output command-line argument. On a running twemproxys, we can turn log levels up and down by sending it SIGTTIN and SIGTTOU signals respectively and reopen log files by sending it SIGHUP signal. +Logging in twemproxies is only available when twemproxies is built with logging enabled. By default logs are written to stderr. twemproxies can also be configured to write logs to a specific file through the -o or --output command-line argument. On a running twemproxies, we can turn log levels up and down by sending it SIGTTIN and SIGTTOU signals respectively and reopen log files by sending it SIGHUP signal. ## Pipelining -twemproxys enables proxying multiple client connections onto one or few server connections. This architectural setup makes it ideal for pipelining requests and responses and hence saving on the round trip time. +twemproxies enables proxying multiple client connections onto one or few server connections. This architectural setup makes it ideal for pipelining requests and responses and hence saving on the round trip time. -For example, if twemproxys is proxying three client connections onto a single server and we get requests - 'get key\r\n', 'set key 0 0 3\r\nval\r\n' and 'delete key\r\n' on these three connections respectively, twemproxys would try to batch these requests and send them as a single message onto the server connection as 'get key\r\nset key 0 0 3\r\nval\r\ndelete key\r\n'. +For example, if twemproxies is proxying three client connections onto a single server and we get requests - 'get key\r\n', 'set key 0 0 3\r\nval\r\n' and 'delete key\r\n' on these three connections respectively, twemproxies would try to batch these requests and send them as a single message onto the server connection as 'get key\r\nset key 0 0 3\r\nval\r\ndelete key\r\n'. -Pipelining is the reason why twemproxys ends up doing better in terms of throughput even though it introduces an extra hop between the client and server. +Pipelining is the reason why twemproxies ends up doing better in terms of throughput even though it introduces an extra hop between the client and server. ## Deployment -If you are deploying twemproxys in production, you might consider reading through the [recommendation document](notes/recommendation.md) to understand the parameters you could tune in twemproxys to run it efficiently in the production environment. +If you are deploying twemproxies in production, you might consider reading through the [recommendation document](notes/recommendation.md) to understand the parameters you could tune in twemproxies to run it efficiently in the production environment. 
## License diff --git a/src/nc.c b/src/nc.c index 9d998e1..0133d2c 100644 --- a/src/nc.c +++ b/src/nc.c @@ -465,21 +465,23 @@ nc_get_options(int argc, char **argv, struct instance *nci) * returns false */ static bool -nc_test_conf(struct instance *nci) +nc_test_conf(struct instance *nci, bool test) { struct conf *cf; cf = conf_create(nci->conf_filename); if (cf == NULL) { - log_stderr("nutcrackers: configuration file '%s' syntax is invalid", - nci->conf_filename); + if (test) + log_stderr("nutcrackers: configuration file '%s' syntax is invalid", + nci->conf_filename); return false; } conf_destroy(cf); - log_stderr("nutcrackers: configuration file '%s' syntax is ok", - nci->conf_filename); + if (test) + log_stderr("nutcrackers: configuration file '%s' syntax is ok", + nci->conf_filename); return true; } @@ -493,6 +495,11 @@ nc_pre_run(struct instance *nci) return status; } + if (!nc_test_conf(nci, false)) { + log_error("conf file %s is error", nci->conf_filename); + return NC_ERROR; + } + if (daemonize) { status = nc_daemonize(1); if (status != NC_OK) { @@ -556,7 +563,7 @@ nc_run(struct instance *nci) if (status != NC_OK) { return; } - + /* init the workers */ for (i = 0; i < worker_count; i ++) { worker = array_push(&workers); @@ -617,7 +624,7 @@ main(int argc, char **argv) } if (test_conf) { - if (!nc_test_conf(&nci)) { + if (!nc_test_conf(&nci, true)) { exit(1); } exit(0); diff --git a/src/nc_conf.c b/src/nc_conf.c index 62b17e3..1233ee7 100644 --- a/src/nc_conf.c +++ b/src/nc_conf.c @@ -106,6 +106,19 @@ static struct command conf_commands[] = { conf_add_server, offsetof(struct conf_pool, server) }, + { string("tcpkeepalive"), + conf_set_bool, + offsetof(struct conf_pool, tcpkeepalive) }, + { string("tcpkeepidle"), + conf_set_num, + offsetof(struct conf_pool, tcpkeepidle) }, + { string("tcpkeepintvl"), + conf_set_num, + offsetof(struct conf_pool, tcpkeepintvl) }, + { string("tcpkeepcnt"), + conf_set_num, + offsetof(struct conf_pool, tcpkeepcnt) }, + null_command }; @@ -205,6 +218,11 @@ conf_pool_init(struct conf_pool *cp, struct string *name) cp->valid = 0; + cp->tcpkeepalive = CONF_UNSET_NUM; + cp->tcpkeepidle = CONF_UNSET_NUM; + cp->tcpkeepintvl = CONF_UNSET_NUM; + cp->tcpkeepcnt = CONF_UNSET_NUM; + status = string_duplicate(&cp->name, name); if (status != NC_OK) { return status; @@ -296,6 +314,11 @@ conf_pool_each_transform(void *elem, void *data) sp->auto_eject_hosts = cp->auto_eject_hosts ? 1 : 0; sp->preconnect = cp->preconnect ? 1 : 0; + sp->tcpkeepalive = cp->tcpkeepalive ? 
1 : 0; + sp->tcpkeepidle = cp->tcpkeepidle; + sp->tcpkeepintvl = cp->tcpkeepintvl; + sp->tcpkeepcnt = cp->tcpkeepcnt; + status = server_init(&sp->server, &cp->server, sp); if (status != NC_OK) { return status; @@ -1267,6 +1290,19 @@ conf_validate_pool(struct conf *cf, struct conf_pool *cp) cp->valid = 1; + if (cp->tcpkeepalive == CONF_UNSET_NUM) { + cp->tcpkeepalive = CONF_DEFAULT_TCPKEEPALIVE; + } + if (cp->tcpkeepidle == CONF_UNSET_NUM) { + cp->tcpkeepidle = CONF_DEFAULT_TCPKEEPIDLE; + } + if (cp->tcpkeepintvl == CONF_UNSET_NUM) { + cp->tcpkeepintvl = CONF_DEFAULT_TCPKEEPINTVL; + } + if (cp->tcpkeepcnt == CONF_UNSET_NUM) { + cp->tcpkeepcnt = CONF_DEFAULT_TCPKEEPCNT; + } + return NC_OK; } @@ -1385,6 +1421,10 @@ conf_create(char *filename) void conf_destroy(struct conf *cf) { + if (cf == NULL) { + return; + } + while (array_n(&cf->arg) != 0) { conf_pop_scalar(cf); } diff --git a/src/nc_conf.h b/src/nc_conf.h index ff30f56..bed36f9 100644 --- a/src/nc_conf.h +++ b/src/nc_conf.h @@ -55,6 +55,11 @@ #define CONF_DEFAULT_SERVER_CONNECTIONS 1 #define CONF_DEFAULT_KETAMA_PORT 11211 +#define CONF_DEFAULT_TCPKEEPALIVE 0 +#define CONF_DEFAULT_TCPKEEPIDLE -1 +#define CONF_DEFAULT_TCPKEEPINTVL -1 +#define CONF_DEFAULT_TCPKEEPCNT -1 + struct conf_listen { struct string pname; /* listen: as "hostname:port" */ struct string name; /* hostname:port */ @@ -93,6 +98,11 @@ struct conf_pool { int server_failure_limit; /* server_failure_limit: */ struct array server; /* servers: conf_server[] */ unsigned valid:1; /* valid? */ + + int tcpkeepalive; /* tcpkeepalive: */ + int tcpkeepidle; /* tcpkeepidle: */ + int tcpkeepintvl; /* tcpkeepintvl: */ + int tcpkeepcnt; /* tcpkeepcnt: */ }; struct conf { diff --git a/src/nc_manage.c b/src/nc_manage.c index 5114783..693e0b8 100644 --- a/src/nc_manage.c +++ b/src/nc_manage.c @@ -515,11 +515,12 @@ manage_init(struct context *ctx, char *addr, uint16_t port) manager->nc_conn_q = 0; TAILQ_INIT(&manager->c_conn_q); + string_init(&manager->addrstr); ctx->manager = manager; manager->ctx = ctx; - string_set_raw(&manager->addrstr,addr); + string_copy(&manager->addrstr, (uint8_t *)addr, (uint32_t)strlen(addr)); manager->port = port; memset(&manager->info, 0, sizeof(manager->info)); @@ -550,10 +551,19 @@ manage_deinit(struct context *ctx) struct manage *manager = ctx->manager; struct conn *p; + if (manager == NULL) { + return; + } + p = manager->p_conn; if (p != NULL) { p->close(ctx, p); } + + string_deinit(&manager->addrstr); + + nc_free(manager); + ctx->manager = NULL; } void diff --git a/src/nc_proxy.c b/src/nc_proxy.c index 09e9cf2..c221822 100644 --- a/src/nc_proxy.c +++ b/src/nc_proxy.c @@ -335,7 +335,6 @@ proxy_accept(struct context *ctx, struct conn *p) return NC_OK; } - if (p->source_type == NC_SOURCE_TYPE_PROXY) { c = conn_get(p->owner, true, NC_SOURCE_TYPE_PROXY, ctx->cb); if (c == NULL) { @@ -357,6 +356,8 @@ proxy_accept(struct context *ctx, struct conn *p) return status; } + nc_set_tcpkeepalive(c->sd, 0, 0, 0); + if (p->family == AF_INET || p->family == AF_INET6) { status = nc_set_tcpnodelay(c->sd); if (status < 0) { diff --git a/src/nc_server.h b/src/nc_server.h index d57711a..9013825 100644 --- a/src/nc_server.h +++ b/src/nc_server.h @@ -120,6 +120,10 @@ struct server_pool { unsigned auto_eject_hosts:1; /* auto_eject_hosts? */ unsigned preconnect:1; /* preconnect? */ unsigned redis:1; /* redis? */ + unsigned tcpkeepalive:1; /* tcp keepalive? 
*/ + int tcpkeepidle; /* tcpkeep idle */ + int tcpkeepintvl; /* tcpkeep interval */ + int tcpkeepcnt; /* tcpkeep count */ }; void server_ref(struct conn *conn, void *owner); diff --git a/src/nc_stats.c b/src/nc_stats.c index 373e93f..49ba3ea 100644 --- a/src/nc_stats.c +++ b/src/nc_stats.c @@ -758,137 +758,6 @@ stats_make_rsp(struct stats *st) return NC_OK; } -static rstatus_t -stats_send_rsp(struct stats *st) -{ - rstatus_t status; - ssize_t n; - int sd; - - status = stats_make_rsp(st); - if (status != NC_OK) { - return status; - } - - sd = accept(st->sd, NULL, NULL); - if (sd < 0) { - log_error("accept on m %d failed: %s", st->sd, strerror(errno)); - return NC_ERROR; - } - - log_debug(LOG_VERB, "send stats on sd %d %d bytes", sd, st->buf.len); - - n = nc_sendn(sd, st->buf.data, st->buf.len); - if (n < 0) { - log_error("send stats on sd %d failed: %s", sd, strerror(errno)); - close(sd); - return NC_ERROR; - } - - close(sd); - - return NC_OK; -} - -static void -stats_loop_callback(void *arg1, void *arg2) -{ - struct stats *st = arg1; - int n = *((int *)arg2); - - /* aggregate stats from shadow (b) -> sum (c) */ - stats_aggregate1(st); - - if (n == 0) { - return; - } - - /* send aggregate stats sum (c) to collector */ - stats_send_rsp(st); -} - -static void * -stats_loop(void *arg) -{ - event_loop_stats(stats_loop_callback, arg); - return NULL; -} - -static rstatus_t -stats_listen(struct stats *st) -{ - rstatus_t status; - struct sockinfo si; - - status = nc_resolve(&st->addr, st->port, &si); - if (status < 0) { - return status; - } - - st->sd = socket(si.family, SOCK_STREAM, 0); - if (st->sd < 0) { - log_error("socket failed: %s", strerror(errno)); - return NC_ERROR; - } - - status = nc_set_reuseaddr(st->sd); - if (status < 0) { - log_error("set reuseaddr on m %d failed: %s", st->sd, strerror(errno)); - return NC_ERROR; - } - - status = bind(st->sd, (struct sockaddr *)&si.addr, si.addrlen); - if (status < 0) { - log_error("bind on m %d to addr '%.*s:%u' failed: %s", st->sd, - st->addr.len, st->addr.data, st->port, strerror(errno)); - return NC_ERROR; - } - - status = listen(st->sd, SOMAXCONN); - if (status < 0) { - log_error("listen on m %d failed: %s", st->sd, strerror(errno)); - return NC_ERROR; - } - - log_debug(LOG_NOTICE, "m %d listening on '%.*s:%u'", st->sd, - st->addr.len, st->addr.data, st->port); - - return NC_OK; -} - -static rstatus_t -stats_start_aggregator(struct stats *st) -{ - rstatus_t status; - - if (!stats_enabled) { - return NC_OK; - } - - status = stats_listen(st); - if (status != NC_OK) { - return status; - } - - status = pthread_create(&st->tid, NULL, stats_loop, st); - if (status < 0) { - log_error("stats aggregator create failed: %s", strerror(status)); - return NC_ERROR; - } - - return NC_OK; -} - -static void -stats_stop_aggregator(struct stats *st) -{ - if (!stats_enabled) { - return; - } - - close(st->sd); -} - struct stats * stats_create(struct context *ctx, char *source) { diff --git a/src/nc_thread.c b/src/nc_thread.c index ca3c0ad..fa832ac 100644 --- a/src/nc_thread.c +++ b/src/nc_thread.c @@ -243,8 +243,8 @@ client_accept(struct context *ctx, struct conn *notice) sui_free(su); c = conn_get(sp, true, sp->redis ? 
NC_SOURCE_TYPE_REDIS : NC_SOURCE_TYPE_MC, ctx->cb); if (c == NULL) { - log_error("get conn for c %d from pool %s failed: %s", - sd, sp->name, strerror(errno)); + log_error("get conn for c %d from pool %.*s failed: %s", + sd, sp->name.len, sp->name.data, strerror(errno)); status = close(sd); if (status < 0) { log_error("close c %d failed, ignored: %s", sd, strerror(errno)); @@ -257,30 +257,39 @@ client_accept(struct context *ctx, struct conn *notice) status = nc_set_nonblocking(c->sd); if (status < 0) { - log_error("set nonblock on c %d from pool %s failed: %s", - c->sd, sp->name, strerror(errno)); + log_error("set nonblock on c %d from pool %.*s failed: %s", + c->sd, sp->name.len, sp->name.data, strerror(errno)); c->close(ctx, c); return status; } + + if (sp->tcpkeepalive) { + status = nc_set_tcpkeepalive(c->sd, sp->tcpkeepidle, + sp->tcpkeepintvl, sp->tcpkeepcnt); + if (status != NC_OK) { + log_warn("set tcpkeepalive on c %d from pool %.*s failed, ignored: %s", + c->sd, sp->name.len, sp->name.data, strerror(errno)); + } + } if (sp->info.family == AF_INET || sp->info.family == AF_INET6) { status = nc_set_tcpnodelay(c->sd); if (status < 0) { - log_warn("set tcpnodelay on c %d from pool %s failed, ignored: %s", - c->sd, sp->name, strerror(errno)); + log_warn("set tcpnodelay on c %d from pool %.*s failed, ignored: %s", + c->sd, sp->name.len, sp->name.data, strerror(errno)); } } status = event_add_conn(ctx->evb, c); if (status < 0) { - log_error("event add conn from pool %s failed: %s", - sp->name, strerror(errno)); + log_error("event add conn from pool %.*s failed: %s", + sp->name.len, sp->name.data, strerror(errno)); c->close(ctx, c); return status; } - log_debug(LOG_DEBUG, "accepted c %d on pool %s from '%s'", - c->sd, sp->name.data, nc_unresolve_peer_desc(c->sd)); + log_debug(LOG_DEBUG, "accepted c %d on pool %.*s from '%s'", + c->sd, sp->name.len, sp->name.data, nc_unresolve_peer_desc(c->sd)); break; default: log_error("read error char '%c' from thread(id:%d) socketpairs[1] %d", diff --git a/src/nc_util.c b/src/nc_util.c index 1a8921d..b28c1bd 100644 --- a/src/nc_util.c +++ b/src/nc_util.c @@ -180,6 +180,52 @@ nc_get_rcvbuf(int sd) return size; } +int +nc_set_tcpkeepalive(int sd, int keepidle, int keepinterval, int keepcount) +{ + rstatus_t status; + int tcpkeepalive; + socklen_t len; + + tcpkeepalive = 1; + len = sizeof(tcpkeepalive); + + status = setsockopt(sd, SOL_SOCKET, SO_KEEPALIVE, &tcpkeepalive, len); + if (status < 0) { + log_error("setsockopt SO_KEEPALIVE call error(%s)", strerror(errno)); + return NC_ERROR; + } + + if (keepidle > 0) { + len = sizeof(keepidle); + status = setsockopt(sd, SOL_TCP, TCP_KEEPIDLE, &keepidle, len); + if (status < 0) { + log_error("setsockopt TCP_KEEPIDLE call error(%s)", strerror(errno)); + return NC_ERROR; + } + } + + if (keepinterval > 0) { + len = sizeof(keepinterval); + status = setsockopt(sd, SOL_TCP, TCP_KEEPINTVL, &keepinterval, len); + if (status < 0) { + log_error("setsockopt TCP_KEEPINTVL call error(%s)", strerror(errno)); + return NC_ERROR; + } + } + + if (keepcount > 0) { + len = sizeof(keepcount); + status = setsockopt(sd, SOL_TCP, TCP_KEEPCNT, &keepcount, len); + if (status < 0) { + log_error("setsockopt TCP_KEEPCNT call error(%s)", strerror(errno)); + return NC_ERROR; + } + } + + return NC_OK; +} + int _nc_atoi(uint8_t *line, size_t n) { diff --git a/src/nc_util.h b/src/nc_util.h index f718532..e9b4611 100644 --- a/src/nc_util.h +++ b/src/nc_util.h @@ -88,6 +88,7 @@ int nc_set_rcvbuf(int sd, int size); int nc_get_soerror(int sd); int 
nc_get_sndbuf(int sd); int nc_get_rcvbuf(int sd); +int nc_set_tcpkeepalive(int sd, int keepidle, int keepinterval, int keepcount); int _nc_atoi(uint8_t *line, size_t n); bool nc_valid_port(int n);
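
As a sketch of how the new pool keys fit into a configuration (the pool name, addresses and numeric values below are illustrative assumptions, not taken from conf/nutcrackers.yml): tcpkeepidle and tcpkeepintvl are passed straight to TCP_KEEPIDLE and TCP_KEEPINTVL by nc_set_tcpkeepalive(), so they are in seconds, and tcpkeepcnt is a probe count passed to TCP_KEEPCNT.

    alpha:
      listen: 127.0.0.1:22121
      hash: fnv1a_64
      distribution: ketama
      redis: false
      tcpkeepalive: true       # enable SO_KEEPALIVE on accepted client connections
      tcpkeepidle: 300         # seconds of idle time before the first probe (TCP_KEEPIDLE)
      tcpkeepintvl: 30         # seconds between probes (TCP_KEEPINTVL)
      tcpkeepcnt: 3            # unanswered probes before the connection is dropped (TCP_KEEPCNT)
      servers:
       - 127.0.0.1:11211:1

If tcpkeepalive is left unset it defaults to false and no per-pool keepalive is applied; if only tcpkeepalive is set to true, nc_set_tcpkeepalive() enables SO_KEEPALIVE and leaves the idle, interval and count values at the kernel defaults, since the unset values (-1) are skipped.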