if ((cl = get_header(&conn->request_info, "Content-Length")) != NULL) {
conn->content_len = strtoll(cl, NULL, 10);
} else if (!mg_strcasecmp(conn->request_info.request_method, "POST") ||
!mg_strcasecmp(conn->request_info.request_method, "PUT")) {
conn->content_len = -1;
} else {
conn->content_len = 0;
}
conn->birth_time = time(NULL);
}
return ebuf[0] == '\0';
}
struct mg_connection *mg_download(const char *host, int port, int use_ssl,
char *ebuf, size_t ebuf_len,
const char *fmt, ...) {
struct mg_connection *conn;
va_list ap;
va_start(ap, fmt);
ebuf[0] = '\0';
if ((conn = mg_connect(host, port, use_ssl, ebuf, ebuf_len)) == NULL) {
} else if (mg_vprintf(conn, fmt, ap) <= 0) {
snprintf(ebuf, ebuf_len, "%s", "Error sending request");
} else {
getreq(conn, ebuf, ebuf_len);
}
if (ebuf[0] != '\0' && conn != NULL) {
mg_close_connection(conn);
conn = NULL;
}
return conn;
}
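// Illustrative usage sketch (not part of this file): an application can use
// mg_download() to issue a plain HTTP request and then read the reply with
// mg_read(). The host, port, and request below are made-up example values.
//
//   char ebuf[100], buf[1024];
//   int n;
//   struct mg_connection *conn = mg_download("example.com", 80, 0,
//                                            ebuf, sizeof(ebuf),
//                                            "GET /index.html HTTP/1.0\r\n"
//                                            "Host: example.com\r\n\r\n");
//   if (conn == NULL) {
//     printf("Download failed: %s\n", ebuf);
//   } else {
//     // Read and dump the response body
//     while ((n = mg_read(conn, buf, sizeof(buf))) > 0) {
//       fwrite(buf, 1, n, stdout);
//     }
//     mg_close_connection(conn);
//   }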
static void process_new_connection(struct mg_connection *conn) {
struct mg_request_info *ri = &conn->request_info;
int keep_alive_enabled, keep_alive, discard_len;
char ebuf[100];
keep_alive_enabled = !strcmp(conn->ctx->config[ENABLE_KEEP_ALIVE], "yes");
keep_alive = 0;
// Important: on new connection, reset the receiving buffer. Credit goes
// to crule42.
conn->data_len = 0;
do {
if (!getreq(conn, ebuf, sizeof(ebuf))) {
send_http_error(conn, 500, "Server Error", "%s", ebuf);
} else if (!is_valid_uri(conn->request_info.uri)) {
snprintf(ebuf, sizeof(ebuf), "Invalid URI: [%s]", ri->uri);
send_http_error(conn, 400, "Bad Request", "%s", ebuf);
} else if (strcmp(ri->http_version, "1.0") &&
strcmp(ri->http_version, "1.1")) {
snprintf(ebuf, sizeof(ebuf), "Bad HTTP version: [%s]", ri->http_version);
send_http_error(conn, 505, "Bad HTTP version", "%s", ebuf);
}
if (ebuf[0] == '\0') {
handle_request(conn);
if (conn->ctx->callbacks.end_request != NULL) {
conn->ctx->callbacks.end_request(conn, conn->status_code);
}
log_access(conn);
}
if (ri->remote_user != NULL) {
free((void *) ri->remote_user);
// Important! Otherwise, a keep-alive connection that mixes requests
// with and without authentication would double-free this pointer and crash.
ri->remote_user = NULL;
}
// NOTE(lsm): order is important here. should_keep_alive() uses the parsed
// request, which becomes invalid after the memmove() below. Therefore,
// remember the should_keep_alive() result now, for later use in the loop
// exit condition.
keep_alive = conn->ctx->stop_flag == 0 && keep_alive_enabled &&
conn->content_len >= 0 && should_keep_alive(conn);
// Discard all buffered data for this request
discard_len = conn->content_len >= 0 && conn->request_len > 0 &&
conn->request_len + conn->content_len < (int64_t) conn->data_len ?
(int) (conn->request_len + conn->content_len) : conn->data_len;
assert(discard_len >= 0);
memmove(conn->buf, conn->buf + discard_len, conn->data_len - discard_len);
conn->data_len -= discard_len;
assert(conn->data_len >= 0);
assert(conn->data_len <= conn->buf_size);
} while (keep_alive);
}
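// Worked example of the discard computation in process_new_connection()
// above (all numbers are illustrative): with request_len == 200 header
// bytes, content_len == 100 body bytes, and data_len == 350 bytes already
// buffered, discard_len is 300, so the memmove() keeps the remaining 50
// bytes, which are the start of the next pipelined request on this
// keep-alive connection. If the body is not fully buffered yet
// (request_len + content_len >= data_len), the whole buffer is discarded
// instead.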
// Worker threads take accepted sockets from the queue
static int consume_socket(struct mg_context *ctx, struct socket *sp) {
(void) pthread_mutex_lock(&ctx->mutex);
DEBUG_TRACE(("going idle"));
// If the queue is empty, wait. We're idle at this point.
while (ctx->sq_head == ctx->sq_tail && ctx->stop_flag == 0) {
pthread_cond_wait(&ctx->sq_full, &ctx->mutex);
}
// If we're stopping, sq_head may be equal to sq_tail.
if (ctx->sq_head > ctx->sq_tail) {
// Copy socket from the queue and increment tail
*sp = ctx->queue[ctx->sq_tail % ARRAY_SIZE(ctx->queue)];
ctx->sq_tail++;
DEBUG_TRACE(("grabbed socket %d, going busy", sp->sock));
// Rebase the head/tail counters if needed, so they do not grow without bound
while (ctx->sq_tail > (int) ARRAY_SIZE(ctx->queue)) {
ctx->sq_tail -= ARRAY_SIZE(ctx->queue);
ctx->sq_head -= ARRAY_SIZE(ctx->queue);
}
}
(void) pthread_cond_signal(&ctx->sq_empty);
(void) pthread_mutex_unlock(&ctx->mutex);
return !ctx->stop_flag;
}
static void *worker_thread(void *thread_func_param) {
struct mg_context *ctx = (struct mg_context *) thread_func_param;
struct mg_connection *conn;
conn = (struct mg_connection *) calloc(1, sizeof(*conn) + MAX_REQUEST_SIZE);
if (conn == NULL) {
cry(fc(ctx), "%s", "Cannot create new connection struct, OOM");
} else {
conn->buf_size = MAX_REQUEST_SIZE;
conn->buf = (char *) (conn + 1);
conn->ctx = ctx;
conn->request_info.user_data = ctx->user_data;
// Call consume_socket() even when ctx->stop_flag > 0, so that it signals
// the sq_empty condvar and wakes up the master thread waiting in
// produce_socket().
while (consume_socket(ctx, &conn->client)) {
conn->birth_time = time(NULL);
// Fill in IP and port info early, so that even if the SSL setup below
// fails, the error handler still has this information available.
// Thanks to Johannes Winkelmann for the patch.
// TODO(lsm): Fix IPv6 case
conn->request_info.remote_port = ntohs(conn->client.rsa.sin.sin_port);
memcpy(&conn->request_info.remote_ip,
&conn->client.rsa.sin.sin_addr.s_addr, 4);
conn->request_info.remote_ip = ntohl(conn->request_info.remote_ip);
conn->request_info.is_ssl = conn->client.is_ssl;
if (!conn->client.is_ssl
#ifndef NO_SSL
|| sslize(conn, conn->ctx->ssl_ctx, SSL_accept)
#endif
) {
process_new_connection(conn);
}
close_connection(conn);
}
free(conn);
}
// Signal master that we're done with connection and exiting
(void) pthread_mutex_lock(&ctx->mutex);
ctx->num_threads--;
(void) pthread_cond_signal(&ctx->cond);
assert(ctx->num_threads >= 0);
(void) pthread_mutex_unlock(&ctx->mutex);
DEBUG_TRACE(("exiting"));
return NULL;
}
// Master thread adds accepted sockets to the queue
static void produce_socket(struct mg_context *ctx, const struct socket *sp) {
(void) pthread_mutex_lock(&ctx->mutex);
// If the queue is full, wait
while (ctx->stop_flag == 0 &&
ctx->sq_head - ctx->sq_tail >= (int) ARRAY_SIZE(ctx->queue)) {
(void) pthread_cond_wait(&ctx->sq_empty, &ctx->mutex);
}
if (ctx->sq_head - ctx->sq_tail < (int) ARRAY_SIZE(ctx->queue)) {
// Copy socket to the queue and increment head
ctx->queue[ctx->sq_head % ARRAY_SIZE(ctx->queue)] = *sp;
ctx->sq_head++;
DEBUG_TRACE(("queued socket %d", sp->sock));
}
(void) pthread_cond_signal(&ctx->sq_full);
(void) pthread_mutex_unlock(&ctx->mutex);
}
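// Worked example of the queue counters used by produce_socket() and
// consume_socket() above (numbers are illustrative): after 7 sockets have
// been produced and 5 consumed, sq_head == 7 and sq_tail == 5, so two
// sockets are pending and the next one is taken from
// queue[5 % ARRAY_SIZE(queue)]. consume_socket() rebases both counters
// once sq_tail grows past the array size, so the difference
// sq_head - sq_tail always equals the number of queued sockets.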
static int set_sock_timeout(SOCKET sock, int milliseconds) {
#ifdef _WIN32
DWORD t = milliseconds;
#else
struct timeval t;
t.tv_sec = milliseconds / 1000;
t.tv_usec = (milliseconds * 1000) % 1000000;
#endif
return setsockopt(sock, SOL_SOCKET, SO_RCVTIMEO, (void *) &t, sizeof(t)) ||
setsockopt(sock, SOL_SOCKET, SO_SNDTIMEO, (void *) &t, sizeof(t));
}
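// For example, set_sock_timeout(sock, 2500) (the value 2500 is an arbitrary
// illustration) yields t.tv_sec == 2 and t.tv_usec == 500000 on POSIX
// (2500 / 1000 and (2500 * 1000) % 1000000), while on Windows the same 2500
// is passed to setsockopt() directly as a DWORD count of milliseconds.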
static void accept_new_connection(const struct socket *listener,
struct mg_context *ctx) {
struct socket so;
char src_addr[IP_ADDR_STR_LEN];
socklen_t len = sizeof(so.rsa);
int on = 1;
if ((so.sock = accept(listener->sock, &so.rsa.sa, &len)) == INVALID_SOCKET) {
} else if (!check_acl(ctx, ntohl(* (uint32_t *) &so.rsa.sin.sin_addr))) {
sockaddr_to_string(src_addr, sizeof(src_addr), &so.rsa);
cry(fc(ctx), "%s: %s is not allowed to connect", __func__, src_addr);
closesocket(so.sock);
} else {
// Queue the accepted socket structure (so)
DEBUG_TRACE(("Accepted socket %d", (int) so.sock));
set_close_on_exec(so.sock);
so.is_ssl = listener->is_ssl;
so.ssl_redir = listener->ssl_redir;
getsockname(so.sock, &so.lsa.sa, &len);
// Set TCP keep-alive. This is needed because if HTTP-level keep-alive
// is enabled and the client goes away without properly closing the
// connection (e.g. it crashes or loses network), the server never receives
// a TCP FIN or RST and would keep the connection open forever. With TCP
// keep-alive, the next keep-alive probe detects that the client is down
// and closes the server end.
// Thanks to Igor Klopov who suggested the patch.
setsockopt(so.sock, SOL_SOCKET, SO_KEEPALIVE, (void *) &on, sizeof(on));
set_sock_timeout(so.sock, atoi(ctx->config[REQUEST_TIMEOUT]));
produce_socket(ctx, &so);
}
}
static void *master_thread(void *thread_func_param) {
struct mg_context *ctx = (struct mg_context *) thread_func_param;
struct pollfd *pfd;
int i;
// Increase priority of the master thread
#if defined(_WIN32)
SetThreadPriority(GetCurrentThread(), THREAD_PRIORITY_ABOVE_NORMAL);
#endif
#if defined(ISSUE_317)
struct sched_param sched_param;
sched_param.sched_priority = sched_get_priority_max(SCHED_RR);
pthread_setschedparam(pthread_self(), SCHED_RR, &sched_param);
#endif
pfd = (struct pollfd *) calloc(ctx->num_listening_sockets, sizeof(pfd[0]));
while (pfd != NULL && ctx->stop_flag == 0) {
for (i = 0; i < ctx->num_listening_sockets; i++) {
pfd[i].fd = ctx->listening_sockets[i].sock;
pfd[i].events = POLLIN;
}
if (poll(pfd, ctx->num_listening_sockets, 200) > 0) {
for (i = 0; i < ctx->num_listening_sockets; i++) {
// NOTE(lsm): on QNX, poll() returns POLLRDNORM after a successful poll,
// and POLLIN is defined as (POLLRDNORM | POLLRDBAND).
// Therefore, we check pfd[i].revents & POLLIN, not
// pfd[i].revents == POLLIN.
if (ctx->stop_flag == 0 && (pfd[i].revents & POLLIN)) {
accept_new_connection(&ctx->listening_sockets[i], ctx);
}
}
}
}
free(pfd);
DEBUG_TRACE(("stopping workers"));
// Stop signal received: somebody called mg_stop. Quit.
close_all_listening_sockets(ctx);
// Wake up workers that are waiting for connections to handle.
pthread_cond_broadcast(&ctx->sq_full);
// Wait until all threads finish
(void) pthread_mutex_lock(&ctx->mutex);
while (ctx->num_threads > 0) {
(void) pthread_cond_wait(&ctx->cond, &ctx->mutex);
}
(void) pthread_mutex_unlock(&ctx->mutex);
// All threads exited, no sync is needed. Destroy mutex and condvars
(void) pthread_mutex_destroy(&ctx->mutex);
(void) pthread_cond_destroy(&ctx->cond);
(void) pthread_cond_destroy(&ctx->sq_empty);
(void) pthread_cond_destroy(&ctx->sq_full);
#if !defined(NO_SSL)
uninitialize_ssl(ctx);
#endif
DEBUG_TRACE(("exiting"));
// Signal mg_stop() that we're done.
// WARNING: This must be the very last thing this
// thread does, as ctx becomes invalid after this line.
ctx->stop_flag = 2;
return NULL;
}
static void free_context(struct mg_context *ctx) {
int i;
// Deallocate config parameters
for (i = 0; i < NUM_OPTIONS; i++) {
if (ctx->config[i] != NULL)
free(ctx->config[i]);
}
#ifndef NO_SSL
// Deallocate SSL context
if (ctx->ssl_ctx != NULL) {
SSL_CTX_free(ctx->ssl_ctx);
}
if (ssl_mutexes != NULL) {
free(ssl_mutexes);
ssl_mutexes = NULL;
}
#endif // !NO_SSL
// Deallocate context itself
free(ctx);
}
void mg_stop(struct mg_context *ctx) {
ctx->stop_flag = 1;
// Wait until the master thread finishes and sets stop_flag to 2
while (ctx->stop_flag != 2) {
(void) mg_sleep(10);
}
free_context(ctx);
#if defined(_WIN32) && !defined(__SYMBIAN32__)
(void) WSACleanup();
#endif // _WIN32
}
struct mg_context *mg_start(const struct mg_callbacks *callbacks,
void *user_data,
const char **options) {
struct mg_context *ctx;
const char *name, *value, *default_value;
int i;
#if defined(_WIN32) && !defined(__SYMBIAN32__)
WSADATA data;
WSAStartup(MAKEWORD(2,2), &data);
InitializeCriticalSection(&global_log_file_lock);
#endif // _WIN32
// Allocate context and initialize reasonable general case defaults.
// TODO(lsm): do proper error handling here.
if ((ctx = (struct mg_context *) calloc(1, sizeof(*ctx))) == NULL) {
return NULL;
}
ctx->callbacks = *callbacks;
ctx->user_data = user_data;
while (options && (name = *options++) != NULL) {
if ((i = get_option_index(name)) == -1) {
cry(fc(ctx), "Invalid option: %s", name);
free_context(ctx);
return NULL;
} else if ((value = *options++) == NULL) {
cry(fc(ctx), "%s: option value cannot be NULL", name);
free_context(ctx);
return NULL;
}
if (ctx->config[i] != NULL) {
cry(fc(ctx), "warning: %s: duplicate option", name);
free(ctx->config[i]);
}
ctx->config[i] = mg_strdup(value);
DEBUG_TRACE(("[%s] -> [%s]", name, value));
}
// Set default values for options that were not explicitly set
for (i = 0; config_options[i * 2] != NULL; i++) {
default_value = config_options[i * 2 + 1];
if (ctx->config[i] == NULL && default_value != NULL) {
ctx->config[i] = mg_strdup(default_value);
}
}
// NOTE(lsm): order is important here. SSL certificates must
// be initialized before listening ports. UID must be set last.
if (!set_gpass_option(ctx) ||
#if !defined(NO_SSL)
!set_ssl_option(ctx) ||
#endif
!set_ports_option(ctx) ||
#if !defined(_WIN32)
!set_uid_option(ctx) ||
#endif
!set_acl_option(ctx)) {
free_context(ctx);
return NULL;
}
#if !defined(_WIN32) && !defined(__SYMBIAN32__)
// Ignore SIGPIPE, so that if the browser cancels a request, the resulting
// write error won't kill the whole process.
(void) signal(SIGPIPE, SIG_IGN);
// Also ignore SIGCHLD, to let the OS reap zombies properly.
(void) signal(SIGCHLD, SIG_IGN);
#endif // !_WIN32
(void) pthread_mutex_init(&ctx->mutex, NULL);
(void) pthread_cond_init(&ctx->cond, NULL);
(void) pthread_cond_init(&ctx->sq_empty, NULL);
(void) pthread_cond_init(&ctx->sq_full, NULL);
// Start master (listening) thread
mg_start_thread(master_thread, ctx);
// Start worker threads
for (i = 0; i < atoi(ctx->config[NUM_THREADS]); i++) {
if (mg_start_thread(worker_thread, ctx) != 0) {
cry(fc(ctx), "Cannot start worker thread: %ld", (long) ERRNO);
} else {
ctx->num_threads++;
}
}
return ctx;
}
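// Illustrative embedding sketch (not part of this file; the option values
// and the handler below are made-up examples): a typical embedder builds a
// NULL-terminated list of option name/value pairs, fills struct
// mg_callbacks, starts the server with mg_start(), and shuts it down with
// mg_stop().
//
//   static int begin_request_handler(struct mg_connection *conn) {
//     return 0;  // Returning 0 lets mongoose handle the request itself
//   }
//
//   const char *options[] = {
//     "listening_ports", "8080",
//     "document_root", ".",
//     NULL
//   };
//   struct mg_callbacks callbacks;
//   memset(&callbacks, 0, sizeof(callbacks));
//   callbacks.begin_request = begin_request_handler;
//   struct mg_context *ctx = mg_start(&callbacks, NULL, options);
//   // ... serve requests until it is time to shut down ...
//   mg_stop(ctx);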