Rizin
unix-like reverse engineering framework and cli tools
tcp.c
Go to the documentation of this file.
1 /* Copyright Joyent, Inc. and other Node contributors. All rights reserved.
2  *
3  * Permission is hereby granted, free of charge, to any person obtaining a copy
4  * of this software and associated documentation files (the "Software"), to
5  * deal in the Software without restriction, including without limitation the
6  * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
7  * sell copies of the Software, and to permit persons to whom the Software is
8  * furnished to do so, subject to the following conditions:
9  *
10  * The above copyright notice and this permission notice shall be included in
11  * all copies or substantial portions of the Software.
12  *
13  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
14  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
15  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
16  * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
17  * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
18  * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
19  * IN THE SOFTWARE.
20  */
21 
22 #include <assert.h>
23 #include <stdlib.h>
24 
25 #include "uv.h"
26 #include "internal.h"
27 #include "handle-inl.h"
28 #include "stream-inl.h"
29 #include "req-inl.h"
30 
31 
/*
 * Number of active tcp streams below which tcp read buffers are
 * preallocated. Currently 0, i.e. the optimization is disabled, because
 * the node slab allocator performs poorly under this allocation pattern;
 * revisit once the allocator is improved.
 */
const unsigned int uv_active_tcp_streams_threshold = 0;

/* Number of simultaneous pending AcceptEx calls. */
const unsigned int uv_simultaneous_server_accepts = 32;

/* A zero-size buffer for use by uv_tcp_read */
static char uv_zero_[] = "";
47 
48 static int uv__tcp_nodelay(uv_tcp_t* handle, SOCKET socket, int enable) {
49  if (setsockopt(socket,
50  IPPROTO_TCP,
51  TCP_NODELAY,
52  (const char*)&enable,
53  sizeof enable) == -1) {
54  return WSAGetLastError();
55  }
56  return 0;
57 }
58 
59 
60 static int uv__tcp_keepalive(uv_tcp_t* handle, SOCKET socket, int enable, unsigned int delay) {
61  if (setsockopt(socket,
62  SOL_SOCKET,
64  (const char*)&enable,
65  sizeof enable) == -1) {
66  return WSAGetLastError();
67  }
68 
69  if (enable && setsockopt(socket,
70  IPPROTO_TCP,
72  (const char*)&delay,
73  sizeof delay) == -1) {
74  return WSAGetLastError();
75  }
76 
77  return 0;
78 }
79 
80 
83  SOCKET socket,
84  int family,
85  int imported) {
86  DWORD yes = 1;
87  int non_ifs_lsp;
88  int err;
89 
90  if (handle->socket != INVALID_SOCKET)
91  return UV_EBUSY;
92 
93  /* Set the socket to nonblocking mode */
94  if (ioctlsocket(socket, FIONBIO, &yes) == SOCKET_ERROR) {
95  return WSAGetLastError();
96  }
97 
98  /* Make the socket non-inheritable */
99  if (!SetHandleInformation((HANDLE) socket, HANDLE_FLAG_INHERIT, 0))
100  return GetLastError();
101 
102  /* Associate it with the I/O completion port. Use uv_handle_t pointer as
103  * completion key. */
104  if (CreateIoCompletionPort((HANDLE)socket,
105  loop->iocp,
106  (ULONG_PTR)socket,
107  0) == NULL) {
108  if (imported) {
109  handle->flags |= UV_HANDLE_EMULATE_IOCP;
110  } else {
111  return GetLastError();
112  }
113  }
114 
115  if (family == AF_INET6) {
116  non_ifs_lsp = uv_tcp_non_ifs_lsp_ipv6;
117  } else {
118  non_ifs_lsp = uv_tcp_non_ifs_lsp_ipv4;
119  }
120 
121  if (!(handle->flags & UV_HANDLE_EMULATE_IOCP) && !non_ifs_lsp) {
122  UCHAR sfcnm_flags =
124  if (!SetFileCompletionNotificationModes((HANDLE) socket, sfcnm_flags))
125  return GetLastError();
127  }
128 
129  if (handle->flags & UV_HANDLE_TCP_NODELAY) {
131  if (err)
132  return err;
133  }
134 
135  /* TODO: Use stored delay. */
136  if (handle->flags & UV_HANDLE_TCP_KEEPALIVE) {
137  err = uv__tcp_keepalive(handle, socket, 1, 60);
138  if (err)
139  return err;
140  }
141 
142  handle->socket = socket;
143 
144  if (family == AF_INET6) {
145  handle->flags |= UV_HANDLE_IPV6;
146  } else {
147  assert(!(handle->flags & UV_HANDLE_IPV6));
148  }
149 
150  return 0;
151 }
152 
153 
155  int domain;
156 
157  /* Use the lower 8 bits for the domain */
158  domain = flags & 0xFF;
159  if (domain != AF_INET && domain != AF_INET6 && domain != AF_UNSPEC)
160  return UV_EINVAL;
161 
162  if (flags & ~0xFF)
163  return UV_EINVAL;
164 
165  uv_stream_init(loop, (uv_stream_t*) handle, UV_TCP);
166  handle->tcp.serv.accept_reqs = NULL;
167  handle->tcp.serv.pending_accepts = NULL;
168  handle->socket = INVALID_SOCKET;
169  handle->reqs_pending = 0;
170  handle->tcp.serv.func_acceptex = NULL;
171  handle->tcp.conn.func_connectex = NULL;
172  handle->tcp.serv.processed_accepts = 0;
173  handle->delayed_error = 0;
174 
175  /* If anything fails beyond this point we need to remove the handle from
176  * the handle queue, since it was added by uv__handle_init in uv_stream_init.
177  */
178 
179  if (domain != AF_UNSPEC) {
180  SOCKET sock;
181  DWORD err;
182 
183  sock = socket(domain, SOCK_STREAM, 0);
184  if (sock == INVALID_SOCKET) {
185  err = WSAGetLastError();
186  QUEUE_REMOVE(&handle->handle_queue);
187  return uv_translate_sys_error(err);
188  }
189 
190  err = uv_tcp_set_socket(handle->loop, handle, sock, domain, 0);
191  if (err) {
192  closesocket(sock);
193  QUEUE_REMOVE(&handle->handle_queue);
194  return uv_translate_sys_error(err);
195  }
196 
197  }
198 
199  return 0;
200 }
201 
202 
205 }
206 
207 
209  int err;
210  unsigned int i;
211  uv_tcp_accept_t* req;
212 
213  if (handle->flags & UV_HANDLE_CONNECTION &&
214  handle->stream.conn.shutdown_req != NULL &&
215  handle->stream.conn.write_reqs_pending == 0) {
216 
217  UNREGISTER_HANDLE_REQ(loop, handle, handle->stream.conn.shutdown_req);
218 
219  err = 0;
220  if (handle->flags & UV_HANDLE_CLOSING) {
221  err = ERROR_OPERATION_ABORTED;
222  } else if (shutdown(handle->socket, SD_SEND) == SOCKET_ERROR) {
223  err = WSAGetLastError();
224  }
225 
226  if (handle->stream.conn.shutdown_req->cb) {
227  handle->stream.conn.shutdown_req->cb(handle->stream.conn.shutdown_req,
229  }
230 
231  handle->stream.conn.shutdown_req = NULL;
233  return;
234  }
235 
236  if (handle->flags & UV_HANDLE_CLOSING &&
237  handle->reqs_pending == 0) {
238  assert(!(handle->flags & UV_HANDLE_CLOSED));
239 
240  if (!(handle->flags & UV_HANDLE_TCP_SOCKET_CLOSED)) {
241  closesocket(handle->socket);
242  handle->socket = INVALID_SOCKET;
244  }
245 
246  if (!(handle->flags & UV_HANDLE_CONNECTION) && handle->tcp.serv.accept_reqs) {
247  if (handle->flags & UV_HANDLE_EMULATE_IOCP) {
248  for (i = 0; i < uv_simultaneous_server_accepts; i++) {
249  req = &handle->tcp.serv.accept_reqs[i];
250  if (req->wait_handle != INVALID_HANDLE_VALUE) {
251  UnregisterWait(req->wait_handle);
252  req->wait_handle = INVALID_HANDLE_VALUE;
253  }
254  if (req->event_handle != NULL) {
255  CloseHandle(req->event_handle);
256  req->event_handle = NULL;
257  }
258  }
259  }
260 
261  uv__free(handle->tcp.serv.accept_reqs);
262  handle->tcp.serv.accept_reqs = NULL;
263  }
264 
265  if (handle->flags & UV_HANDLE_CONNECTION &&
266  handle->flags & UV_HANDLE_EMULATE_IOCP) {
267  if (handle->read_req.wait_handle != INVALID_HANDLE_VALUE) {
268  UnregisterWait(handle->read_req.wait_handle);
269  handle->read_req.wait_handle = INVALID_HANDLE_VALUE;
270  }
271  if (handle->read_req.event_handle != NULL) {
272  CloseHandle(handle->read_req.event_handle);
273  handle->read_req.event_handle = NULL;
274  }
275  }
276 
278  loop->active_tcp_streams--;
279  }
280 }
281 
282 
283 /* Unlike on Unix, here we don't set SO_REUSEADDR, because it doesn't just
284  * allow binding to addresses that are in use by sockets in TIME_WAIT, it
285  * effectively allows 'stealing' a port which is in use by another application.
286  *
287  * SO_EXCLUSIVEADDRUSE is also not good here because it does check all sockets,
288  * regardless of state, so we'd get an error even if the port is in use by a
289  * socket in TIME_WAIT state.
290  *
291  * See issue #1360.
292  *
293  */
295  const struct sockaddr* addr,
296  unsigned int addrlen,
297  unsigned int flags) {
298  DWORD err;
299  int r;
300 
301  if (handle->socket == INVALID_SOCKET) {
302  SOCKET sock;
303 
304  /* Cannot set IPv6-only mode on non-IPv6 socket. */
305  if ((flags & UV_TCP_IPV6ONLY) && addr->sa_family != AF_INET6)
306  return ERROR_INVALID_PARAMETER;
307 
308  sock = socket(addr->sa_family, SOCK_STREAM, 0);
309  if (sock == INVALID_SOCKET) {
310  return WSAGetLastError();
311  }
312 
313  err = uv_tcp_set_socket(handle->loop, handle, sock, addr->sa_family, 0);
314  if (err) {
315  closesocket(sock);
316  return err;
317  }
318  }
319 
320 #ifdef IPV6_V6ONLY
321  if (addr->sa_family == AF_INET6) {
322  int on;
323 
324  on = (flags & UV_TCP_IPV6ONLY) != 0;
325 
326  /* TODO: how to handle errors? This may fail if there is no ipv4 stack
327  * available, or when run on XP/2003 which have no support for dualstack
328  * sockets. For now we're silently ignoring the error. */
329  setsockopt(handle->socket,
330  IPPROTO_IPV6,
331  IPV6_V6ONLY,
332  (const char*)&on,
333  sizeof on);
334  }
335 #endif
336 
337  r = bind(handle->socket, addr, addrlen);
338 
339  if (r == SOCKET_ERROR) {
340  err = WSAGetLastError();
341  if (err == WSAEADDRINUSE) {
342  /* Some errors are not to be reported until connect() or listen() */
343  handle->delayed_error = err;
344  } else {
345  return err;
346  }
347  }
348 
349  handle->flags |= UV_HANDLE_BOUND;
350 
351  return 0;
352 }
353 
354 
355 static void CALLBACK post_completion(void* context, BOOLEAN timed_out) {
356  uv_req_t* req;
357  uv_tcp_t* handle;
358 
359  req = (uv_req_t*) context;
360  assert(req != NULL);
361  handle = (uv_tcp_t*)req->data;
362  assert(handle != NULL);
363  assert(!timed_out);
364 
365  if (!PostQueuedCompletionStatus(handle->loop->iocp,
366  req->u.io.overlapped.InternalHigh,
367  0,
368  &req->u.io.overlapped)) {
369  uv_fatal_error(GetLastError(), "PostQueuedCompletionStatus");
370  }
371 }
372 
373 
374 static void CALLBACK post_write_completion(void* context, BOOLEAN timed_out) {
375  uv_write_t* req;
376  uv_tcp_t* handle;
377 
378  req = (uv_write_t*) context;
379  assert(req != NULL);
380  handle = (uv_tcp_t*)req->handle;
381  assert(handle != NULL);
382  assert(!timed_out);
383 
384  if (!PostQueuedCompletionStatus(handle->loop->iocp,
385  req->u.io.overlapped.InternalHigh,
386  0,
387  &req->u.io.overlapped)) {
388  uv_fatal_error(GetLastError(), "PostQueuedCompletionStatus");
389  }
390 }
391 
392 
393 static void uv_tcp_queue_accept(uv_tcp_t* handle, uv_tcp_accept_t* req) {
394  uv_loop_t* loop = handle->loop;
395  BOOL success;
396  DWORD bytes;
397  SOCKET accept_socket;
398  short family;
399 
401  assert(req->accept_socket == INVALID_SOCKET);
402 
403  /* choose family and extension function */
404  if (handle->flags & UV_HANDLE_IPV6) {
405  family = AF_INET6;
406  } else {
407  family = AF_INET;
408  }
409 
410  /* Open a socket for the accepted connection. */
411  accept_socket = socket(family, SOCK_STREAM, 0);
412  if (accept_socket == INVALID_SOCKET) {
413  SET_REQ_ERROR(req, WSAGetLastError());
415  handle->reqs_pending++;
416  return;
417  }
418 
419  /* Make the socket non-inheritable */
420  if (!SetHandleInformation((HANDLE) accept_socket, HANDLE_FLAG_INHERIT, 0)) {
421  SET_REQ_ERROR(req, GetLastError());
423  handle->reqs_pending++;
424  closesocket(accept_socket);
425  return;
426  }
427 
428  /* Prepare the overlapped structure. */
429  memset(&(req->u.io.overlapped), 0, sizeof(req->u.io.overlapped));
430  if (handle->flags & UV_HANDLE_EMULATE_IOCP) {
431  assert(req->event_handle != NULL);
432  req->u.io.overlapped.hEvent = (HANDLE) ((ULONG_PTR) req->event_handle | 1);
433  }
434 
435  success = handle->tcp.serv.func_acceptex(handle->socket,
436  accept_socket,
437  (void*)req->accept_buffer,
438  0,
439  sizeof(struct sockaddr_storage),
440  sizeof(struct sockaddr_storage),
441  &bytes,
442  &req->u.io.overlapped);
443 
444  if (UV_SUCCEEDED_WITHOUT_IOCP(success)) {
445  /* Process the req without IOCP. */
446  req->accept_socket = accept_socket;
447  handle->reqs_pending++;
449  } else if (UV_SUCCEEDED_WITH_IOCP(success)) {
450  /* The req will be processed with IOCP. */
451  req->accept_socket = accept_socket;
452  handle->reqs_pending++;
453  if (handle->flags & UV_HANDLE_EMULATE_IOCP &&
454  req->wait_handle == INVALID_HANDLE_VALUE &&
455  !RegisterWaitForSingleObject(&req->wait_handle,
456  req->event_handle, post_completion, (void*) req,
457  INFINITE, WT_EXECUTEINWAITTHREAD)) {
458  SET_REQ_ERROR(req, GetLastError());
460  }
461  } else {
462  /* Make this req pending reporting an error. */
463  SET_REQ_ERROR(req, WSAGetLastError());
465  handle->reqs_pending++;
466  /* Destroy the preallocated client socket. */
467  closesocket(accept_socket);
468  /* Destroy the event handle */
469  if (handle->flags & UV_HANDLE_EMULATE_IOCP) {
470  CloseHandle(req->event_handle);
471  req->event_handle = NULL;
472  }
473  }
474 }
475 
476 
478  uv_read_t* req;
479  uv_buf_t buf;
480  int result;
481  DWORD bytes, flags;
482 
483  assert(handle->flags & UV_HANDLE_READING);
484  assert(!(handle->flags & UV_HANDLE_READ_PENDING));
485 
486  req = &handle->read_req;
487  memset(&req->u.io.overlapped, 0, sizeof(req->u.io.overlapped));
488 
489  /*
490  * Preallocate a read buffer if the number of active streams is below
491  * the threshold.
492  */
493  if (loop->active_tcp_streams < uv_active_tcp_streams_threshold) {
494  handle->flags &= ~UV_HANDLE_ZERO_READ;
495  handle->tcp.conn.read_buffer = uv_buf_init(NULL, 0);
496  handle->alloc_cb((uv_handle_t*) handle, 65536, &handle->tcp.conn.read_buffer);
497  if (handle->tcp.conn.read_buffer.base == NULL ||
498  handle->tcp.conn.read_buffer.len == 0) {
499  handle->read_cb((uv_stream_t*) handle, UV_ENOBUFS, &handle->tcp.conn.read_buffer);
500  return;
501  }
502  assert(handle->tcp.conn.read_buffer.base != NULL);
503  buf = handle->tcp.conn.read_buffer;
504  } else {
505  handle->flags |= UV_HANDLE_ZERO_READ;
506  buf.base = (char*) &uv_zero_;
507  buf.len = 0;
508  }
509 
510  /* Prepare the overlapped structure. */
511  memset(&(req->u.io.overlapped), 0, sizeof(req->u.io.overlapped));
512  if (handle->flags & UV_HANDLE_EMULATE_IOCP) {
513  assert(req->event_handle != NULL);
514  req->u.io.overlapped.hEvent = (HANDLE) ((ULONG_PTR) req->event_handle | 1);
515  }
516 
517  flags = 0;
518  result = WSARecv(handle->socket,
519  (WSABUF*)&buf,
520  1,
521  &bytes,
522  &flags,
523  &req->u.io.overlapped,
524  NULL);
525 
526  handle->flags |= UV_HANDLE_READ_PENDING;
527  handle->reqs_pending++;
528 
529  if (UV_SUCCEEDED_WITHOUT_IOCP(result == 0)) {
530  /* Process the req without IOCP. */
531  req->u.io.overlapped.InternalHigh = bytes;
533  } else if (UV_SUCCEEDED_WITH_IOCP(result == 0)) {
534  /* The req will be processed with IOCP. */
535  if (handle->flags & UV_HANDLE_EMULATE_IOCP &&
536  req->wait_handle == INVALID_HANDLE_VALUE &&
537  !RegisterWaitForSingleObject(&req->wait_handle,
538  req->event_handle, post_completion, (void*) req,
539  INFINITE, WT_EXECUTEINWAITTHREAD)) {
540  SET_REQ_ERROR(req, GetLastError());
542  }
543  } else {
544  /* Make this req pending reporting an error. */
545  SET_REQ_ERROR(req, WSAGetLastError());
547  }
548 }
549 
550 
552  struct linger l = { 1, 0 };
553 
554  /* Disallow setting SO_LINGER to zero due to some platform inconsistencies */
555  if (handle->flags & UV_HANDLE_SHUTTING)
556  return UV_EINVAL;
557 
558  if (0 != setsockopt(handle->socket, SOL_SOCKET, SO_LINGER, (const char*)&l, sizeof(l)))
559  return uv_translate_sys_error(WSAGetLastError());
560 
561  uv_close((uv_handle_t*) handle, close_cb);
562  return 0;
563 }
564 
565 
567  unsigned int i, simultaneous_accepts;
568  uv_tcp_accept_t* req;
569  int err;
570 
571  assert(backlog > 0);
572 
573  if (handle->flags & UV_HANDLE_LISTENING) {
574  handle->stream.serv.connection_cb = cb;
575  }
576 
577  if (handle->flags & UV_HANDLE_READING) {
578  return WSAEISCONN;
579  }
580 
581  if (handle->delayed_error) {
582  return handle->delayed_error;
583  }
584 
585  if (!(handle->flags & UV_HANDLE_BOUND)) {
587  (const struct sockaddr*) &uv_addr_ip4_any_,
588  sizeof(uv_addr_ip4_any_),
589  0);
590  if (err)
591  return err;
592  if (handle->delayed_error)
593  return handle->delayed_error;
594  }
595 
596  if (!handle->tcp.serv.func_acceptex) {
597  if (!uv_get_acceptex_function(handle->socket, &handle->tcp.serv.func_acceptex)) {
598  return WSAEAFNOSUPPORT;
599  }
600  }
601 
602  if (!(handle->flags & UV_HANDLE_SHARED_TCP_SOCKET) &&
603  listen(handle->socket, backlog) == SOCKET_ERROR) {
604  return WSAGetLastError();
605  }
606 
607  handle->flags |= UV_HANDLE_LISTENING;
608  handle->stream.serv.connection_cb = cb;
610 
611  simultaneous_accepts = handle->flags & UV_HANDLE_TCP_SINGLE_ACCEPT ? 1
613 
614  if (handle->tcp.serv.accept_reqs == NULL) {
615  handle->tcp.serv.accept_reqs =
616  uv__malloc(uv_simultaneous_server_accepts * sizeof(uv_tcp_accept_t));
617  if (!handle->tcp.serv.accept_reqs) {
618  uv_fatal_error(ERROR_OUTOFMEMORY, "uv__malloc");
619  }
620 
621  for (i = 0; i < simultaneous_accepts; i++) {
622  req = &handle->tcp.serv.accept_reqs[i];
623  UV_REQ_INIT(req, UV_ACCEPT);
624  req->accept_socket = INVALID_SOCKET;
625  req->data = handle;
626 
627  req->wait_handle = INVALID_HANDLE_VALUE;
628  if (handle->flags & UV_HANDLE_EMULATE_IOCP) {
629  req->event_handle = CreateEvent(NULL, 0, 0, NULL);
630  if (req->event_handle == NULL) {
631  uv_fatal_error(GetLastError(), "CreateEvent");
632  }
633  } else {
634  req->event_handle = NULL;
635  }
636 
638  }
639 
640  /* Initialize other unused requests too, because uv_tcp_endgame doesn't
641  * know how many requests were initialized, so it will try to clean up
642  * {uv_simultaneous_server_accepts} requests. */
643  for (i = simultaneous_accepts; i < uv_simultaneous_server_accepts; i++) {
644  req = &handle->tcp.serv.accept_reqs[i];
645  UV_REQ_INIT(req, UV_ACCEPT);
646  req->accept_socket = INVALID_SOCKET;
647  req->data = handle;
648  req->wait_handle = INVALID_HANDLE_VALUE;
649  req->event_handle = NULL;
650  }
651  }
652 
653  return 0;
654 }
655 
656 
657 int uv_tcp_accept(uv_tcp_t* server, uv_tcp_t* client) {
658  uv_loop_t* loop = server->loop;
659  int err = 0;
660  int family;
661 
662  uv_tcp_accept_t* req = server->tcp.serv.pending_accepts;
663 
664  if (!req) {
665  /* No valid connections found, so we error out. */
666  return WSAEWOULDBLOCK;
667  }
668 
669  if (req->accept_socket == INVALID_SOCKET) {
670  return WSAENOTCONN;
671  }
672 
673  if (server->flags & UV_HANDLE_IPV6) {
674  family = AF_INET6;
675  } else {
676  family = AF_INET;
677  }
678 
679  err = uv_tcp_set_socket(client->loop,
680  client,
681  req->accept_socket,
682  family,
683  0);
684  if (err) {
685  closesocket(req->accept_socket);
686  } else {
687  uv_connection_init((uv_stream_t*) client);
688  /* AcceptEx() implicitly binds the accepted socket. */
690  }
691 
692  /* Prepare the req to pick up a new connection */
693  server->tcp.serv.pending_accepts = req->next_pending;
694  req->next_pending = NULL;
695  req->accept_socket = INVALID_SOCKET;
696 
697  if (!(server->flags & UV_HANDLE_CLOSING)) {
698  /* Check if we're in a middle of changing the number of pending accepts. */
699  if (!(server->flags & UV_HANDLE_TCP_ACCEPT_STATE_CHANGING)) {
700  uv_tcp_queue_accept(server, req);
701  } else {
702  /* We better be switching to a single pending accept. */
703  assert(server->flags & UV_HANDLE_TCP_SINGLE_ACCEPT);
704 
705  server->tcp.serv.processed_accepts++;
706 
707  if (server->tcp.serv.processed_accepts >= uv_simultaneous_server_accepts) {
708  server->tcp.serv.processed_accepts = 0;
709  /*
710  * All previously queued accept requests are now processed.
711  * We now switch to queueing just a single accept.
712  */
713  uv_tcp_queue_accept(server, &server->tcp.serv.accept_reqs[0]);
714  server->flags &= ~UV_HANDLE_TCP_ACCEPT_STATE_CHANGING;
715  server->flags |= UV_HANDLE_TCP_SINGLE_ACCEPT;
716  }
717  }
718  }
719 
720  loop->active_tcp_streams++;
721 
722  return err;
723 }
724 
725 
727  uv_read_cb read_cb) {
728  uv_loop_t* loop = handle->loop;
729 
730  handle->flags |= UV_HANDLE_READING;
731  handle->read_cb = read_cb;
732  handle->alloc_cb = alloc_cb;
734 
735  /* If reading was stopped and then started again, there could still be a read
736  * request pending. */
737  if (!(handle->flags & UV_HANDLE_READ_PENDING)) {
738  if (handle->flags & UV_HANDLE_EMULATE_IOCP &&
739  handle->read_req.event_handle == NULL) {
740  handle->read_req.event_handle = CreateEvent(NULL, 0, 0, NULL);
741  if (handle->read_req.event_handle == NULL) {
742  uv_fatal_error(GetLastError(), "CreateEvent");
743  }
744  }
746  }
747 
748  return 0;
749 }
750 
751 static int uv__is_loopback(const struct sockaddr_storage* storage) {
752  const struct sockaddr_in* in4;
753  const struct sockaddr_in6* in6;
754  int i;
755 
756  if (storage->ss_family == AF_INET) {
757  in4 = (const struct sockaddr_in*) storage;
758  return in4->sin_addr.S_un.S_un_b.s_b1 == 127;
759  }
760  if (storage->ss_family == AF_INET6) {
761  in6 = (const struct sockaddr_in6*) storage;
762  for (i = 0; i < 7; ++i) {
763  if (in6->sin6_addr.u.Word[i] != 0)
764  return 0;
765  }
766  return in6->sin6_addr.u.Word[7] == htons(1);
767  }
768  return 0;
769 }
770 
771 // Check if Windows version is 10.0.16299 or later
773  OSVERSIONINFOW os_info;
774  if (!pRtlGetVersion)
775  return 0;
776  pRtlGetVersion(&os_info);
777  if (os_info.dwMajorVersion < 10)
778  return 0;
779  if (os_info.dwMajorVersion > 10)
780  return 1;
781  if (os_info.dwMinorVersion > 0)
782  return 1;
783  return os_info.dwBuildNumber >= 16299;
784 }
785 
787  uv_tcp_t* handle,
788  const struct sockaddr* addr,
789  unsigned int addrlen,
790  uv_connect_cb cb) {
791  uv_loop_t* loop = handle->loop;
792  TCP_INITIAL_RTO_PARAMETERS retransmit_ioctl;
793  const struct sockaddr* bind_addr;
794  struct sockaddr_storage converted;
795  BOOL success;
796  DWORD bytes;
797  int err;
798 
800  if (err)
801  return err;
802 
803  if (handle->delayed_error) {
804  return handle->delayed_error;
805  }
806 
807  if (!(handle->flags & UV_HANDLE_BOUND)) {
808  if (addrlen == sizeof(uv_addr_ip4_any_)) {
809  bind_addr = (const struct sockaddr*) &uv_addr_ip4_any_;
810  } else if (addrlen == sizeof(uv_addr_ip6_any_)) {
811  bind_addr = (const struct sockaddr*) &uv_addr_ip6_any_;
812  } else {
813  abort();
814  }
815  err = uv_tcp_try_bind(handle, bind_addr, addrlen, 0);
816  if (err)
817  return err;
818  if (handle->delayed_error)
819  return handle->delayed_error;
820  }
821 
822  if (!handle->tcp.conn.func_connectex) {
823  if (!uv_get_connectex_function(handle->socket, &handle->tcp.conn.func_connectex)) {
824  return WSAEAFNOSUPPORT;
825  }
826  }
827 
828  /* This makes connect() fail instantly if the target port on the localhost
829  * is not reachable, instead of waiting for 2s. We do not care if this fails.
830  * This only works on Windows version 10.0.16299 and later.
831  */
833  memset(&retransmit_ioctl, 0, sizeof(retransmit_ioctl));
834  retransmit_ioctl.Rtt = TCP_INITIAL_RTO_NO_SYN_RETRANSMISSIONS;
836  WSAIoctl(handle->socket,
838  &retransmit_ioctl,
839  sizeof(retransmit_ioctl),
840  NULL,
841  0,
842  &bytes,
843  NULL,
844  NULL);
845  }
846 
847  UV_REQ_INIT(req, UV_CONNECT);
848  req->handle = (uv_stream_t*) handle;
849  req->cb = cb;
850  memset(&req->u.io.overlapped, 0, sizeof(req->u.io.overlapped));
851 
852  success = handle->tcp.conn.func_connectex(handle->socket,
853  (const struct sockaddr*) &converted,
854  addrlen,
855  NULL,
856  0,
857  &bytes,
858  &req->u.io.overlapped);
859 
860  if (UV_SUCCEEDED_WITHOUT_IOCP(success)) {
861  /* Process the req without IOCP. */
862  handle->reqs_pending++;
865  } else if (UV_SUCCEEDED_WITH_IOCP(success)) {
866  /* The req will be processed with IOCP. */
867  handle->reqs_pending++;
869  } else {
870  return WSAGetLastError();
871  }
872 
873  return 0;
874 }
875 
876 
878  struct sockaddr* name,
879  int* namelen) {
880 
881  return uv__getsockpeername((const uv_handle_t*) handle,
882  getsockname,
883  name,
884  namelen,
885  handle->delayed_error);
886 }
887 
888 
890  struct sockaddr* name,
891  int* namelen) {
892 
893  return uv__getsockpeername((const uv_handle_t*) handle,
894  getpeername,
895  name,
896  namelen,
897  handle->delayed_error);
898 }
899 
900 
902  uv_write_t* req,
903  uv_tcp_t* handle,
904  const uv_buf_t bufs[],
905  unsigned int nbufs,
906  uv_write_cb cb) {
907  int result;
908  DWORD bytes;
909 
910  UV_REQ_INIT(req, UV_WRITE);
911  req->handle = (uv_stream_t*) handle;
912  req->cb = cb;
913 
914  /* Prepare the overlapped structure. */
915  memset(&(req->u.io.overlapped), 0, sizeof(req->u.io.overlapped));
916  if (handle->flags & UV_HANDLE_EMULATE_IOCP) {
917  req->event_handle = CreateEvent(NULL, 0, 0, NULL);
918  if (req->event_handle == NULL) {
919  uv_fatal_error(GetLastError(), "CreateEvent");
920  }
921  req->u.io.overlapped.hEvent = (HANDLE) ((ULONG_PTR) req->event_handle | 1);
922  req->wait_handle = INVALID_HANDLE_VALUE;
923  }
924 
925  result = WSASend(handle->socket,
926  (WSABUF*) bufs,
927  nbufs,
928  &bytes,
929  0,
930  &req->u.io.overlapped,
931  NULL);
932 
933  if (UV_SUCCEEDED_WITHOUT_IOCP(result == 0)) {
934  /* Request completed immediately. */
935  req->u.io.queued_bytes = 0;
936  handle->reqs_pending++;
937  handle->stream.conn.write_reqs_pending++;
940  } else if (UV_SUCCEEDED_WITH_IOCP(result == 0)) {
941  /* Request queued by the kernel. */
942  req->u.io.queued_bytes = uv__count_bufs(bufs, nbufs);
943  handle->reqs_pending++;
944  handle->stream.conn.write_reqs_pending++;
946  handle->write_queue_size += req->u.io.queued_bytes;
947  if (handle->flags & UV_HANDLE_EMULATE_IOCP &&
948  !RegisterWaitForSingleObject(&req->wait_handle,
949  req->event_handle, post_write_completion, (void*) req,
950  INFINITE, WT_EXECUTEINWAITTHREAD | WT_EXECUTEONLYONCE)) {
951  SET_REQ_ERROR(req, GetLastError());
953  }
954  } else {
955  /* Send failed due to an error, report it later */
956  req->u.io.queued_bytes = 0;
957  handle->reqs_pending++;
958  handle->stream.conn.write_reqs_pending++;
960  SET_REQ_ERROR(req, WSAGetLastError());
962  }
963 
964  return 0;
965 }
966 
967 
969  const uv_buf_t bufs[],
970  unsigned int nbufs) {
971  int result;
972  DWORD bytes;
973 
974  if (handle->stream.conn.write_reqs_pending > 0)
975  return UV_EAGAIN;
976 
977  result = WSASend(handle->socket,
978  (WSABUF*) bufs,
979  nbufs,
980  &bytes,
981  0,
982  NULL,
983  NULL);
984 
985  if (result == SOCKET_ERROR)
986  return uv_translate_sys_error(WSAGetLastError());
987  else
988  return bytes;
989 }
990 
991 
993  uv_req_t* req) {
994  DWORD bytes, flags, err;
995  uv_buf_t buf;
996  int count;
997 
998  assert(handle->type == UV_TCP);
999 
1000  handle->flags &= ~UV_HANDLE_READ_PENDING;
1001 
1002  if (!REQ_SUCCESS(req)) {
1003  /* An error occurred doing the read. */
1004  if ((handle->flags & UV_HANDLE_READING) ||
1005  !(handle->flags & UV_HANDLE_ZERO_READ)) {
1006  handle->flags &= ~UV_HANDLE_READING;
1008  buf = (handle->flags & UV_HANDLE_ZERO_READ) ?
1009  uv_buf_init(NULL, 0) : handle->tcp.conn.read_buffer;
1010 
1012 
1013  if (err == WSAECONNABORTED) {
1014  /* Turn WSAECONNABORTED into UV_ECONNRESET to be consistent with Unix.
1015  */
1016  err = WSAECONNRESET;
1017  }
1018 
1019  handle->read_cb((uv_stream_t*)handle,
1021  &buf);
1022  }
1023  } else {
1024  if (!(handle->flags & UV_HANDLE_ZERO_READ)) {
1025  /* The read was done with a non-zero buffer length. */
1026  if (req->u.io.overlapped.InternalHigh > 0) {
1027  /* Successful read */
1028  handle->read_cb((uv_stream_t*)handle,
1029  req->u.io.overlapped.InternalHigh,
1030  &handle->tcp.conn.read_buffer);
1031  /* Read again only if bytes == buf.len */
1032  if (req->u.io.overlapped.InternalHigh < handle->tcp.conn.read_buffer.len) {
1033  goto done;
1034  }
1035  } else {
1036  /* Connection closed */
1037  if (handle->flags & UV_HANDLE_READING) {
1038  handle->flags &= ~UV_HANDLE_READING;
1040  }
1041  handle->flags &= ~UV_HANDLE_READABLE;
1042 
1043  buf.base = 0;
1044  buf.len = 0;
1045  handle->read_cb((uv_stream_t*)handle, UV_EOF, &handle->tcp.conn.read_buffer);
1046  goto done;
1047  }
1048  }
1049 
1050  /* Do nonblocking reads until the buffer is empty */
1051  count = 32;
1052  while ((handle->flags & UV_HANDLE_READING) && (count-- > 0)) {
1053  buf = uv_buf_init(NULL, 0);
1054  handle->alloc_cb((uv_handle_t*) handle, 65536, &buf);
1055  if (buf.base == NULL || buf.len == 0) {
1056  handle->read_cb((uv_stream_t*) handle, UV_ENOBUFS, &buf);
1057  break;
1058  }
1059  assert(buf.base != NULL);
1060 
1061  flags = 0;
1062  if (WSARecv(handle->socket,
1063  (WSABUF*)&buf,
1064  1,
1065  &bytes,
1066  &flags,
1067  NULL,
1068  NULL) != SOCKET_ERROR) {
1069  if (bytes > 0) {
1070  /* Successful read */
1071  handle->read_cb((uv_stream_t*)handle, bytes, &buf);
1072  /* Read again only if bytes == buf.len */
1073  if (bytes < buf.len) {
1074  break;
1075  }
1076  } else {
1077  /* Connection closed */
1080 
1081  handle->read_cb((uv_stream_t*)handle, UV_EOF, &buf);
1082  break;
1083  }
1084  } else {
1085  err = WSAGetLastError();
1086  if (err == WSAEWOULDBLOCK) {
1087  /* Read buffer was completely empty, report a 0-byte read. */
1088  handle->read_cb((uv_stream_t*)handle, 0, &buf);
1089  } else {
1090  /* Ouch! serious error. */
1091  handle->flags &= ~UV_HANDLE_READING;
1093 
1094  if (err == WSAECONNABORTED) {
1095  /* Turn WSAECONNABORTED into UV_ECONNRESET to be consistent with
1096  * Unix. */
1097  err = WSAECONNRESET;
1098  }
1099 
1100  handle->read_cb((uv_stream_t*)handle,
1102  &buf);
1103  }
1104  break;
1105  }
1106  }
1107 
1108 done:
1109  /* Post another read if still reading and not closing. */
1110  if ((handle->flags & UV_HANDLE_READING) &&
1111  !(handle->flags & UV_HANDLE_READ_PENDING)) {
1113  }
1114  }
1115 
1117 }
1118 
1119 
1121  uv_write_t* req) {
1122  int err;
1123 
1124  assert(handle->type == UV_TCP);
1125 
1126  assert(handle->write_queue_size >= req->u.io.queued_bytes);
1127  handle->write_queue_size -= req->u.io.queued_bytes;
1128 
1130 
1131  if (handle->flags & UV_HANDLE_EMULATE_IOCP) {
1132  if (req->wait_handle != INVALID_HANDLE_VALUE) {
1133  UnregisterWait(req->wait_handle);
1134  req->wait_handle = INVALID_HANDLE_VALUE;
1135  }
1136  if (req->event_handle != NULL) {
1137  CloseHandle(req->event_handle);
1138  req->event_handle = NULL;
1139  }
1140  }
1141 
1142  if (req->cb) {
1144  if (err == UV_ECONNABORTED) {
1145  /* use UV_ECANCELED for consistency with Unix */
1146  err = UV_ECANCELED;
1147  }
1148  req->cb(req, err);
1149  }
1150 
1151  handle->stream.conn.write_reqs_pending--;
1152  if (handle->stream.conn.shutdown_req != NULL &&
1153  handle->stream.conn.write_reqs_pending == 0) {
1155  }
1156 
1158 }
1159 
1160 
1162  uv_req_t* raw_req) {
1163  uv_tcp_accept_t* req = (uv_tcp_accept_t*) raw_req;
1164  int err;
1165 
1166  assert(handle->type == UV_TCP);
1167 
1168  /* If handle->accepted_socket is not a valid socket, then uv_queue_accept
1169  * must have failed. This is a serious error. We stop accepting connections
1170  * and report this error to the connection callback. */
1171  if (req->accept_socket == INVALID_SOCKET) {
1172  if (handle->flags & UV_HANDLE_LISTENING) {
1173  handle->flags &= ~UV_HANDLE_LISTENING;
1175  if (handle->stream.serv.connection_cb) {
1177  handle->stream.serv.connection_cb((uv_stream_t*)handle,
1179  }
1180  }
1181  } else if (REQ_SUCCESS(req) &&
1182  setsockopt(req->accept_socket,
1183  SOL_SOCKET,
1184  SO_UPDATE_ACCEPT_CONTEXT,
1185  (char*)&handle->socket,
1186  sizeof(handle->socket)) == 0) {
1187  req->next_pending = handle->tcp.serv.pending_accepts;
1188  handle->tcp.serv.pending_accepts = req;
1189 
1190  /* Accept and SO_UPDATE_ACCEPT_CONTEXT were successful. */
1191  if (handle->stream.serv.connection_cb) {
1192  handle->stream.serv.connection_cb((uv_stream_t*)handle, 0);
1193  }
1194  } else {
1195  /* Error related to accepted socket is ignored because the server socket
1196  * may still be healthy. If the server socket is broken uv_queue_accept
1197  * will detect it. */
1198  closesocket(req->accept_socket);
1199  req->accept_socket = INVALID_SOCKET;
1200  if (handle->flags & UV_HANDLE_LISTENING) {
1202  }
1203  }
1204 
1206 }
1207 
1208 
1210  uv_connect_t* req) {
1211  int err;
1212 
1213  assert(handle->type == UV_TCP);
1214 
1216 
1217  err = 0;
1218  if (REQ_SUCCESS(req)) {
1219  if (handle->flags & UV_HANDLE_CLOSING) {
1220  /* use UV_ECANCELED for consistency with Unix */
1221  err = ERROR_OPERATION_ABORTED;
1222  } else if (setsockopt(handle->socket,
1223  SOL_SOCKET,
1225  NULL,
1226  0) == 0) {
1229  loop->active_tcp_streams++;
1230  } else {
1231  err = WSAGetLastError();
1232  }
1233  } else {
1235  }
1237 
1239 }
1240 
1241 
1243  int target_pid,
1244  uv__ipc_socket_xfer_type_t* xfer_type,
1245  uv__ipc_socket_xfer_info_t* xfer_info) {
1246  if (handle->flags & UV_HANDLE_CONNECTION) {
1248  } else {
1249  *xfer_type = UV__IPC_SOCKET_XFER_TCP_SERVER;
1250  /* We're about to share the socket with another process. Because this is a
1251  * listening socket, we assume that the other process will be accepting
1252  * connections on it. Thus, before sharing the socket with another process,
1253  * we call listen here in the parent process. */
1254  if (!(handle->flags & UV_HANDLE_LISTENING)) {
1255  if (!(handle->flags & UV_HANDLE_BOUND)) {
1256  return ERROR_NOT_SUPPORTED;
1257  }
1258  if (handle->delayed_error == 0 &&
1259  listen(handle->socket, SOMAXCONN) == SOCKET_ERROR) {
1260  handle->delayed_error = WSAGetLastError();
1261  }
1262  }
1263  }
1264 
1265  if (WSADuplicateSocketW(handle->socket, target_pid, &xfer_info->socket_info))
1266  return WSAGetLastError();
1267  xfer_info->delayed_error = handle->delayed_error;
1268 
1269  /* Mark the local copy of the handle as 'shared' so we behave in a way that's
1270  * friendly to the process(es) that we share the socket with. */
1272 
1273  return 0;
1274 }
1275 
1276 
1278  uv__ipc_socket_xfer_type_t xfer_type,
1279  uv__ipc_socket_xfer_info_t* xfer_info) {
1280  int err;
1281  SOCKET socket;
1282 
1283  assert(xfer_type == UV__IPC_SOCKET_XFER_TCP_SERVER ||
1284  xfer_type == UV__IPC_SOCKET_XFER_TCP_CONNECTION);
1285 
1286  socket = WSASocketW(FROM_PROTOCOL_INFO,
1287  FROM_PROTOCOL_INFO,
1288  FROM_PROTOCOL_INFO,
1289  &xfer_info->socket_info,
1290  0,
1291  WSA_FLAG_OVERLAPPED);
1292 
1293  if (socket == INVALID_SOCKET) {
1294  return WSAGetLastError();
1295  }
1296 
1298  tcp->loop, tcp, socket, xfer_info->socket_info.iAddressFamily, 1);
1299  if (err) {
1300  closesocket(socket);
1301  return err;
1302  }
1303 
1304  tcp->delayed_error = xfer_info->delayed_error;
1306 
1307  if (xfer_type == UV__IPC_SOCKET_XFER_TCP_CONNECTION) {
1309  tcp->flags |= UV_HANDLE_READABLE | UV_HANDLE_WRITABLE;
1310  }
1311 
1312  tcp->loop->active_tcp_streams++;
1313  return 0;
1314 }
1315 
1316 
1317 int uv_tcp_nodelay(uv_tcp_t* handle, int enable) {
1318  int err;
1319 
1320  if (handle->socket != INVALID_SOCKET) {
1321  err = uv__tcp_nodelay(handle, handle->socket, enable);
1322  if (err)
1323  return err;
1324  }
1325 
1326  if (enable) {
1327  handle->flags |= UV_HANDLE_TCP_NODELAY;
1328  } else {
1329  handle->flags &= ~UV_HANDLE_TCP_NODELAY;
1330  }
1331 
1332  return 0;
1333 }
1334 
1335 
1336 int uv_tcp_keepalive(uv_tcp_t* handle, int enable, unsigned int delay) {
1337  int err;
1338 
1339  if (handle->socket != INVALID_SOCKET) {
1340  err = uv__tcp_keepalive(handle, handle->socket, enable, delay);
1341  if (err)
1342  return err;
1343  }
1344 
1345  if (enable) {
1346  handle->flags |= UV_HANDLE_TCP_KEEPALIVE;
1347  } else {
1348  handle->flags &= ~UV_HANDLE_TCP_KEEPALIVE;
1349  }
1350 
1351  /* TODO: Store delay if handle->socket isn't created yet. */
1352 
1353  return 0;
1354 }
1355 
1356 
1358  if (handle->flags & UV_HANDLE_CONNECTION) {
1359  return UV_EINVAL;
1360  }
1361 
1362  /* Check if we're already in the desired mode. */
1363  if ((enable && !(handle->flags & UV_HANDLE_TCP_SINGLE_ACCEPT)) ||
1364  (!enable && handle->flags & UV_HANDLE_TCP_SINGLE_ACCEPT)) {
1365  return 0;
1366  }
1367 
1368  /* Don't allow switching from single pending accept to many. */
1369  if (enable) {
1370  return UV_ENOTSUP;
1371  }
1372 
1373  /* Check if we're in a middle of changing the number of pending accepts. */
1375  return 0;
1376  }
1377 
1379 
1380  /* Flip the changing flag if we have already queued multiple accepts. */
1381  if (handle->flags & UV_HANDLE_LISTENING) {
1383  }
1384 
1385  return 0;
1386 }
1387 
1388 
1389 static int uv_tcp_try_cancel_io(uv_tcp_t* tcp) {
1390  SOCKET socket = tcp->socket;
1391  int non_ifs_lsp;
1392 
1393  /* Check if we have any non-IFS LSPs stacked on top of TCP */
1394  non_ifs_lsp = (tcp->flags & UV_HANDLE_IPV6) ? uv_tcp_non_ifs_lsp_ipv6 :
1396 
1397  /* If there are non-ifs LSPs then try to obtain a base handle for the socket.
1398  * This will always fail on Windows XP/3k. */
1399  if (non_ifs_lsp) {
1400  DWORD bytes;
1401  if (WSAIoctl(socket,
1403  NULL,
1404  0,
1405  &socket,
1406  sizeof socket,
1407  &bytes,
1408  NULL,
1409  NULL) != 0) {
1410  /* Failed. We can't do CancelIo. */
1411  return -1;
1412  }
1413  }
1414 
1415  assert(socket != 0 && socket != INVALID_SOCKET);
1416 
1417  if (!CancelIo((HANDLE) socket)) {
1418  return GetLastError();
1419  }
1420 
1421  /* It worked. */
1422  return 0;
1423 }
1424 
1425 
1427  int close_socket = 1;
1428 
1429  if (tcp->flags & UV_HANDLE_READ_PENDING) {
1430  /* In order for winsock to do a graceful close there must not be any any
1431  * pending reads, or the socket must be shut down for writing */
1432  if (!(tcp->flags & UV_HANDLE_SHARED_TCP_SOCKET)) {
1433  /* Just do shutdown on non-shared sockets, which ensures graceful close. */
1434  shutdown(tcp->socket, SD_SEND);
1435 
1436  } else if (uv_tcp_try_cancel_io(tcp) == 0) {
1437  /* In case of a shared socket, we try to cancel all outstanding I/O,. If
1438  * that works, don't close the socket yet - wait for the read req to
1439  * return and close the socket in uv_tcp_endgame. */
1440  close_socket = 0;
1441 
1442  } else {
1443  /* When cancelling isn't possible - which could happen when an LSP is
1444  * present on an old Windows version, we will have to close the socket
1445  * with a read pending. That is not nice because trailing sent bytes may
1446  * not make it to the other side. */
1447  }
1448 
1449  } else if ((tcp->flags & UV_HANDLE_SHARED_TCP_SOCKET) &&
1450  tcp->tcp.serv.accept_reqs != NULL) {
1451  /* Under normal circumstances closesocket() will ensure that all pending
1452  * accept reqs are canceled. However, when the socket is shared the
1453  * presence of another reference to the socket in another process will keep
1454  * the accept reqs going, so we have to ensure that these are canceled. */
1455  if (uv_tcp_try_cancel_io(tcp) != 0) {
1456  /* When cancellation is not possible, there is another option: we can
1457  * close the incoming sockets, which will also cancel the accept
1458  * operations. However this is not cool because we might inadvertently
1459  * close a socket that just accepted a new connection, which will cause
1460  * the connection to be aborted. */
1461  unsigned int i;
1462  for (i = 0; i < uv_simultaneous_server_accepts; i++) {
1463  uv_tcp_accept_t* req = &tcp->tcp.serv.accept_reqs[i];
1464  if (req->accept_socket != INVALID_SOCKET &&
1465  !HasOverlappedIoCompleted(&req->u.io.overlapped)) {
1466  closesocket(req->accept_socket);
1467  req->accept_socket = INVALID_SOCKET;
1468  }
1469  }
1470  }
1471  }
1472 
1473  if (tcp->flags & UV_HANDLE_READING) {
1474  tcp->flags &= ~UV_HANDLE_READING;
1476  }
1477 
1478  if (tcp->flags & UV_HANDLE_LISTENING) {
1479  tcp->flags &= ~UV_HANDLE_LISTENING;
1481  }
1482 
1483  if (close_socket) {
1484  closesocket(tcp->socket);
1485  tcp->socket = INVALID_SOCKET;
1486  tcp->flags |= UV_HANDLE_TCP_SOCKET_CLOSED;
1487  }
1488 
1489  tcp->flags &= ~(UV_HANDLE_READABLE | UV_HANDLE_WRITABLE);
1490  uv__handle_closing(tcp);
1491 
1492  if (tcp->reqs_pending == 0) {
1493  uv_want_endgame(tcp->loop, (uv_handle_t*)tcp);
1494  }
1495 }
1496 
1497 
1499  WSAPROTOCOL_INFOW protocol_info;
1500  int opt_len;
1501  int err;
1502  struct sockaddr_storage saddr;
1503  int saddr_len;
1504 
1505  /* Detect the address family of the socket. */
1506  opt_len = (int) sizeof protocol_info;
1507  if (getsockopt(sock,
1508  SOL_SOCKET,
1509  SO_PROTOCOL_INFOW,
1510  (char*) &protocol_info,
1511  &opt_len) == SOCKET_ERROR) {
1512  return uv_translate_sys_error(GetLastError());
1513  }
1514 
1515  err = uv_tcp_set_socket(handle->loop,
1516  handle,
1517  sock,
1518  protocol_info.iAddressFamily,
1519  1);
1520  if (err) {
1521  return uv_translate_sys_error(err);
1522  }
1523 
1524  /* Support already active socket. */
1525  saddr_len = sizeof(saddr);
1526  if (!uv_tcp_getsockname(handle, (struct sockaddr*) &saddr, &saddr_len)) {
1527  /* Socket is already bound. */
1528  handle->flags |= UV_HANDLE_BOUND;
1529  saddr_len = sizeof(saddr);
1530  if (!uv_tcp_getpeername(handle, (struct sockaddr*) &saddr, &saddr_len)) {
1531  /* Socket is already connected. */
1534  }
1535  }
1536 
1537  return 0;
1538 }
1539 
1540 
1541 /* This function is an egress point, i.e. it returns libuv errors rather than
1542  * system errors.
1543  */
1545  const struct sockaddr* addr,
1546  unsigned int addrlen,
1547  unsigned int flags) {
1548  int err;
1549 
1551  if (err)
1552  return uv_translate_sys_error(err);
1553 
1554  return 0;
1555 }
1556 
1557 
1558 /* This function is an egress point, i.e. it returns libuv errors rather than
1559  * system errors.
1560  */
1562  uv_tcp_t* handle,
1563  const struct sockaddr* addr,
1564  unsigned int addrlen,
1565  uv_connect_cb cb) {
1566  int err;
1567 
1569  if (err)
1570  return uv_translate_sys_error(err);
1571 
1572  return 0;
1573 }
lzma_index ** i
Definition: index.h:629
static bool err
Definition: armass.c:435
static ut8 bytes[32]
Definition: asm_arc.c:23
static mcore_handle handle
Definition: asm_mcore.c:8
#define NULL
Definition: cris-opc.c:27
#define r
Definition: crypto_rc6.c:12
static static sync static getppid static getegid const char static filename char static len const char char static bufsiz static mask static vfork const void static prot static getpgrp const char static swapflags static arg static fd static protocol static who struct sockaddr static addrlen static backlog struct timeval struct timezone static tz const struct iovec static count static mode const void const struct sockaddr static tolen const char static pathname void static offset struct stat static buf void long static basep static whence static length const void static len static semflg const void static shmflg const struct timespec req
Definition: sflib.h:128
static static sync static getppid static getegid const char static filename char static len const char char static bufsiz static mask static vfork const void static prot static getpgrp const char static swapflags static arg static fd domain
Definition: sflib.h:79
static static sync static getppid static getegid const char static filename char static len const char char static bufsiz static mask static vfork const void static prot static getpgrp const char static swapflags static arg static fd static protocol static who struct sockaddr static addrlen static backlog struct timeval struct timezone static tz const struct iovec static count static mode const void const struct sockaddr static tolen const char static pathname void count
Definition: sflib.h:98
static static sync static getppid static getegid const char static filename char static len const char char static bufsiz static mask static vfork const void static prot static getpgrp const char static swapflags static arg static fd socket
Definition: sflib.h:79
struct tab * done
Definition: enough.c:233
void uv_fatal_error(const int errorno, const char *syscall)
Definition: error.c:35
#define DECREASE_ACTIVE_COUNT(loop, handle)
Definition: handle-inl.h:32
static INLINE void uv_want_endgame(uv_loop_t *loop, uv_handle_t *handle)
Definition: handle-inl.h:88
#define INCREASE_ACTIVE_COUNT(loop, handle)
Definition: handle-inl.h:42
#define uv__handle_close(handle)
Definition: handle-inl.h:76
#define DECREASE_PENDING_REQ_COUNT(handle)
Definition: handle-inl.h:51
#define uv__handle_closing(handle)
Definition: handle-inl.h:63
voidpf void * buf
Definition: ioapi.h:138
#define INVALID_HANDLE_VALUE
Definition: iowin32.c:21
return memset(p, 0, total)
static const void static count static fd struct stat static buf struct pollfd unsigned static timeout void static offset void static length char static len const struct iovec static count unsigned long static filedes static sched_yield static flags static oldfd static pause unsigned static seconds static protocol struct sockaddr addrlen
Definition: sflib.h:75
assert(limit<=UINT32_MAX/2)
#define QUEUE_REMOVE(q)
Definition: queue.h:101
static INLINE void uv_insert_pending_req(uv_loop_t *loop, uv_req_t *req)
Definition: req-inl.h:90
#define REQ_SUCCESS(req)
Definition: req-inl.h:46
#define GET_REQ_SOCK_ERROR(req)
Definition: req-inl.h:52
#define UNREGISTER_HANDLE_REQ(loop, handle, req)
Definition: req-inl.h:62
#define UV_SUCCEEDED_WITH_IOCP(result)
Definition: req-inl.h:72
#define REGISTER_HANDLE_REQ(loop, handle, req)
Definition: req-inl.h:56
#define UV_SUCCEEDED_WITHOUT_IOCP(result)
Definition: req-inl.h:69
#define SET_REQ_ERROR(req, error)
Definition: req-inl.h:34
#define SD_SEND
Definition: rz_socket.h:41
static struct sockaddr static addrlen listen
Definition: sfsocketcall.h:116
static struct sockaddr static addrlen static backlog const void static flags void flags
Definition: sfsocketcall.h:123
static bind
Definition: sfsocketcall.h:114
static int
Definition: sfsocketcall.h:114
#define SO_KEEPALIVE
Definition: sftypes.h:437
#define htons(x)
Definition: sftypes.h:475
#define SO_LINGER
Definition: sftypes.h:441
@ SOCK_STREAM
Definition: sftypes.h:224
#define SOL_SOCKET
Definition: sftypes.h:427
#define AF_INET
Definition: sftypes.h:287
#define AF_INET6
Definition: sftypes.h:295
#define AF_UNSPEC
Definition: sftypes.h:283
#define FIONBIO
Definition: sftypes.h:736
static INLINE void uv_stream_init(uv_loop_t *loop, uv_stream_t *handle, uv_handle_type type)
Definition: stream-inl.h:33
static INLINE void uv_connection_init(uv_stream_t *handle)
Definition: stream-inl.h:49
Definition: z80asm.h:102
struct in6_addr sin6_addr
Definition: sftypes.h:375
struct in_addr sin_addr
Definition: sftypes.h:344
WSAPROTOCOL_INFOW socket_info
Definition: internal.h:71
Definition: unix.h:123
Definition: uv.h:1780
Definition: uv.h:407
Definition: uv.h:547
Definition: uv.h:525
uv_loop_t * loop
Definition: main.c:7
int uv__getsockpeername(const uv_handle_t *handle, uv__peersockfunc func, struct sockaddr *name, int *namelen)
Definition: core.c:1490
struct sockaddr_in uv_addr_ip4_any_
Definition: winsock.c:34
uv__ipc_socket_xfer_type_t
Definition: internal.h:64
@ UV__IPC_SOCKET_XFER_TCP_CONNECTION
Definition: internal.h:66
@ UV__IPC_SOCKET_XFER_TCP_SERVER
Definition: internal.h:67
BOOL uv_get_connectex_function(SOCKET socket, LPFN_CONNECTEX *target)
Definition: winsock.c:71
int uv_tcp_non_ifs_lsp_ipv4
Definition: winsock.c:30
int uv_tcp_non_ifs_lsp_ipv6
Definition: winsock.c:31
BOOL uv_get_acceptex_function(SOCKET socket, LPFN_ACCEPTEX *target)
Definition: winsock.c:65
struct sockaddr_in6 uv_addr_ip6_any_
Definition: winsock.c:35
static char bufs[4][128]
Buffers for uint64_to_str() and uint64_to_nicestr()
Definition: util.c:18
int uv__tcp_keepalive(int fd, int on, unsigned int delay)
Definition: tcp.c:380
int uv__tcp_connect(uv_connect_t *req, uv_tcp_t *handle, const struct sockaddr *addr, unsigned int addrlen, uv_connect_cb cb)
Definition: tcp.c:204
int uv__tcp_bind(uv_tcp_t *tcp, const struct sockaddr *addr, unsigned int addrlen, unsigned int flags)
Definition: tcp.c:148
int uv_tcp_init(uv_loop_t *loop, uv_tcp_t *tcp)
Definition: tcp.c:143
int uv_tcp_close_reset(uv_tcp_t *handle, uv_close_cb close_cb)
Definition: tcp.c:311
int uv_tcp_listen(uv_tcp_t *tcp, int backlog, uv_connection_cb cb)
Definition: tcp.c:328
int uv_tcp_nodelay(uv_tcp_t *handle, int on)
Definition: tcp.c:410
int uv_tcp_open(uv_tcp_t *handle, uv_os_sock_t sock)
Definition: tcp.c:267
int uv_tcp_getpeername(const uv_tcp_t *handle, struct sockaddr *name, int *namelen)
Definition: tcp.c:297
int uv_tcp_simultaneous_accepts(uv_tcp_t *handle, int enable)
Definition: tcp.c:450
int uv_tcp_init_ex(uv_loop_t *loop, uv_tcp_t *tcp, unsigned int flags)
Definition: tcp.c:114
int uv_tcp_keepalive(uv_tcp_t *handle, int on, unsigned int delay)
Definition: tcp.c:428
int uv__tcp_nodelay(int fd, int on)
Definition: tcp.c:373
int uv_tcp_getsockname(const uv_tcp_t *handle, struct sockaddr *name, int *namelen)
Definition: tcp.c:283
int uv_os_sock_t
Definition: unix.h:129
size_t uv__count_bufs(const uv_buf_t bufs[], unsigned int nbufs)
Definition: uv-common.c:573
void * uv__malloc(size_t size)
Definition: uv-common.c:75
void uv__free(void *ptr)
Definition: uv-common.c:81
@ UV_HANDLE_TCP_KEEPALIVE
Definition: uv-common.h:106
@ UV_HANDLE_ZERO_READ
Definition: uv-common.h:96
@ UV_HANDLE_IPV6
Definition: uv-common.h:102
@ UV_HANDLE_TCP_NODELAY
Definition: uv-common.h:105
@ UV_HANDLE_TCP_SOCKET_CLOSED
Definition: uv-common.h:109
@ UV_HANDLE_TCP_SINGLE_ACCEPT
Definition: uv-common.h:107
@ UV_HANDLE_READING
Definition: uv-common.h:90
@ UV_HANDLE_LISTENING
Definition: uv-common.h:82
@ UV_HANDLE_CLOSING
Definition: uv-common.h:74
@ UV_HANDLE_EMULATE_IOCP
Definition: uv-common.h:97
@ UV_HANDLE_CONNECTION
Definition: uv-common.h:83
@ UV_HANDLE_SHARED_TCP_SOCKET
Definition: uv-common.h:110
@ UV_HANDLE_SHUTTING
Definition: uv-common.h:84
@ UV_HANDLE_TCP_ACCEPT_STATE_CHANGING
Definition: uv-common.h:108
@ UV_HANDLE_WRITABLE
Definition: uv-common.h:93
@ UV_HANDLE_CLOSED
Definition: uv-common.h:75
@ UV_HANDLE_SYNC_BYPASS_IOCP
Definition: uv-common.h:95
@ UV_HANDLE_READ_PENDING
Definition: uv-common.h:94
@ UV_HANDLE_BOUND
Definition: uv-common.h:91
@ UV_HANDLE_READABLE
Definition: uv-common.h:92
#define UV_REQ_INIT(req, typ)
Definition: uv-common.h:322
void(* uv_write_cb)(uv_write_t *req, int status)
Definition: uv.h:315
void(* uv_connection_cb)(uv_stream_t *server, int status)
Definition: uv.h:318
UV_EXTERN int uv_translate_sys_error(int sys_errno)
Definition: core.c:1249
@ UV_TCP_IPV6ONLY
Definition: uv.h:564
UV_EXTERN uv_buf_t uv_buf_init(char *base, unsigned int len)
Definition: uv-common.c:157
void(* uv_read_cb)(uv_stream_t *stream, ssize_t nread, const uv_buf_t *buf)
Definition: uv.h:312
void(* uv_alloc_cb)(uv_handle_t *handle, size_t suggested_size, uv_buf_t *buf)
Definition: uv.h:309
UV_EXTERN void uv_close(uv_handle_t *handle, uv_close_cb close_cb)
Definition: core.c:108
void(* uv_connect_cb)(uv_connect_t *req, int status)
Definition: uv.h:316
void(* uv_close_cb)(uv_handle_t *handle)
Definition: uv.h:319
void uv_tcp_endgame(uv_loop_t *loop, uv_tcp_t *handle)
Definition: tcp.c:208
int uv__tcp_xfer_import(uv_tcp_t *tcp, uv__ipc_socket_xfer_type_t xfer_type, uv__ipc_socket_xfer_info_t *xfer_info)
Definition: tcp.c:1277
const unsigned int uv_simultaneous_server_accepts
Definition: tcp.c:43
static int uv_tcp_try_bind(uv_tcp_t *handle, const struct sockaddr *addr, unsigned int addrlen, unsigned int flags)
Definition: tcp.c:294
static int uv__is_fast_loopback_fail_supported()
Definition: tcp.c:772
int uv__tcp_try_write(uv_tcp_t *handle, const uv_buf_t bufs[], unsigned int nbufs)
Definition: tcp.c:968
static void uv_tcp_queue_read(uv_loop_t *loop, uv_tcp_t *handle)
Definition: tcp.c:477
const unsigned int uv_active_tcp_streams_threshold
Definition: tcp.c:38
int uv__tcp_xfer_export(uv_tcp_t *handle, int target_pid, uv__ipc_socket_xfer_type_t *xfer_type, uv__ipc_socket_xfer_info_t *xfer_info)
Definition: tcp.c:1242
static void CALLBACK post_completion(void *context, BOOLEAN timed_out)
Definition: tcp.c:355
static int uv_tcp_set_socket(uv_loop_t *loop, uv_tcp_t *handle, SOCKET socket, int family, int imported)
Definition: tcp.c:81
void uv_process_tcp_connect_req(uv_loop_t *loop, uv_tcp_t *handle, uv_connect_t *req)
Definition: tcp.c:1209
void uv_tcp_close(uv_loop_t *loop, uv_tcp_t *tcp)
Definition: tcp.c:1426
static int uv_tcp_try_cancel_io(uv_tcp_t *tcp)
Definition: tcp.c:1389
int uv_tcp_read_start(uv_tcp_t *handle, uv_alloc_cb alloc_cb, uv_read_cb read_cb)
Definition: tcp.c:726
static int uv__is_loopback(const struct sockaddr_storage *storage)
Definition: tcp.c:751
void uv_process_tcp_write_req(uv_loop_t *loop, uv_tcp_t *handle, uv_write_t *req)
Definition: tcp.c:1120
int uv_tcp_accept(uv_tcp_t *server, uv_tcp_t *client)
Definition: tcp.c:657
void uv_process_tcp_accept_req(uv_loop_t *loop, uv_tcp_t *handle, uv_req_t *raw_req)
Definition: tcp.c:1161
static void uv_tcp_queue_accept(uv_tcp_t *handle, uv_tcp_accept_t *req)
Definition: tcp.c:393
int uv_tcp_write(uv_loop_t *loop, uv_write_t *req, uv_tcp_t *handle, const uv_buf_t bufs[], unsigned int nbufs, uv_write_cb cb)
Definition: tcp.c:901
static void CALLBACK post_write_completion(void *context, BOOLEAN timed_out)
Definition: tcp.c:374
void uv_process_tcp_read_req(uv_loop_t *loop, uv_tcp_t *handle, uv_req_t *req)
Definition: tcp.c:992
static int uv_tcp_try_connect(uv_connect_t *req, uv_tcp_t *handle, const struct sockaddr *addr, unsigned int addrlen, uv_connect_cb cb)
Definition: tcp.c:786
static char uv_zero_[]
Definition: tcp.c:46
sRtlGetVersion pRtlGetVersion
Definition: winapi.c:29
#define FILE_SKIP_SET_EVENT_ON_HANDLE
Definition: winapi.h:4607
#define FILE_SKIP_COMPLETION_PORT_ON_SUCCESS
Definition: winapi.h:4603
#define TCP_INITIAL_RTO_NO_SYN_RETRANSMISSIONS
Definition: winapi.h:4736
#define SIO_TCP_INITIAL_RTO
Definition: winapi.h:4739
DWORD * HANDLE
DWORD
int uv__convert_to_localhost_if_unspecified(const struct sockaddr *addr, struct sockaddr_storage *storage)
Definition: winsock.c:547
#define IPV6_V6ONLY
Definition: winsock.h:46
#define TCP_KEEPALIVE
Definition: winsock.h:42
#define SIO_BASE_HANDLE
Definition: winsock.h:54
#define SO_UPDATE_CONNECT_CONTEXT
Definition: winsock.h:38
static const char * cb[]
Definition: z80_tab.h:176
static int addr
Definition: z80asm.c:58