/* Excerpt from uv_loop_init() in libuv's src/unix/loop.c: sentinel field
 * setup and the jumps into its goto-based error-unwind chain (elided code
 * marked with "..."). */
    goto fail_metrics_mutex_init;

  heap_init((struct heap*) &loop->timer_heap);
  /* ... */
  uv__update_time(loop);
  loop->async_io_watcher.fd = -1;
  /* ... */
  loop->signal_pipefd[0] = -1;
  loop->signal_pipefd[1] = -1;
  loop->backend_fd = -1;
  /* ... */
  loop->timer_counter = 0;
  /* ... */
    goto fail_platform_init;
  /* ... */
    goto fail_signal_init;
  /* ... */
    goto fail_rwlock_init;
  /* ... */
    goto fail_async_init;
  /* ... */
fail_metrics_mutex_init:
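/* The fail_* jumps above implement C's idiomatic goto-based unwind: each
 * label undoes, in reverse order, only the initializations that succeeded
 * before the failing step. A minimal self-contained sketch of the same
 * pattern (the struct and resource names here are hypothetical, not
 * libuv's):
 */
#include <stdlib.h>

struct two_bufs { void *a; void *b; };

int two_bufs_init(struct two_bufs *tb) {
  tb->a = malloc(16);
  if (tb->a == NULL)
    goto fail_a;

  tb->b = malloc(32);
  if (tb->b == NULL)
    goto fail_b;

  return 0;      /* success: both allocations live */

fail_b:
  free(tb->a);   /* undo the step that had already succeeded */
fail_a:
  return -1;     /* the first step leaves nothing to undo */
}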
/* Excerpt, apparently from uv_loop_fork(): iterate over every registered
 * I/O watcher so it can be re-armed in the child process. */
  for (i = 0; i < loop->nwatchers; i++) {
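/* Watchers need re-arming because kernel-side polling state (e.g. an
 * epoll fd) is not meaningfully shared across fork(). A hedged usage
 * sketch: the child must call uv_loop_fork() before running the
 * inherited loop again.
 */
#include <sys/types.h>
#include <unistd.h>
#include <uv.h>

int continue_loop_in_child(uv_loop_t *loop) {
  pid_t pid = fork();
  if (pid == -1)
    return -1;

  if (pid == 0) {
    /* Child: rebuild backend state before reusing the loop. */
    if (uv_loop_fork(loop) != 0)
      _exit(1);
    uv_run(loop, UV_RUN_DEFAULT);
    _exit(0);
  }

  return 0;  /* parent keeps using its own copy of the loop */
}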
/* Excerpt from uv__loop_close(): release the loop's cached descriptors,
 * resetting each to the -1 sentinel (close calls elided). */
  if (loop->emfile_fd != -1) {
    /* ... */
    loop->emfile_fd = -1;
  }

  if (loop->backend_fd != -1) {
    /* ... */
    loop->backend_fd = -1;
  }
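/* These branches run during final teardown, once no live handles remain.
 * A sketch of the usual shutdown sequence that gets a loop into a
 * closable state:
 */
#include <uv.h>

static void request_close(uv_handle_t *handle, void *arg) {
  (void) arg;
  if (!uv_is_closing(handle))
    uv_close(handle, NULL);
}

int shutdown_loop(uv_loop_t *loop) {
  uv_walk(loop, request_close, NULL);  /* ask every handle to close */
  uv_run(loop, UV_RUN_DEFAULT);        /* let the close callbacks run */
  return uv_loop_close(loop);          /* UV_EBUSY if handles survive */
}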
/* Excerpt from uv__loop_configure(): the UV_LOOP_BLOCK_SIGNAL option
 * accepts only SIGPROF. */
  if (va_arg(ap, int) != SIGPROF)
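/* The check above explains uv_loop_configure() semantics on Unix: any
 * signal other than SIGPROF yields UV_EINVAL. A usage sketch:
 */
#include <signal.h>
#include <uv.h>

int init_profiled_loop(uv_loop_t *loop) {
  int err = uv_loop_init(loop);
  if (err != 0)
    return err;

  /* Block SIGPROF while the loop is blocked polling for events, which
   * reduces sampling-profiler noise from an idle loop. */
  return uv_loop_configure(loop, UV_LOOP_BLOCK_SIGNAL, SIGPROF);
}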
/* Symbols referenced by the excerpts above: */
int uv__platform_loop_init(uv_loop_t *loop)
int uv__io_fork(uv_loop_t *loop)
void uv__platform_loop_delete(uv_loop_t *loop)
int uv__loop_configure(uv_loop_t *loop, uv_loop_option option, va_list ap)
int uv_loop_fork(uv_loop_t *loop)
int uv_loop_init(uv_loop_t *loop)
void uv__loop_close(uv_loop_t *loop)
#define QUEUE_INSERT_TAIL(h, q)
uv__loop_metrics_t loop_metrics        /* member of uv__loop_internal_fields_t */
union { /* anonymous */ } active_reqs  /* member of struct uv_loop_s */
unsigned int active_handles            /* member of struct uv_loop_s */
void uv__signal_loop_cleanup(uv_loop_t *loop)
int uv__signal_loop_fork(uv_loop_t *loop)
void uv__signal_global_once_init(void)
void uv__work_done(uv_async_t *handle)
int uv__async_fork(uv_loop_t *loop)
void uv__async_stop(uv_loop_t *loop)
void *uv__calloc(size_t count, size_t size)
#define uv__handle_unref(h)
#define uv__has_active_reqs(loop)
#define uv__get_internal_fields(loop)
UV_EXTERN void uv_mutex_lock(uv_mutex_t *handle)
UV_EXTERN int uv_rwlock_init(uv_rwlock_t *rwlock)
UV_EXTERN void uv_mutex_destroy(uv_mutex_t *handle)
UV_EXTERN void uv_mutex_unlock(uv_mutex_t *handle)
UV_EXTERN void uv_rwlock_destroy(uv_rwlock_t *rwlock)
UV_EXTERN int uv_async_init(uv_loop_t *, uv_async_t *async, uv_async_cb async_cb)
UV_EXTERN int uv_signal_init(uv_loop_t *loop, uv_signal_t *handle)
UV_EXTERN int uv_mutex_init(uv_mutex_t *handle)
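/* uv_loop_init() wires loop->wq_async to uv__work_done() so that worker
 * threads can wake the loop thread; the mutex and async symbols above
 * support that pattern. A minimal sketch of the same idea in user code
 * (the "pending" counter stands in for a real work queue; all names here
 * are hypothetical):
 */
#include <stdio.h>
#include <uv.h>

static uv_mutex_t queue_lock;
static uv_async_t wakeup;
static int pending;

static void on_wakeup(uv_async_t *handle) {
  int n;
  (void) handle;
  uv_mutex_lock(&queue_lock);    /* drain shared state under the lock */
  n = pending;
  pending = 0;
  uv_mutex_unlock(&queue_lock);
  printf("drained %d item(s) on the loop thread\n", n);
}

void post_work(void) {           /* callable from any thread */
  uv_mutex_lock(&queue_lock);
  pending++;
  uv_mutex_unlock(&queue_lock);
  uv_async_send(&wakeup);        /* wakes the loop; calls may coalesce */
}

int setup_wakeup(uv_loop_t *loop) {
  int err = uv_mutex_init(&queue_lock);
  if (err != 0)
    return err;
  return uv_async_init(loop, &wakeup, on_wakeup);
}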