Rizin — UNIX-like reverse engineering framework and CLI tools.
File: kqueue.c (vendored libuv kqueue/kevent event-loop backend).
This page is a generated source listing; see the repository for the authoritative file.
1 /* Copyright Joyent, Inc. and other Node contributors. All rights reserved.
2  * Permission is hereby granted, free of charge, to any person obtaining a copy
3  * of this software and associated documentation files (the "Software"), to
4  * deal in the Software without restriction, including without limitation the
5  * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
6  * sell copies of the Software, and to permit persons to whom the Software is
7  * furnished to do so, subject to the following conditions:
8  *
9  * The above copyright notice and this permission notice shall be included in
10  * all copies or substantial portions of the Software.
11  *
12  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
13  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
14  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
15  * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
16  * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
17  * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
18  * IN THE SOFTWARE.
19  */
20 
21 #include "uv.h"
22 #include "internal.h"
23 
24 #include <assert.h>
25 #include <stdlib.h>
26 #include <string.h>
27 #include <errno.h>
28 
29 #include <sys/sysctl.h>
30 #include <sys/types.h>
31 #include <sys/event.h>
32 #include <sys/time.h>
33 #include <unistd.h>
34 #include <fcntl.h>
35 #include <time.h>
36 
37 /*
38  * Required on
39  * - Until at least FreeBSD 11.0
40  * - Older versions of Mac OS X
41  *
42  * http://www.boost.org/doc/libs/1_61_0/boost/asio/detail/kqueue_reactor.hpp
43  */
/* EV_OOBAND signals out-of-band (urgent) socket data. Platforms that lack
 * the symbol historically used EV_FLAG1 for the same purpose (see the
 * boost.asio kqueue reactor referenced above). */
#ifndef EV_OOBAND
#define EV_OOBAND EV_FLAG1
#endif

/* Forward declaration: uv__io_poll special-cases watchers whose callback is
 * uv__fs_event (they use EVFILT_VNODE instead of EVFILT_READ). */
static void uv__fs_event(uv_loop_t* loop, uv__io_t* w, unsigned int fflags);
49 
50 
/* Create the kqueue instance that backs this event loop.
 * Returns 0 on success or a negative errno-style error code. */
int uv__kqueue_init(uv_loop_t* loop) {
  loop->backend_fd = kqueue();
  if (loop->backend_fd == -1)
    return UV__ERR(errno);

  /* Don't leak the kqueue fd into child processes. */
  uv__cloexec(loop->backend_fd, 1);

  return 0;
}
60 
61 
#if defined(__APPLE__) && MAC_OS_X_VERSION_MAX_ALLOWED >= 1070
/* Set to 1 (in uv__io_fork) once the process forks after a CFRunLoop-based
 * FSEvents watcher was started; checked before using FSEvents again. */
static int uv__has_forked_with_cfrunloop;
#endif
65 
67  int err;
68  loop->backend_fd = -1;
70  if (err)
71  return err;
72 
73 #if defined(__APPLE__) && MAC_OS_X_VERSION_MAX_ALLOWED >= 1070
74  if (loop->cf_state != NULL) {
75  /* We cannot start another CFRunloop and/or thread in the child
76  process; CF aborts if you try or if you try to touch the thread
77  at all to kill it. So the best we can do is ignore it from now
78  on. This means we can't watch directories in the same way
79  anymore (like other BSDs). It also means we cannot properly
80  clean up the allocated resources; calling
81  uv__fsevents_loop_delete from uv_loop_close will crash the
82  process. So we sidestep the issue by pretending like we never
83  started it in the first place.
84  */
85  uv__store_relaxed(&uv__has_forked_with_cfrunloop, 1);
86  uv__free(loop->cf_state);
87  loop->cf_state = NULL;
88  }
89 #endif /* #if defined(__APPLE__) && MAC_OS_X_VERSION_MAX_ALLOWED >= 1070 */
90  return err;
91 }
92 
93 
/* Probe whether `fd` can be watched by kqueue: tentatively register it for
 * EVFILT_READ and immediately deregister. Returns 0 if the fd is pollable,
 * otherwise a negative errno-style error code. */
int uv__io_check_fd(uv_loop_t* loop, int fd) {
  struct kevent ev;
  int rc;

  rc = 0;
  EV_SET(&ev, fd, EVFILT_READ, EV_ADD, 0, 0, 0);
  if (kevent(loop->backend_fd, &ev, 1, NULL, 0, NULL))
    rc = UV__ERR(errno);

  /* Roll back the registration; failure to remove an event we just added
   * indicates internal corruption, so abort. */
  EV_SET(&ev, fd, EVFILT_READ, EV_DELETE, 0, 0, 0);
  if (rc == 0)
    if (kevent(loop->backend_fd, &ev, 1, NULL, 0, NULL))
      abort();

  return rc;
}
110 
111 
/* Poll the kqueue for I/O events and dispatch watcher callbacks.
 *
 * `timeout` is in milliseconds: -1 blocks indefinitely, 0 is a non-blocking
 * poll, >0 blocks for at most that long. Pending watcher (de)registrations
 * queued on loop->watcher_queue are flushed to the kernel first.
 */
void uv__io_poll(uv_loop_t* loop, int timeout) {
  struct kevent events[1024];
  struct kevent* ev;
  struct timespec spec;
  unsigned int nevents;
  unsigned int revents;
  QUEUE* q;
  uv__io_t* w;
  sigset_t* pset;
  sigset_t set;
  uint64_t base;
  uint64_t diff;
  int have_signals;
  int filter;
  int fflags;
  int count;
  int nfds;
  int fd;
  int op;
  int i;
  int user_timeout;
  int reset_timeout;

  if (loop->nfds == 0) {
    assert(QUEUE_EMPTY(&loop->watcher_queue));
    return;
  }

  nevents = 0;

  /* Flush pending watcher changes to the kernel, batching up to
   * ARRAY_SIZE(events) changelist entries per kevent() call. */
  while (!QUEUE_EMPTY(&loop->watcher_queue)) {
    q = QUEUE_HEAD(&loop->watcher_queue);
    QUEUE_REMOVE(q);
    QUEUE_INIT(q);

    w = QUEUE_DATA(q, uv__io_t, watcher_queue);
    assert(w->pevents != 0);
    assert(w->fd >= 0);
    assert(w->fd < (int) loop->nwatchers);

    if ((w->events & POLLIN) == 0 && (w->pevents & POLLIN) != 0) {
      filter = EVFILT_READ;
      fflags = 0;
      op = EV_ADD;

      if (w->cb == uv__fs_event) {
        filter = EVFILT_VNODE;
        fflags = NOTE_ATTRIB | NOTE_WRITE | NOTE_RENAME
               | NOTE_DELETE | NOTE_EXTEND | NOTE_REVOKE;
        op = EV_ADD | EV_ONESHOT; /* Stop the event from firing repeatedly. */
      }

      EV_SET(events + nevents, w->fd, filter, op, fflags, 0, 0);

      if (++nevents == ARRAY_SIZE(events)) {
        if (kevent(loop->backend_fd, events, nevents, NULL, 0, NULL))
          abort();
        nevents = 0;
      }
    }

    if ((w->events & POLLOUT) == 0 && (w->pevents & POLLOUT) != 0) {
      EV_SET(events + nevents, w->fd, EVFILT_WRITE, EV_ADD, 0, 0, 0);

      if (++nevents == ARRAY_SIZE(events)) {
        if (kevent(loop->backend_fd, events, nevents, NULL, 0, NULL))
          abort();
        nevents = 0;
      }
    }

    if ((w->events & UV__POLLPRI) == 0 && (w->pevents & UV__POLLPRI) != 0) {
      EV_SET(events + nevents, w->fd, EV_OOBAND, EV_ADD, 0, 0, 0);

      if (++nevents == ARRAY_SIZE(events)) {
        if (kevent(loop->backend_fd, events, nevents, NULL, 0, NULL))
          abort();
        nevents = 0;
      }
    }

    w->events = w->pevents;
  }

  pset = NULL;
  if (loop->flags & UV_LOOP_BLOCK_SIGPROF) {
    pset = &set;
    sigemptyset(pset);
    sigaddset(pset, SIGPROF);
  }

  assert(timeout >= -1);
  base = loop->time;
  count = 48; /* Benchmarks suggest this gives the best throughput. */

  /* When idle-time metrics are enabled, poll non-blocking first so idle time
   * can be measured precisely; the caller's timeout is restored afterwards.
   * NOTE(review): this gate was dropped by the doc extractor; restored per
   * the uv__get_internal_fields/UV_METRICS_IDLE_TIME references in the index. */
  if (uv__get_internal_fields(loop)->flags & UV_METRICS_IDLE_TIME) {
    reset_timeout = 1;
    user_timeout = timeout;
    timeout = 0;
  } else {
    reset_timeout = 0;
  }

  for (;; nevents = 0) {
    /* Only need to set the provider_entry_time if timeout != 0. The function
     * will return early if the loop isn't configured with UV_METRICS_IDLE_TIME.
     */
    if (timeout != 0)
      uv__metrics_set_provider_entry_time(loop);

    if (timeout != -1) {
      spec.tv_sec = timeout / 1000;
      spec.tv_nsec = (timeout % 1000) * 1000000;
    }

    if (pset != NULL)
      pthread_sigmask(SIG_BLOCK, pset, NULL);

    nfds = kevent(loop->backend_fd,
                  events,
                  nevents,
                  events,
                  ARRAY_SIZE(events),
                  timeout == -1 ? NULL : &spec);

    if (pset != NULL)
      pthread_sigmask(SIG_UNBLOCK, pset, NULL);

    /* Update loop->time unconditionally. It's tempting to skip the update when
     * timeout == 0 (i.e. non-blocking poll) but there is no guarantee that the
     * operating system didn't reschedule our process while in the syscall.
     */
    SAVE_ERRNO(uv__update_time(loop));

    if (nfds == 0) {
      if (reset_timeout != 0) {
        timeout = user_timeout;
        reset_timeout = 0;
        if (timeout == -1)
          continue;
        if (timeout > 0)
          goto update_timeout;
      }

      assert(timeout != -1);
      return;
    }

    if (nfds == -1) {
      if (errno != EINTR)
        abort();

      if (reset_timeout != 0) {
        timeout = user_timeout;
        reset_timeout = 0;
      }

      if (timeout == 0)
        return;

      if (timeout == -1)
        continue;

      /* Interrupted by a signal. Update timeout and poll again. */
      goto update_timeout;
    }

    have_signals = 0;
    nevents = 0;

    assert(loop->watchers != NULL);
    /* Stash the event array so uv__platform_invalidate_fd can find it when a
     * callback stops a watcher mid-iteration. */
    loop->watchers[loop->nwatchers] = (void*) events;
    loop->watchers[loop->nwatchers + 1] = (void*) (uintptr_t) nfds;
    for (i = 0; i < nfds; i++) {
      ev = events + i;
      fd = ev->ident;
      /* Skip invalidated events, see uv__platform_invalidate_fd */
      if (fd == -1)
        continue;
      w = loop->watchers[fd];

      if (w == NULL) {
        /* File descriptor that we've stopped watching, disarm it.
         * TODO: batch up. */
        struct kevent events[1];

        EV_SET(events + 0, fd, ev->filter, EV_DELETE, 0, 0, 0);
        if (kevent(loop->backend_fd, events, 1, NULL, 0, NULL))
          if (errno != EBADF && errno != ENOENT)
            abort();

        continue;
      }

      if (ev->filter == EVFILT_VNODE) {
        assert(w->events == POLLIN);
        assert(w->pevents == POLLIN);
        uv__metrics_update_idle_time(loop);
        w->cb(loop, w, ev->fflags); /* XXX always uv__fs_event() */
        nevents++;
        continue;
      }

      revents = 0;

      if (ev->filter == EVFILT_READ) {
        if (w->pevents & POLLIN) {
          revents |= POLLIN;
          w->rcount = ev->data;
        } else {
          /* TODO batch up */
          struct kevent events[1];
          EV_SET(events + 0, fd, ev->filter, EV_DELETE, 0, 0, 0);
          if (kevent(loop->backend_fd, events, 1, NULL, 0, NULL))
            if (errno != ENOENT)
              abort();
        }
      }

      if (ev->filter == EV_OOBAND) {
        if (w->pevents & UV__POLLPRI) {
          revents |= UV__POLLPRI;
          w->rcount = ev->data;
        } else {
          /* TODO batch up */
          struct kevent events[1];
          EV_SET(events + 0, fd, ev->filter, EV_DELETE, 0, 0, 0);
          if (kevent(loop->backend_fd, events, 1, NULL, 0, NULL))
            if (errno != ENOENT)
              abort();
        }
      }

      if (ev->filter == EVFILT_WRITE) {
        if (w->pevents & POLLOUT) {
          revents |= POLLOUT;
          w->wcount = ev->data;
        } else {
          /* TODO batch up */
          struct kevent events[1];
          EV_SET(events + 0, fd, ev->filter, EV_DELETE, 0, 0, 0);
          if (kevent(loop->backend_fd, events, 1, NULL, 0, NULL))
            if (errno != ENOENT)
              abort();
        }
      }

      if (ev->flags & EV_ERROR)
        revents |= POLLERR;

      if ((ev->flags & EV_EOF) && (w->pevents & UV__POLLRDHUP))
        revents |= UV__POLLRDHUP;

      if (revents == 0)
        continue;

      /* Run signal watchers last. This also affects child process watchers
       * because those are implemented in terms of signal watchers.
       */
      if (w == &loop->signal_io_watcher) {
        have_signals = 1;
      } else {
        uv__metrics_update_idle_time(loop);
        w->cb(loop, w, revents);
      }

      nevents++;
    }

    if (reset_timeout != 0) {
      timeout = user_timeout;
      reset_timeout = 0;
    }

    if (have_signals != 0) {
      uv__metrics_update_idle_time(loop);
      loop->signal_io_watcher.cb(loop, &loop->signal_io_watcher, POLLIN);
    }

    loop->watchers[loop->nwatchers] = NULL;
    loop->watchers[loop->nwatchers + 1] = NULL;

    if (have_signals != 0)
      return; /* Event loop should cycle now so don't poll again. */

    if (nevents != 0) {
      if (nfds == ARRAY_SIZE(events) && --count != 0) {
        /* Poll for more events but don't block this time. */
        timeout = 0;
        continue;
      }
      return;
    }

    if (timeout == 0)
      return;

    if (timeout == -1)
      continue;

update_timeout:
    assert(timeout > 0);

    diff = loop->time - base;
    if (diff >= (uint64_t) timeout)
      return;

    timeout -= diff;
  }
}
422 
423 
/* Mark any pending kevents for `fd` as invalid so uv__io_poll skips them.
 * Called when a watcher is stopped while uv__io_poll is iterating the event
 * array it stashed in loop->watchers[loop->nwatchers]. */
void uv__platform_invalidate_fd(uv_loop_t* loop, int fd) {
  struct kevent* events;
  uintptr_t i;
  uintptr_t nfds;

  assert(loop->watchers != NULL);
  assert(fd >= 0);

  events = (struct kevent*) loop->watchers[loop->nwatchers];
  nfds = (uintptr_t) loop->watchers[loop->nwatchers + 1];
  if (events == NULL)
    return;

  /* Invalidate events with same file descriptor */
  for (i = 0; i < nfds; i++)
    if ((int) events[i].ident == fd)
      events[i].ident = -1;
}
442 
443 
/* EVFILT_VNODE callback: translate kqueue fflags into a UV_CHANGE or
 * UV_RENAME event, invoke the user callback, then re-arm the one-shot
 * vnode watch unless the handle was stopped by the callback. */
static void uv__fs_event(uv_loop_t* loop, uv__io_t* w, unsigned int fflags) {
  uv_fs_event_t* handle;
  struct kevent ev;
  int events;
  const char* path;
#if defined(F_GETPATH)
  /* MAXPATHLEN == PATH_MAX but the former is what XNU calls it internally. */
  char pathbuf[MAXPATHLEN];
#endif

  handle = container_of(w, uv_fs_event_t, event_watcher);

  if (fflags & (NOTE_ATTRIB | NOTE_EXTEND))
    events = UV_CHANGE;
  else
    events = UV_RENAME;

  path = NULL;
#if defined(F_GETPATH)
  /* Also works when the file has been unlinked from the file system. Passing
   * in the path when the file has been deleted is arguably a little strange
   * but it's consistent with what the inotify backend does.
   */
  if (fcntl(handle->event_watcher.fd, F_GETPATH, pathbuf) == 0)
    path = uv__basename_r(pathbuf);
#endif
  handle->cb(handle, path, events, 0);

  /* The callback may have called uv_fs_event_stop(), which closes the fd. */
  if (handle->event_watcher.fd == -1)
    return;

  /* Watcher operates in one-shot mode, re-arm it. */
  fflags = NOTE_ATTRIB | NOTE_WRITE | NOTE_RENAME
         | NOTE_DELETE | NOTE_EXTEND | NOTE_REVOKE;

  EV_SET(&ev, w->fd, EVFILT_VNODE, EV_ADD | EV_ONESHOT, fflags, 0, 0);

  if (kevent(loop->backend_fd, &ev, 1, NULL, 0, NULL))
    abort();
}
484 
485 
487  uv__handle_init(loop, (uv_handle_t*)handle, UV_FS_EVENT);
488  return 0;
489 }
490 
491 
494  const char* path,
495  unsigned int flags) {
496  int fd;
497 #if defined(__APPLE__) && MAC_OS_X_VERSION_MAX_ALLOWED >= 1070
498  struct stat statbuf;
499 #endif
500 
501  if (uv__is_active(handle))
502  return UV_EINVAL;
503 
504  handle->cb = cb;
505  handle->path = uv__strdup(path);
506  if (handle->path == NULL)
507  return UV_ENOMEM;
508 
509  /* TODO open asynchronously - but how do we report back errors? */
510  fd = open(handle->path, O_RDONLY);
511  if (fd == -1) {
512  uv__free(handle->path);
513  handle->path = NULL;
514  return UV__ERR(errno);
515  }
516 
517 #if defined(__APPLE__) && MAC_OS_X_VERSION_MAX_ALLOWED >= 1070
518  /* Nullify field to perform checks later */
519  handle->cf_cb = NULL;
520  handle->realpath = NULL;
521  handle->realpath_len = 0;
522  handle->cf_flags = flags;
523 
524  if (fstat(fd, &statbuf))
525  goto fallback;
526  /* FSEvents works only with directories */
527  if (!(statbuf.st_mode & S_IFDIR))
528  goto fallback;
529 
530  if (0 == uv__load_relaxed(&uv__has_forked_with_cfrunloop)) {
531  int r;
532  /* The fallback fd is no longer needed */
534  handle->event_watcher.fd = -1;
536  if (r == 0) {
538  } else {
539  uv__free(handle->path);
540  handle->path = NULL;
541  }
542  return r;
543  }
544 fallback:
545 #endif /* #if defined(__APPLE__) && MAC_OS_X_VERSION_MAX_ALLOWED >= 1070 */
546 
548  uv__io_init(&handle->event_watcher, uv__fs_event, fd);
549  uv__io_start(handle->loop, &handle->event_watcher, POLLIN);
550 
551  return 0;
552 }
553 
554 
556  int r;
557  r = 0;
558 
559  if (!uv__is_active(handle))
560  return 0;
561 
563 
564 #if defined(__APPLE__) && MAC_OS_X_VERSION_MAX_ALLOWED >= 1070
565  if (0 == uv__load_relaxed(&uv__has_forked_with_cfrunloop))
566  if (handle->cf_cb != NULL)
568 #endif
569 
570  if (handle->event_watcher.fd != -1) {
571  uv__io_close(handle->loop, &handle->event_watcher);
572  uv__close(handle->event_watcher.fd);
573  handle->event_watcher.fd = -1;
574  }
575 
576  uv__free(handle->path);
577  handle->path = NULL;
578 
579  return r;
580 }
581 
582 
585 }
ut8 op
Definition: 6502dis.c:13
#define ARRAY_SIZE(a)
lzma_index ** i
Definition: index.h:629
static bool err
Definition: armass.c:435
static mcore_handle handle
Definition: asm_mcore.c:8
#define NULL
Definition: cris-opc.c:27
#define r
Definition: crypto_rc6.c:12
#define w
Definition: crypto_rc6.c:13
static static fork const void static count static fd const char const char static newpath const char static path const char path
Definition: sflib.h:35
static static sync static getppid static getegid const char static filename char static len const char char static bufsiz static mask static vfork const void static prot static getpgrp const char static swapflags static arg static fd static protocol static who struct sockaddr static addrlen static backlog struct timeval struct timezone static tz const struct iovec static count static mode const void const struct sockaddr static tolen const char static pathname void count
Definition: sflib.h:98
static static sync static getppid static getegid const char static filename char static len const char char static bufsiz static mask static vfork const void static prot static getpgrp const char static swapflags static arg static fd static protocol static who struct sockaddr static addrlen static backlog struct timeval struct timezone static tz const struct iovec static count static mode const void const struct sockaddr static tolen const char static pathname void static offset fstat
Definition: sflib.h:107
static static sync static getppid static getegid const char static filename char static len const char char static bufsiz static mask static vfork const void static prot static getpgrp const char static swapflags fcntl
Definition: sflib.h:79
#define UV__ERR(x)
Definition: errno.h:29
int uv__fsevents_init(uv_fs_event_t *handle)
Definition: fsevents.c:29
int uv__fsevents_close(uv_fs_event_t *handle)
Definition: fsevents.c:34
void uv__fs_event_close(uv_fs_event_t *handle)
Definition: kqueue.c:583
#define EV_OOBAND
Definition: kqueue.c:45
int uv_fs_event_stop(uv_fs_event_t *handle)
Definition: kqueue.c:555
void uv__platform_invalidate_fd(uv_loop_t *loop, int fd)
Definition: kqueue.c:424
void uv__io_poll(uv_loop_t *loop, int timeout)
Definition: kqueue.c:112
int uv__io_check_fd(uv_loop_t *loop, int fd)
Definition: kqueue.c:94
int uv_fs_event_start(uv_fs_event_t *handle, uv_fs_event_cb cb, const char *path, unsigned int flags)
Definition: kqueue.c:492
int uv__io_fork(uv_loop_t *loop)
Definition: kqueue.c:66
int uv_fs_event_init(uv_loop_t *loop, uv_fs_event_t *handle)
Definition: kqueue.c:486
int uv__kqueue_init(uv_loop_t *loop)
Definition: kqueue.c:51
static void uv__fs_event(uv_loop_t *loop, uv__io_t *w, unsigned int fflags)
Definition: kqueue.c:444
static const char struct stat static buf struct stat static buf static vhangup int struct rusage static rusage struct sysinfo static info unsigned static __unused struct utsname static buf const char static size const char static name static pid unsigned static persona static fsgid const void static flags const struct iovec static count static fd const void static len static munlockall struct sched_param static p static sched_yield static policy const struct timespec struct timespec static rem uid_t uid_t uid_t static suid struct pollfd unsigned nfds
Definition: sflib.h:196
assert(limit<=UINT32_MAX/2)
static bool filter(RzParse *p, ut64 addr, RzFlag *f, RzAnalysisHint *hint, char *data, char *str, int len, bool big_endian)
Definition: filter.c:185
#define QUEUE_DATA(ptr, type, field)
Definition: queue.h:30
#define QUEUE_EMPTY(q)
Definition: queue.h:39
#define QUEUE_HEAD(q)
Definition: queue.h:42
#define QUEUE_INIT(q)
Definition: queue.h:45
void * QUEUE[2]
Definition: queue.h:21
#define QUEUE_REMOVE(q)
Definition: queue.h:101
#define container_of(ptr, type, member)
Definition: rz_types.h:650
static struct sockaddr static addrlen static backlog const void static flags void flags
Definition: sfsocketcall.h:123
#define ENOENT
Definition: sftypes.h:112
#define EINTR
Definition: sftypes.h:114
#define O_RDONLY
Definition: sftypes.h:486
unsigned long uint64_t
Definition: sftypes.h:28
#define EBADF
Definition: sftypes.h:119
int sigset_t
Definition: sftypes.h:63
_W64 unsigned int uintptr_t
Definition: sftypes.h:80
long tv_nsec
Definition: sftypes.h:90
time_t tv_sec
Definition: sftypes.h:89
Definition: unix.h:96
Definition: uv.h:1780
uv_loop_t * loop
Definition: main.c:7
uv_timer_t timeout
Definition: main.c:9
int uv__close_nocheckstdio(int fd)
Definition: core.c:550
void uv__io_start(uv_loop_t *loop, uv__io_t *w, unsigned int events)
Definition: core.c:882
void uv__io_close(uv_loop_t *loop, uv__io_t *w)
Definition: core.c:942
void uv__io_init(uv__io_t *w, uv__io_cb cb, int fd)
Definition: core.c:865
int uv__close(int fd)
Definition: core.c:569
#define uv__cloexec
Definition: internal.h:169
#define UV__POLLRDHUP
Definition: internal.h:116
#define UV__POLLPRI
Definition: internal.h:122
@ UV_LOOP_BLOCK_SIGPROF
Definition: internal.h:137
#define SAVE_ERRNO(block)
Definition: internal.h:92
Definition: dis.c:32
char * uv__strdup(const char *s)
Definition: uv-common.c:55
void uv__metrics_update_idle_time(uv_loop_t *loop)
Definition: uv-common.c:872
void uv__free(void *ptr)
Definition: uv-common.c:81
void uv__metrics_set_provider_entry_time(uv_loop_t *loop)
Definition: uv-common.c:899
#define uv__load_relaxed(p)
Definition: uv-common.h:67
#define uv__handle_init(loop_, h, type_)
Definition: uv-common.h:301
#define uv__store_relaxed(p, v)
Definition: uv-common.h:68
#define uv__handle_stop(h)
Definition: uv-common.h:266
#define uv__is_active(h)
Definition: uv-common.h:252
#define uv__get_internal_fields(loop)
Definition: uv-common.h:336
#define uv__handle_start(h)
Definition: uv-common.h:258
void(* uv_fs_event_cb)(uv_fs_event_t *handle, const char *filename, int events, int status)
Definition: uv.h:369
@ UV_CHANGE
Definition: uv.h:1542
@ UV_RENAME
Definition: uv.h:1541
@ UV_METRICS_IDLE_TIME
Definition: uv.h:251
static const z80_opcode fd[]
Definition: z80_tab.h:997
static const char * cb[]
Definition: z80_tab.h:176