/* linux-core.c (libuv) */
/* Copyright Joyent, Inc. and other Node contributors. All rights reserved.
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to
 * deal in the Software without restriction, including without limitation the
 * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
 * sell copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

/* We lean on the fact that POLL{IN,OUT,ERR,HUP} correspond with their
 * EPOLL* counterparts. We use the POLL* variants in this file because that
 * is what libuv uses elsewhere.
 */

#include "uv.h"
#include "internal.h"

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <assert.h>
#include <errno.h>

#include <net/if.h>
#include <sys/epoll.h>
#include <sys/param.h>
#include <sys/prctl.h>
#include <sys/sysinfo.h>
#include <unistd.h>
#include <fcntl.h>
#include <time.h>

#define HAVE_IFADDRS_H 1

#ifdef __UCLIBC__
# if __UCLIBC_MAJOR__ < 0 && __UCLIBC_MINOR__ < 9 && __UCLIBC_SUBLEVEL__ < 32
#  undef HAVE_IFADDRS_H
# endif
#endif

#ifdef HAVE_IFADDRS_H
# if defined(__ANDROID__)
#  include "uv/android-ifaddrs.h"
# else
#  include <ifaddrs.h>
# endif
# include <sys/socket.h>
# include <net/ethernet.h>
# include <netpacket/packet.h>
#endif /* HAVE_IFADDRS_H */

/* Available from 2.6.32 onwards. */
#ifndef CLOCK_MONOTONIC_COARSE
# define CLOCK_MONOTONIC_COARSE 6
#endif

/* This is rather annoying: CLOCK_BOOTTIME lives in <linux/time.h> but we can't
 * include that file because it conflicts with <time.h>. We'll just have to
 * define it ourselves.
 */
#ifndef CLOCK_BOOTTIME
# define CLOCK_BOOTTIME 7
#endif

static int read_models(unsigned int numcpus, uv_cpu_info_t* ci);
static int read_times(FILE* statfile_fp,
                      unsigned int numcpus,
                      uv_cpu_info_t* ci);
static void read_speeds(unsigned int numcpus, uv_cpu_info_t* ci);
static uint64_t read_cpufreq(unsigned int cpunum);


int uv__platform_loop_init(uv_loop_t* loop) {
  int fd;
  fd = epoll_create1(O_CLOEXEC);

  /* epoll_create1() can fail either because it's not implemented (old kernel)
   * or because it doesn't understand the O_CLOEXEC flag.
   */
  if (fd == -1 && (errno == ENOSYS || errno == EINVAL)) {
    fd = epoll_create(256);

    if (fd != -1)
      uv__cloexec(fd, 1);
  }

  loop->backend_fd = fd;
  loop->inotify_fd = -1;
  loop->inotify_watchers = NULL;

  if (fd == -1)
    return UV__ERR(errno);

  return 0;
}
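
/* A minimal standalone sketch of the fallback above (illustrative, not part
 * of the original file): EPOLL_CLOEXEC equals O_CLOEXEC on Linux, the size
 * argument of epoll_create() is only a hint that kernels since 2.6.8 ignore,
 * and epoll_create1() first appeared in 2.6.27.
 */
#if 0
#include <sys/epoll.h>
#include <fcntl.h>
#include <errno.h>

static int create_epoll_fd(void) {
  int fd;

  fd = epoll_create1(EPOLL_CLOEXEC);
  if (fd == -1 && (errno == ENOSYS || errno == EINVAL)) {
    fd = epoll_create(256);             /* pre-2.6.27 fallback */
    if (fd != -1)
      fcntl(fd, F_SETFD, FD_CLOEXEC);   /* emulate the close-on-exec flag */
  }
  return fd;
}
#endif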


int uv__io_fork(uv_loop_t* loop) {
  int err;
  void* old_watchers;

  old_watchers = loop->inotify_watchers;

  uv__close(loop->backend_fd);
  loop->backend_fd = -1;
  uv__platform_loop_delete(loop);

  err = uv__platform_loop_init(loop);
  if (err)
    return err;

  return uv__inotify_fork(loop, old_watchers);
}


void uv__platform_loop_delete(uv_loop_t* loop) {
  if (loop->inotify_fd == -1) return;
  uv__io_stop(loop, &loop->inotify_read_watcher, POLLIN);
  uv__close(loop->inotify_fd);
  loop->inotify_fd = -1;
}


void uv__platform_invalidate_fd(uv_loop_t* loop, int fd) {
  struct epoll_event* events;
  struct epoll_event dummy;
  uintptr_t i;
  uintptr_t nfds;

  assert(loop->watchers != NULL);
  assert(fd >= 0);

  events = (struct epoll_event*) loop->watchers[loop->nwatchers];
  nfds = (uintptr_t) loop->watchers[loop->nwatchers + 1];
  if (events != NULL)
    /* Invalidate events with same file descriptor */
    for (i = 0; i < nfds; i++)
      if (events[i].data.fd == fd)
        events[i].data.fd = -1;

  /* Remove the file descriptor from the epoll.
   * This avoids a problem where the same file description remains open
   * in another process, causing repeated junk epoll events.
   *
   * We pass in a dummy epoll_event, to work around a bug in old kernels.
   */
  if (loop->backend_fd >= 0) {
    /* Work around a bug in kernels 3.10 to 3.19 where passing a struct that
     * has the EPOLLWAKEUP flag set generates spurious audit syslog warnings.
     */
    memset(&dummy, 0, sizeof(dummy));
    epoll_ctl(loop->backend_fd, EPOLL_CTL_DEL, fd, &dummy);
  }
}


int uv__io_check_fd(uv_loop_t* loop, int fd) {
  struct epoll_event e;
  int rc;

  memset(&e, 0, sizeof(e));
  e.events = POLLIN;
  e.data.fd = -1;

  rc = 0;
  if (epoll_ctl(loop->backend_fd, EPOLL_CTL_ADD, fd, &e))
    if (errno != EEXIST)
      rc = UV__ERR(errno);

  if (rc == 0)
    if (epoll_ctl(loop->backend_fd, EPOLL_CTL_DEL, fd, &e))
      abort();

  return rc;
}
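
/* Sketch of why the ADD/DEL probe above works (illustrative, not part of the
 * original file): epoll rejects descriptors it cannot poll, e.g. a regular
 * file fails EPOLL_CTL_ADD with EPERM, while pipes and sockets are accepted.
 */
#if 0
#include <sys/epoll.h>
#include <string.h>
#include <errno.h>

static int is_pollable(int epfd, int fd) {
  struct epoll_event e;

  memset(&e, 0, sizeof(e));
  e.events = EPOLLIN;
  if (epoll_ctl(epfd, EPOLL_CTL_ADD, fd, &e)) {
    if (errno == EPERM)
      return 0;   /* regular file or other non-pollable descriptor */
    if (errno != EEXIST)
      return -1;  /* unexpected error */
  }
  epoll_ctl(epfd, EPOLL_CTL_DEL, fd, &e);
  return 1;
}
#endif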


void uv__io_poll(uv_loop_t* loop, int timeout) {
  /* A bug in kernels < 2.6.37 makes timeouts larger than ~30 minutes
   * effectively infinite on 32-bit architectures. To avoid blocking
   * indefinitely, we cap the timeout and poll again if necessary.
   *
   * Note that "30 minutes" is a simplification because it depends on
   * the value of CONFIG_HZ. The magic constant assumes CONFIG_HZ=1200,
   * that being the largest value I have seen in the wild (and only once.)
   */
  static const int max_safe_timeout = 1789569;
  static int no_epoll_pwait_cached;
  static int no_epoll_wait_cached;
  int no_epoll_pwait;
  int no_epoll_wait;
  struct epoll_event events[1024];
  struct epoll_event* pe;
  struct epoll_event e;
  int real_timeout;
  QUEUE* q;
  uv__io_t* w;
  sigset_t sigset;
  uint64_t sigmask;
  uint64_t base;
  int have_signals;
  int nevents;
  int count;
  int nfds;
  int fd;
  int op;
  int i;
  int user_timeout;
  int reset_timeout;

  if (loop->nfds == 0) {
    assert(QUEUE_EMPTY(&loop->watcher_queue));
    return;
  }

  memset(&e, 0, sizeof(e));

  while (!QUEUE_EMPTY(&loop->watcher_queue)) {
    q = QUEUE_HEAD(&loop->watcher_queue);
    QUEUE_REMOVE(q);
    QUEUE_INIT(q);

    w = QUEUE_DATA(q, uv__io_t, watcher_queue);
    assert(w->pevents != 0);
    assert(w->fd >= 0);
    assert(w->fd < (int) loop->nwatchers);

    e.events = w->pevents;
    e.data.fd = w->fd;

    if (w->events == 0)
      op = EPOLL_CTL_ADD;
    else
      op = EPOLL_CTL_MOD;

    /* XXX Future optimization: do EPOLL_CTL_MOD lazily if we stop watching
     * events, skip the syscall and squelch the events after epoll_wait().
     */
    if (epoll_ctl(loop->backend_fd, op, w->fd, &e)) {
      if (errno != EEXIST)
        abort();

      assert(op == EPOLL_CTL_ADD);

      /* We've reactivated a file descriptor that's been watched before. */
      if (epoll_ctl(loop->backend_fd, EPOLL_CTL_MOD, w->fd, &e))
        abort();
    }

    w->events = w->pevents;
  }

  sigmask = 0;
  if (loop->flags & UV_LOOP_BLOCK_SIGPROF) {
    sigemptyset(&sigset);
    sigaddset(&sigset, SIGPROF);
    sigmask |= 1 << (SIGPROF - 1);
  }

  assert(timeout >= -1);
  base = loop->time;
  count = 48; /* Benchmarks suggest this gives the best throughput. */
  real_timeout = timeout;

  if (uv__get_internal_fields(loop)->flags & UV_METRICS_IDLE_TIME) {
    reset_timeout = 1;
    user_timeout = timeout;
    timeout = 0;
  } else {
    reset_timeout = 0;
    user_timeout = 0;
  }

  /* You could argue there is a dependency between these two but
   * ultimately we don't care about their ordering with respect
   * to one another. Worst case, we make a few system calls that
   * could have been avoided because another thread already knows
   * they fail with ENOSYS. Hardly the end of the world.
   */
  no_epoll_pwait = uv__load_relaxed(&no_epoll_pwait_cached);
  no_epoll_wait = uv__load_relaxed(&no_epoll_wait_cached);

  for (;;) {
    /* Only need to set the provider_entry_time if timeout != 0. The function
     * will return early if the loop isn't configured with UV_METRICS_IDLE_TIME.
     */
    if (timeout != 0)
      uv__metrics_set_provider_entry_time(loop);

    /* See the comment for max_safe_timeout for an explanation of why
     * this is necessary. Executive summary: kernel bug workaround.
     */
    if (sizeof(int32_t) == sizeof(long) && timeout >= max_safe_timeout)
      timeout = max_safe_timeout;

    if (sigmask != 0 && no_epoll_pwait != 0)
      if (pthread_sigmask(SIG_BLOCK, &sigset, NULL))
        abort();

    if (no_epoll_wait != 0 || (sigmask != 0 && no_epoll_pwait == 0)) {
      nfds = epoll_pwait(loop->backend_fd,
                         events,
                         ARRAY_SIZE(events),
                         timeout,
                         &sigset);
      if (nfds == -1 && errno == ENOSYS) {
        uv__store_relaxed(&no_epoll_pwait_cached, 1);
        no_epoll_pwait = 1;
      }
    } else {
      nfds = epoll_wait(loop->backend_fd,
                        events,
                        ARRAY_SIZE(events),
                        timeout);
      if (nfds == -1 && errno == ENOSYS) {
        uv__store_relaxed(&no_epoll_wait_cached, 1);
        no_epoll_wait = 1;
      }
    }

    if (sigmask != 0 && no_epoll_pwait != 0)
      if (pthread_sigmask(SIG_UNBLOCK, &sigset, NULL))
        abort();

    /* Update loop->time unconditionally. It's tempting to skip the update when
     * timeout == 0 (i.e. non-blocking poll) but there is no guarantee that the
     * operating system didn't reschedule our process while in the syscall.
     */
    SAVE_ERRNO(uv__update_time(loop));

    if (nfds == 0) {
      assert(timeout != -1);

      if (reset_timeout != 0) {
        timeout = user_timeout;
        reset_timeout = 0;
      }

      if (timeout == -1)
        continue;

      if (timeout == 0)
        return;

      /* We may have been inside the system call for longer than |timeout|
       * milliseconds so we need to update the timestamp to avoid drift.
       */
      goto update_timeout;
    }

    if (nfds == -1) {
      if (errno == ENOSYS) {
        /* epoll_wait() or epoll_pwait() failed, try the other system call. */
        assert(no_epoll_wait == 0 || no_epoll_pwait == 0);
        continue;
      }

      if (errno != EINTR)
        abort();

      if (reset_timeout != 0) {
        timeout = user_timeout;
        reset_timeout = 0;
      }

      if (timeout == -1)
        continue;

      if (timeout == 0)
        return;

      /* Interrupted by a signal. Update timeout and poll again. */
      goto update_timeout;
    }

    have_signals = 0;
    nevents = 0;

    {
      /* Squelch a -Waddress-of-packed-member warning with gcc >= 9. */
      union {
        struct epoll_event* events;
        uv__io_t* watchers;
      } x;

      x.events = events;
      assert(loop->watchers != NULL);
      loop->watchers[loop->nwatchers] = x.watchers;
      loop->watchers[loop->nwatchers + 1] = (void*) (uintptr_t) nfds;
    }

    for (i = 0; i < nfds; i++) {
      pe = events + i;
      fd = pe->data.fd;

      /* Skip invalidated events, see uv__platform_invalidate_fd */
      if (fd == -1)
        continue;

      assert(fd >= 0);
      assert((unsigned) fd < loop->nwatchers);

      w = loop->watchers[fd];

      if (w == NULL) {
        /* File descriptor that we've stopped watching, disarm it.
         *
         * Ignore all errors because we may be racing with another thread
         * when the file descriptor is closed.
         */
        epoll_ctl(loop->backend_fd, EPOLL_CTL_DEL, fd, pe);
        continue;
      }

      /* Give users only events they're interested in. Prevents spurious
       * callbacks when a previous callback invocation in this loop has
       * stopped the current watcher. Also filters out events that the user
       * has not requested us to watch.
       */
      pe->events &= w->pevents | POLLERR | POLLHUP;

      /* Work around an epoll quirk where it sometimes reports just the
       * EPOLLERR or EPOLLHUP event. In order to force the event loop to
       * move forward, we merge in the read/write events that the watcher
       * is interested in; uv__read() and uv__write() will then deal with
       * the error or hangup in the usual fashion.
       *
       * Note to self: happens when epoll reports EPOLLIN|EPOLLHUP, the user
       * reads the available data, calls uv_read_stop(), then sometime later
       * calls uv_read_start() again. By then, libuv has forgotten about the
       * hangup and the kernel won't report EPOLLIN again because there's
       * nothing left to read. If anything, libuv is to blame here. The
       * current hack is just a quick bandaid; to properly fix it, libuv
       * needs to remember the error/hangup event. We should get that for
       * free when we switch over to edge-triggered I/O.
       */
      if (pe->events == POLLERR || pe->events == POLLHUP)
        pe->events |=
          w->pevents & (POLLIN | POLLOUT | UV__POLLRDHUP | UV__POLLPRI);

      if (pe->events != 0) {
        /* Run signal watchers last. This also affects child process watchers
         * because those are implemented in terms of signal watchers.
         */
        if (w == &loop->signal_io_watcher) {
          have_signals = 1;
        } else {
          uv__metrics_update_idle_time(loop);
          w->cb(loop, w, pe->events);
        }

        nevents++;
      }
    }

    if (reset_timeout != 0) {
      timeout = user_timeout;
      reset_timeout = 0;
    }

    if (have_signals != 0) {
      uv__metrics_update_idle_time(loop);
      loop->signal_io_watcher.cb(loop, &loop->signal_io_watcher, POLLIN);
    }

    loop->watchers[loop->nwatchers] = NULL;
    loop->watchers[loop->nwatchers + 1] = NULL;

    if (have_signals != 0)
      return; /* Event loop should cycle now so don't poll again. */

    if (nevents != 0) {
      if (nfds == ARRAY_SIZE(events) && --count != 0) {
        /* Poll for more events but don't block this time. */
        timeout = 0;
        continue;
      }
      return;
    }

    if (timeout == 0)
      return;

    if (timeout == -1)
      continue;

update_timeout:
    assert(timeout > 0);

    real_timeout -= (loop->time - base);
    if (real_timeout <= 0)
      return;

    timeout = real_timeout;
  }
}
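
/* The SIGPROF branches above rely on epoll_pwait() atomically installing a
 * signal mask for the duration of the wait; signals in the set are blocked
 * until the call returns. A hedged sketch of the pattern in isolation:
 */
#if 0
#include <sys/epoll.h>
#include <signal.h>

static int wait_with_sigprof_blocked(int epfd,
                                     struct epoll_event* evs,
                                     int maxevents,
                                     int timeout_ms) {
  sigset_t set;

  sigemptyset(&set);
  sigaddset(&set, SIGPROF);  /* blocked only while the wait is in progress */
  return epoll_pwait(epfd, evs, maxevents, timeout_ms, &set);
}
#endif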


uint64_t uv__hrtime(uv_clocktype_t type) {
  static clock_t fast_clock_id = -1;
  struct timespec t;
  clock_t clock_id;

  /* Prefer CLOCK_MONOTONIC_COARSE if available but only when it has
   * millisecond granularity or better. CLOCK_MONOTONIC_COARSE is
   * serviced entirely from the vDSO, whereas CLOCK_MONOTONIC may
   * decide to make a costly system call.
   */
  /* TODO(bnoordhuis) Use CLOCK_MONOTONIC_COARSE for UV_CLOCK_PRECISE
   * when it has microsecond granularity or better (unlikely).
   */
  clock_id = CLOCK_MONOTONIC;
  if (type != UV_CLOCK_FAST)
    goto done;

  clock_id = uv__load_relaxed(&fast_clock_id);
  if (clock_id != -1)
    goto done;

  clock_id = CLOCK_MONOTONIC;
  if (0 == clock_getres(CLOCK_MONOTONIC_COARSE, &t))
    if (t.tv_nsec <= 1 * 1000 * 1000)
      clock_id = CLOCK_MONOTONIC_COARSE;

  uv__store_relaxed(&fast_clock_id, clock_id);

done:

  if (clock_gettime(clock_id, &t))
    return 0;  /* Not really possible. */

  return t.tv_sec * (uint64_t) 1e9 + t.tv_nsec;
}
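
/* The 1 ms cutoff above matters because CLOCK_MONOTONIC_COARSE ticks at the
 * kernel's CONFIG_HZ rate, e.g. 4 ms at HZ=250. A hedged sketch for
 * inspecting the resolution on a given machine:
 */
#if 0
#include <stdio.h>
#include <time.h>

int main(void) {
  struct timespec res;

  if (clock_getres(CLOCK_MONOTONIC_COARSE, &res) == 0)
    printf("coarse clock resolution: %ld ns\n", res.tv_nsec);
  return 0;
}
#endif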


int uv_resident_set_memory(size_t* rss) {
  char buf[1024];
  const char* s;
  ssize_t n;
  long val;
  int fd;
  int i;

  do
    fd = open("/proc/self/stat", O_RDONLY);
  while (fd == -1 && errno == EINTR);

  if (fd == -1)
    return UV__ERR(errno);

  do
    n = read(fd, buf, sizeof(buf) - 1);
  while (n == -1 && errno == EINTR);

  uv__close(fd);
  if (n == -1)
    return UV__ERR(errno);
  buf[n] = '\0';

  s = strchr(buf, ' ');
  if (s == NULL)
    goto err;

  s += 1;
  if (*s != '(')
    goto err;

  s = strchr(s, ')');
  if (s == NULL)
    goto err;

  for (i = 1; i <= 22; i++) {
    s = strchr(s + 1, ' ');
    if (s == NULL)
      goto err;
  }

  errno = 0;
  val = strtol(s, NULL, 10);
  if (errno != 0)
    goto err;
  if (val < 0)
    goto err;

  *rss = val * getpagesize();
  return 0;

err:
  return UV_EINVAL;
}
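
/* The value parsed above is field 24 of /proc/self/stat (rss, counted in
 * pages): the loop skips the 22 space-separated fields that follow the
 * parenthesised comm field. A hedged sketch of the same page-to-byte
 * conversion:
 */
#if 0
#include <unistd.h>
#include <stddef.h>

static size_t rss_pages_to_bytes(long rss_pages) {
  return (size_t) rss_pages * (size_t) sysconf(_SC_PAGESIZE);
}
#endif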


int uv_uptime(double* uptime) {
  static volatile int no_clock_boottime;
  struct timespec now;
  int r;

  /* Try CLOCK_BOOTTIME first, fall back to CLOCK_MONOTONIC if not available
   * (pre-2.6.39 kernels). CLOCK_MONOTONIC doesn't increase when the system
   * is suspended.
   */
  if (no_clock_boottime) {
    retry: r = clock_gettime(CLOCK_MONOTONIC, &now);
  }
  else if ((r = clock_gettime(CLOCK_BOOTTIME, &now)) && errno == EINVAL) {
    no_clock_boottime = 1;
    goto retry;
  }

  if (r)
    return UV__ERR(errno);

  *uptime = now.tv_sec;
  return 0;
}


static int uv__cpu_num(FILE* statfile_fp, unsigned int* numcpus) {
  unsigned int num;
  char buf[1024];

  if (!fgets(buf, sizeof(buf), statfile_fp))
    return UV_EIO;

  num = 0;
  while (fgets(buf, sizeof(buf), statfile_fp)) {
    if (strncmp(buf, "cpu", 3))
      break;
    num++;
  }

  if (num == 0)
    return UV_EIO;

  *numcpus = num;
  return 0;
}
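
/* For reference, /proc/stat starts with an aggregate "cpu " line followed by
 * one "cpuN" line per core, which is what the loop above counts. For example
 * (values are illustrative):
 *
 *   cpu  8324 25 2188 142098 181 0 122 0 0 0
 *   cpu0 4205 12 1102 71020 90 0 61 0 0 0
 *   cpu1 4119 13 1086 71078 91 0 61 0 0 0
 */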


int uv_cpu_info(uv_cpu_info_t** cpu_infos, int* count) {
  unsigned int numcpus;
  uv_cpu_info_t* ci;
  int err;
  FILE* statfile_fp;

  *cpu_infos = NULL;
  *count = 0;

  statfile_fp = uv__open_file("/proc/stat");
  if (statfile_fp == NULL)
    return UV__ERR(errno);

  err = uv__cpu_num(statfile_fp, &numcpus);
  if (err < 0)
    goto out;

  err = UV_ENOMEM;
  ci = uv__calloc(numcpus, sizeof(*ci));
  if (ci == NULL)
    goto out;

  err = read_models(numcpus, ci);
  if (err == 0)
    err = read_times(statfile_fp, numcpus, ci);

  if (err) {
    uv_free_cpu_info(ci, numcpus);
    goto out;
  }

  /* read_models() on x86 also reads the CPU speed from /proc/cpuinfo.
   * We don't check for errors here. Worst case, the field is left zero.
   */
  if (ci[0].speed == 0)
    read_speeds(numcpus, ci);

  *cpu_infos = ci;
  *count = numcpus;
  err = 0;

out:

  if (fclose(statfile_fp))
    if (errno != EINTR && errno != EINPROGRESS)
      abort();

  return err;
}


static void read_speeds(unsigned int numcpus, uv_cpu_info_t* ci) {
  unsigned int num;

  for (num = 0; num < numcpus; num++)
    ci[num].speed = read_cpufreq(num) / 1000;
}


/* Also reads the CPU frequency on x86. The other architectures only have
 * a BogoMIPS field, which may not be very accurate.
 *
 * Note: Simply returns on error, uv_cpu_info() takes care of the cleanup.
 */
static int read_models(unsigned int numcpus, uv_cpu_info_t* ci) {
  static const char model_marker[] = "model name\t: ";
  static const char speed_marker[] = "cpu MHz\t\t: ";
  const char* inferred_model;
  unsigned int model_idx;
  unsigned int speed_idx;
  char buf[1024];
  char* model;
  FILE* fp;

  /* Most are unused on non-ARM, non-MIPS and non-x86 architectures. */
  (void) &model_marker;
  (void) &speed_marker;
  (void) &speed_idx;
  (void) &model;
  (void) &buf;
  (void) &fp;

  model_idx = 0;
  speed_idx = 0;

#if defined(__arm__) || \
    defined(__i386__) || \
    defined(__mips__) || \
    defined(__x86_64__)
  fp = uv__open_file("/proc/cpuinfo");
  if (fp == NULL)
    return UV__ERR(errno);

  while (fgets(buf, sizeof(buf), fp)) {
    if (model_idx < numcpus) {
      if (strncmp(buf, model_marker, sizeof(model_marker) - 1) == 0) {
        model = buf + sizeof(model_marker) - 1;
        model = uv__strndup(model, strlen(model) - 1);  /* Strip newline. */
        if (model == NULL) {
          fclose(fp);
          return UV_ENOMEM;
        }
        ci[model_idx++].model = model;
        continue;
      }
    }
#if defined(__arm__) || defined(__mips__)
    if (model_idx < numcpus) {
#if defined(__arm__)
      /* Fallback for pre-3.8 kernels. */
      static const char model_marker[] = "Processor\t: ";
#else /* defined(__mips__) */
      static const char model_marker[] = "cpu model\t\t: ";
#endif
      if (strncmp(buf, model_marker, sizeof(model_marker) - 1) == 0) {
        model = buf + sizeof(model_marker) - 1;
        model = uv__strndup(model, strlen(model) - 1);  /* Strip newline. */
        if (model == NULL) {
          fclose(fp);
          return UV_ENOMEM;
        }
        ci[model_idx++].model = model;
        continue;
      }
    }
#else /* !__arm__ && !__mips__ */
    if (speed_idx < numcpus) {
      if (strncmp(buf, speed_marker, sizeof(speed_marker) - 1) == 0) {
        ci[speed_idx++].speed = atoi(buf + sizeof(speed_marker) - 1);
        continue;
      }
    }
#endif /* __arm__ || __mips__ */
  }

  fclose(fp);
#endif /* __arm__ || __i386__ || __mips__ || __x86_64__ */

  /* Now we want to make sure that all the models contain *something* because
   * it's not safe to leave them as null. Copy the last entry unless there
   * isn't one, in that case we simply put "unknown" into everything.
   */
  inferred_model = "unknown";
  if (model_idx > 0)
    inferred_model = ci[model_idx - 1].model;

  while (model_idx < numcpus) {
    model = uv__strndup(inferred_model, strlen(inferred_model));
    if (model == NULL)
      return UV_ENOMEM;
    ci[model_idx++].model = model;
  }

  return 0;
}


static int read_times(FILE* statfile_fp,
                      unsigned int numcpus,
                      uv_cpu_info_t* ci) {
  struct uv_cpu_times_s ts;
  unsigned int ticks;
  unsigned int multiplier;
  uint64_t user;
  uint64_t nice;
  uint64_t sys;
  uint64_t idle;
  uint64_t dummy;
  uint64_t irq;
  uint64_t num;
  uint64_t len;
  char buf[1024];

  ticks = (unsigned int)sysconf(_SC_CLK_TCK);
  multiplier = ((uint64_t)1000L / ticks);
  assert(ticks != (unsigned int) -1);
  assert(ticks != 0);

  rewind(statfile_fp);

  if (!fgets(buf, sizeof(buf), statfile_fp))
    abort();

  num = 0;

  while (fgets(buf, sizeof(buf), statfile_fp)) {
    if (num >= numcpus)
      break;

    if (strncmp(buf, "cpu", 3))
      break;

    /* skip "cpu<num> " marker */
    {
      unsigned int n;
      int r = sscanf(buf, "cpu%u ", &n);
      assert(r == 1);
      (void) r;  /* silence build warning */
      for (len = sizeof("cpu0"); n /= 10; len++);
    }

    /* Line contains user, nice, system, idle, iowait, irq, softirq, steal,
     * guest, guest_nice but we're only interested in the first four + irq.
     *
     * Don't use %*s to skip fields or %ll to read straight into the uint64_t
     * fields, they're not allowed in C89 mode.
     */
    if (6 != sscanf(buf + len,
                    "%" PRIu64 " %" PRIu64 " %" PRIu64
                    "%" PRIu64 " %" PRIu64 " %" PRIu64,
                    &user,
                    &nice,
                    &sys,
                    &idle,
                    &dummy,
                    &irq))
      abort();

    ts.user = user * multiplier;
    ts.nice = nice * multiplier;
    ts.sys = sys * multiplier;
    ts.idle = idle * multiplier;
    ts.irq = irq * multiplier;
    ci[num++].cpu_times = ts;
  }
  assert(num == numcpus);

  return 0;
}
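
/* Times in /proc/stat are expressed in clock ticks; the multiplier above
 * converts them to milliseconds with the same integer division as this
 * hedged standalone sketch:
 */
#if 0
#include <stdint.h>
#include <unistd.h>

static uint64_t ticks_to_ms(uint64_t ticks_value) {
  uint64_t hz = (uint64_t) sysconf(_SC_CLK_TCK);  /* commonly 100 on Linux */

  return ticks_value * (1000 / hz);
}
#endif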


static uint64_t read_cpufreq(unsigned int cpunum) {
  uint64_t val;
  char buf[1024];
  FILE* fp;

  snprintf(buf,
           sizeof(buf),
           "/sys/devices/system/cpu/cpu%u/cpufreq/scaling_cur_freq",
           cpunum);

  fp = uv__open_file(buf);
  if (fp == NULL)
    return 0;

  if (fscanf(fp, "%" PRIu64, &val) != 1)
    val = 0;

  fclose(fp);

  return val;
}


static int uv__ifaddr_exclude(struct ifaddrs *ent, int exclude_type) {
  if (!((ent->ifa_flags & IFF_UP) && (ent->ifa_flags & IFF_RUNNING)))
    return 1;
  if (ent->ifa_addr == NULL)
    return 1;
  /*
   * On Linux getifaddrs returns information related to the raw underlying
   * devices. We're not interested in this information yet.
   */
  if (ent->ifa_addr->sa_family == PF_PACKET)
    return exclude_type;
  return !exclude_type;
}

int uv_interface_addresses(uv_interface_address_t** addresses, int* count) {
#ifndef HAVE_IFADDRS_H
  *count = 0;
  *addresses = NULL;
  return UV_ENOSYS;
#else
  struct ifaddrs *addrs, *ent;
  uv_interface_address_t* address;
  int i;
  struct sockaddr_ll *sll;

  *count = 0;
  *addresses = NULL;

  if (getifaddrs(&addrs))
    return UV__ERR(errno);

  /* Count the number of interfaces */
  for (ent = addrs; ent != NULL; ent = ent->ifa_next) {
    if (uv__ifaddr_exclude(ent, UV__EXCLUDE_IFADDR))
      continue;

    (*count)++;
  }

  if (*count == 0) {
    freeifaddrs(addrs);
    return 0;
  }

  /* Make sure the memory is initialized to zero using calloc() */
  *addresses = uv__calloc(*count, sizeof(**addresses));
  if (!(*addresses)) {
    freeifaddrs(addrs);
    return UV_ENOMEM;
  }

  address = *addresses;

  for (ent = addrs; ent != NULL; ent = ent->ifa_next) {
    if (uv__ifaddr_exclude(ent, UV__EXCLUDE_IFADDR))
      continue;

    address->name = uv__strdup(ent->ifa_name);

    if (ent->ifa_addr->sa_family == AF_INET6) {
      address->address.address6 = *((struct sockaddr_in6*) ent->ifa_addr);
    } else {
      address->address.address4 = *((struct sockaddr_in*) ent->ifa_addr);
    }

    if (ent->ifa_netmask->sa_family == AF_INET6) {
      address->netmask.netmask6 = *((struct sockaddr_in6*) ent->ifa_netmask);
    } else {
      address->netmask.netmask4 = *((struct sockaddr_in*) ent->ifa_netmask);
    }

    address->is_internal = !!(ent->ifa_flags & IFF_LOOPBACK);

    address++;
  }

  /* Fill in physical addresses for each interface */
  for (ent = addrs; ent != NULL; ent = ent->ifa_next) {
    if (uv__ifaddr_exclude(ent, UV__EXCLUDE_IFPHYS))
      continue;

    address = *addresses;

    for (i = 0; i < (*count); i++) {
      size_t namelen = strlen(ent->ifa_name);
      /* Alias interfaces share the same physical address */
      if (strncmp(address->name, ent->ifa_name, namelen) == 0 &&
          (address->name[namelen] == 0 || address->name[namelen] == ':')) {
        sll = (struct sockaddr_ll*)ent->ifa_addr;
        memcpy(address->phys_addr, sll->sll_addr, sizeof(address->phys_addr));
      }
      address++;
    }
  }

  freeifaddrs(addrs);

  return 0;
#endif
}


void uv_free_interface_addresses(uv_interface_address_t* addresses,
  int count) {
  int i;

  for (i = 0; i < count; i++) {
    uv__free(addresses[i].name);
  }

  uv__free(addresses);
}


void uv__set_process_title(const char* title) {
#if defined(PR_SET_NAME)
  prctl(PR_SET_NAME, title);  /* Only copies first 16 characters. */
#endif
}


static int uv__slurp(const char* filename, char* buf, size_t len) {
  ssize_t n;
  int fd;

  assert(len > 0);

  fd = uv__open_cloexec(filename, O_RDONLY);
  if (fd < 0)
    return fd;

  do
    n = read(fd, buf, len - 1);
  while (n == -1 && errno == EINTR);

  if (uv__close_nocheckstdio(fd))
    abort();

  if (n < 0)
    return UV__ERR(errno);

  buf[n] = '\0';

  return 0;
}


static uint64_t uv__read_proc_meminfo(const char* what) {
  uint64_t rc;
  char* p;
  char buf[4096];  /* Large enough to hold all of /proc/meminfo. */

  if (uv__slurp("/proc/meminfo", buf, sizeof(buf)))
    return 0;

  p = strstr(buf, what);

  if (p == NULL)
    return 0;

  p += strlen(what);

  rc = 0;
  sscanf(p, "%" PRIu64 " kB", &rc);

  return rc * 1024;
}
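
/* /proc/meminfo entries have the form "<key>: <value> kB", which is why
 * callers pass a key such as "MemFree:" and the result is scaled by 1024.
 * For example (values are illustrative):
 *
 *   MemTotal:       16384516 kB
 *   MemFree:         8123412 kB
 */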


uint64_t uv_get_free_memory(void) {
  struct sysinfo info;
  uint64_t rc;

  rc = uv__read_proc_meminfo("MemFree:");

  if (rc != 0)
    return rc;

  if (0 == sysinfo(&info))
    return (uint64_t) info.freeram * info.mem_unit;

  return 0;
}


uint64_t uv_get_total_memory(void) {
  struct sysinfo info;
  uint64_t rc;

  rc = uv__read_proc_meminfo("MemTotal:");

  if (rc != 0)
    return rc;

  if (0 == sysinfo(&info))
    return (uint64_t) info.totalram * info.mem_unit;

  return 0;
}


static uint64_t uv__read_cgroups_uint64(const char* cgroup, const char* param) {
  char filename[256];
  char buf[32];  /* Large enough to hold an encoded uint64_t. */
  uint64_t rc;

  rc = 0;
  snprintf(filename, sizeof(filename), "/sys/fs/cgroup/%s/%s", cgroup, param);
  if (0 == uv__slurp(filename, buf, sizeof(buf)))
    sscanf(buf, "%" PRIu64, &rc);

  return rc;
}
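
/* The path built above resolves to a cgroup v1 location such as
 * /sys/fs/cgroup/memory/memory.limit_in_bytes; hosts using the cgroup v2
 * unified hierarchy expose memory.max instead, which this code does not
 * consult, so uv__slurp() simply fails there and 0 is returned.
 */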


uint64_t uv_get_constrained_memory(void) {
  /*
   * This might return 0 if there was a problem getting the memory limit from
   * cgroups. This is OK because a return value of 0 signifies that the memory
   * limit is unknown.
   */
  return uv__read_cgroups_uint64("memory", "memory.limit_in_bytes");
}


void uv_loadavg(double avg[3]) {
  struct sysinfo info;
  char buf[128];  /* Large enough to hold all of /proc/loadavg. */

  if (0 == uv__slurp("/proc/loadavg", buf, sizeof(buf)))
    if (3 == sscanf(buf, "%lf %lf %lf", &avg[0], &avg[1], &avg[2]))
      return;

  if (sysinfo(&info) < 0)
    return;

  avg[0] = (double) info.loads[0] / 65536.0;
  avg[1] = (double) info.loads[1] / 65536.0;
  avg[2] = (double) info.loads[2] / 65536.0;
}
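
/* struct sysinfo encodes load averages as fixed-point values scaled by
 * 1 << SI_LOAD_SHIFT (65536), hence the division above. A hedged sketch of
 * the same decoding:
 */
#if 0
#include <sys/sysinfo.h>

static double decode_load(unsigned long fixed_point) {
  return (double) fixed_point / (double) (1 << SI_LOAD_SHIFT);
}
#endif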