Rizin
unix-like reverse engineering framework and cli tools
buf_sparse.c
// SPDX-FileCopyrightText: 2009-2020 ret2libc <sirmy15@gmail.com>
// SPDX-FileCopyrightText: 2021 Florian Märkl <info@florianmaerkl.de>
// SPDX-License-Identifier: LGPL-3.0-only

#include <rz_util.h>

typedef struct sparse_init_config_t {
	RzBuffer *base;
	RzBufferSparseWriteMode write_mode;
} SparseInitConfig;

typedef struct buf_sparse_priv {
	RzBuffer *base; ///< if not NULL, unpopulated bytes are taken from this, else 0xff
	RzVector /*<RzBufferSparseChunk>*/ chunks; ///< of RzBufferSparseChunk, non-overlapping, ordered by from addr
	ut64 offset;
	RzBufferSparseWriteMode write_mode;
} SparsePriv;

static void chunk_fini(void *a, void *user) {
	RzBufferSparseChunk *s = a;
	free(s->data);
}

static bool sparse_limits(SparsePriv *priv, ut64 *max) {
	if (rz_vector_empty(&priv->chunks)) {
		return false;
	}
	RzBufferSparseChunk *c = rz_vector_index_ptr(&priv->chunks, rz_vector_len(&priv->chunks) - 1);
	*max = c->to + 1;
	return true;
}

static int chunk_cmp(ut64 addr, void *a) {
	RzBufferSparseChunk *c = a;
	return RZ_NUM_CMP(addr, c->from);
}

/**
 * \return the number of chunks starting at or before \p addr, i.e. 1 + the index of the
 * chunk that contains or precedes \p addr, or 0 if \p addr lies before all chunks.
 */
static size_t chunk_index_in(SparsePriv *priv, ut64 addr) {
	size_t i;
	rz_vector_upper_bound(&priv->chunks, addr, i, chunk_cmp);
	return i;
}

/**
 * Write \p data into the sparse overlay at \p addr, merging with or swallowing any
 * chunks overlapped by the written range.
 * \return number of bytes written or -1 on failure
 */
static st64 sparse_write(SparsePriv *priv, ut64 addr, const ut8 *data, ut64 len) {
	if (!len) {
		return -1;
	}
	if (addr + len < addr) {
		// clamp to UT64_MAX (inclusive)
		len = 0 - addr;
	}
	size_t in_start_index = chunk_index_in(priv, addr);
	size_t in_end_index = chunk_index_in(priv, addr + len < addr ? UT64_MAX : addr + len);
	RzBufferSparseChunk *c = NULL; // the chunk where we will write into
	if (in_start_index) {
		// if we start writing inside an existing chunk, use it.
		c = rz_vector_index_ptr(&priv->chunks, in_start_index - 1);
		if (addr > c->to) {
			// already after it
			c = NULL;
		} else {
			// inside of it, our start index is the index of this chunk
			in_start_index--;
		}
	}
	if (!c) {
		c = rz_vector_insert(&priv->chunks, in_start_index, NULL);
		if (in_end_index) {
			// adjust after insertion
			in_end_index++;
		}
		c->from = addr;
		c->to = c->from;
		c->data = NULL;
	}
	// (re)alloc buffer and fill with appropriate data
	ut64 newto = addr + len - 1;
	RzBufferSparseChunk *in_end_chunk = NULL;
	if (in_end_index) {
		in_end_chunk = rz_vector_index_ptr(&priv->chunks, in_end_index - 1);
		if (in_end_chunk->to > newto) {
			newto = in_end_chunk->to;
		} else {
			// completely swallowed this chunk, nothing to copy
			in_end_chunk = NULL;
		}
	}
	ut8 *newbuf = realloc(c->data, newto - c->from + 1);
	if (!newbuf) {
		return -1;
	}
	c->data = newbuf;
	c->to = newto;
	memcpy(c->data + (addr - c->from), data, len);
	if (in_end_chunk && in_end_chunk != c) {
		// copy the remaining tail of the partially overlapped end chunk behind the new data
		memcpy(c->data + (addr - c->from) + len,
			in_end_chunk->data + (addr + len - in_end_chunk->from),
			in_end_chunk->to - (addr + len) + 1);
	}
	// remove all chunks that are now overlapped and overwritten
	if (in_end_index && in_start_index < in_end_index - 1) {
		// remove_range does not free by design
		for (size_t i = in_start_index + 1; i <= in_end_index - 1; i++) {
			chunk_fini(rz_vector_index_ptr(&priv->chunks, i), NULL);
		}
		rz_vector_remove_range(&priv->chunks, in_start_index + 1, in_end_index - (in_start_index + 1), NULL);
	}
	return len;
}

static inline struct buf_sparse_priv *get_priv_sparse(RzBuffer *b) {
	struct buf_sparse_priv *priv = (struct buf_sparse_priv *)b->priv;
	rz_warn_if_fail(priv);
	return priv;
}

static bool buf_sparse_init(RzBuffer *b, const void *user) {
	SparsePriv *priv = RZ_NEW0(struct buf_sparse_priv);
	if (!priv) {
		return false;
	}
	if (user) {
		SparseInitConfig *cfg = (void *)user;
		priv->base = cfg->base;
		if (priv->base) {
			rz_buf_ref(priv->base);
		}
		priv->write_mode = cfg->write_mode;
	} else {
		priv->write_mode = RZ_BUF_SPARSE_WRITE_MODE_SPARSE;
	}
	rz_vector_init(&priv->chunks, sizeof(RzBufferSparseChunk), chunk_fini, NULL);
	priv->offset = 0;
	b->priv = priv;
	return true;
}

static bool buf_sparse_fini(RzBuffer *b) {
	struct buf_sparse_priv *priv = get_priv_sparse(b);
	rz_vector_fini(&priv->chunks);
	rz_buf_free(priv->base);
	RZ_FREE(b->priv);
	return true;
}

static bool buf_sparse_resize(RzBuffer *b, ut64 newsize) {
	SparsePriv *priv = get_priv_sparse(b);
	size_t n;
	rz_vector_lower_bound(&priv->chunks, newsize, n, chunk_cmp);
	// now n == index of the first chunk to be thrown away entirely
	if (n < rz_vector_len(&priv->chunks)) {
		// remove all excessive chunks if shrinking
		for (size_t i = n; i < rz_vector_len(&priv->chunks); i++) {
			chunk_fini(rz_vector_index_ptr(&priv->chunks, i), NULL);
		}
		rz_vector_remove_range(&priv->chunks, n, rz_vector_len(&priv->chunks) - n, NULL);
	}
	// now n == rz_vector_len(&priv->chunks)
	bool must_extend = false; // whether we must add another artificial chunk to reach exactly the size
	if (n) {
		RzBufferSparseChunk *c = rz_vector_index_ptr(&priv->chunks, n - 1);
		if (newsize <= c->to) {
			// must chop the now-last block
			assert(newsize); // newsize > 0 is guaranteed when n > 0, otherwise the lower bound above would have returned 0.
			c->to = newsize - 1;
			ut8 *tmp = realloc(c->data, c->to - c->from + 1);
			if (tmp) {
				c->data = tmp;
			}
		} else {
			must_extend = newsize && c->to < newsize - 1;
		}
	} else {
		must_extend = !!newsize;
	}
	if (must_extend) {
		// if necessary, add a byte to reach exactly the desired size
		return !!sparse_write(priv, newsize - 1, &b->Oxff_priv, 1);
	}
	return true;
}

static ut64 buf_sparse_size(RzBuffer *b) {
	SparsePriv *priv = get_priv_sparse(b);
	ut64 max;
	ut64 r = sparse_limits(priv, &max) ? max : 0;
	if (priv->base) {
		ut64 base_sz = rz_buf_size(priv->base);
		if (base_sz > r) {
			r = base_sz;
		}
	}
	return r;
}

static st64 buf_sparse_read(RzBuffer *b, ut8 *buf, ut64 len) {
	if (!len) {
		return 0;
	}
	SparsePriv *priv = get_priv_sparse(b);
	ut64 max = priv->offset + len - 1;
	if (max < priv->offset) {
		max = UT64_MAX;
		len = max - priv->offset + 1;
	}
	// first inside-chunk is special because we might start inside of it
	size_t r = 0;
	size_t i = chunk_index_in(priv, priv->offset);
	if (i) {
		RzBufferSparseChunk *c = rz_vector_index_ptr(&priv->chunks, i - 1);
		if (priv->offset <= c->to) {
			ut64 to = RZ_MIN(c->to, max);
			ut64 rsz = to - priv->offset + 1;
			memcpy(buf, c->data + (priv->offset - c->from), rsz);
			priv->offset += rsz;
			buf += rsz;
			r += rsz;
		}
	}
	// non-chunk/chunk alternating
	while (priv->offset <= max) {
		// in each iteration, write one part like [0xff, 0xff, 0xff][some chunk]
		ut64 empty_to = max; // inclusive offset to which to fill with 0xff
		ut64 next_off = empty_to + 1; // offset to start at in the next iteration
		if (i < rz_vector_len(&priv->chunks)) {
			RzBufferSparseChunk *c = rz_vector_index_ptr(&priv->chunks, i);
			if (c->from <= empty_to) {
				next_off = RZ_MIN(c->to + 1, next_off);
				empty_to = c->from - 1;
				memcpy(buf + empty_to - priv->offset + 1, c->data, next_off - empty_to - 1);
				r += next_off - priv->offset;
			}
			i++;
		}
		if (empty_to >= priv->offset) {
			// fill non-chunk part with 0xff or base file
			if (priv->base) {
				rz_buf_read_at(priv->base, priv->offset, buf, empty_to - priv->offset + 1);
			} else {
				memset(buf, b->Oxff_priv, empty_to - priv->offset + 1);
			}
		}
		buf += next_off - priv->offset;
		priv->offset = next_off;
	}
	return priv->base ? len : r; // if there is a base file, read always fills the entire buffer (to keep the 0xff of the base)
}

static st64 buf_sparse_write(RzBuffer *b, const ut8 *buf, ut64 len) {
	SparsePriv *priv = get_priv_sparse(b);
	st64 r = -1;
	switch (priv->write_mode) {
	case RZ_BUF_SPARSE_WRITE_MODE_SPARSE:
		r = sparse_write(priv, priv->offset, buf, len);
		break;
	case RZ_BUF_SPARSE_WRITE_MODE_THROUGH:
		if (!priv->base) {
			break;
		}
		r = rz_buf_write_at(priv->base, priv->offset, buf, len);
		break;
	}
	if (r >= 0) {
		priv->offset += r;
	}
	return r;
}

static st64 buf_sparse_seek(RzBuffer *b, st64 addr, int whence) {
	struct buf_sparse_priv *priv = get_priv_sparse(b);
	ut64 max;
	if (addr < 0 && (-addr) > (st64)priv->offset) {
		return -1;
	}

	switch (whence) {
	case RZ_BUF_CUR:
		priv->offset += addr;
		break;
	case RZ_BUF_SET:
		priv->offset = addr;
		break;
	case RZ_BUF_END:
		if (!sparse_limits(priv, &max)) {
			max = 0;
		}
		priv->offset = max + addr;
		break;
	default:
		rz_warn_if_reached();
		return -1;
	}
	return priv->offset;
}

static const RzBufferMethods buffer_sparse_methods = {
	.init = buf_sparse_init,
	.fini = buf_sparse_fini,
	.read = buf_sparse_read,
	.write = buf_sparse_write,
	.get_size = buf_sparse_size,
	.resize = buf_sparse_resize,
	.seek = buf_sparse_seek
};

/**
 * Only for sparse RzBuffers, get all sparse data chunks currently populated.
 */
RZ_API const RzBufferSparseChunk *rz_buf_sparse_get_chunks(RzBuffer *b, RZ_NONNULL size_t *count) {
	rz_return_val_if_fail(b && count, NULL);
	if (b->methods != &buffer_sparse_methods) {
		*count = 0;
		return NULL;
	}
	SparsePriv *priv = get_priv_sparse(b);
	*count = rz_vector_len(&priv->chunks);
	return rz_vector_index_ptr(&priv->chunks, 0);
}

/**
 * Switch the write mode of a sparse RzBuffer. Only for sparse RzBuffers.
 */
RZ_API void rz_buf_sparse_set_write_mode(RzBuffer *b, RzBufferSparseWriteMode mode) {
	rz_return_if_fail(b);
	if (b->methods != &buffer_sparse_methods) {
		return;
	}
	SparsePriv *priv = get_priv_sparse(b);
	priv->write_mode = mode;
}

/**
 * \brief Check whether any bytes in the given range are populated in the sparse overlay
 * \param from inclusive start of the range
 * \param to inclusive end of the range
 * \return true iff at least one populated chunk intersects [from, to]
 */
RZ_API bool rz_buf_sparse_populated_in(RzBuffer *b, ut64 from, ut64 to) {
	rz_return_val_if_fail(b, false);
	if (b->methods != &buffer_sparse_methods) {
		return false;
	}
	SparsePriv *priv = get_priv_sparse(b);
	size_t from_i = chunk_index_in(priv, from);
	if (from_i) {
		RzBufferSparseChunk *c = rz_vector_index_ptr(&priv->chunks, from_i - 1);
		if (from <= c->to) {
			return true;
		}
	}
	size_t to_i = chunk_index_in(priv, to);
	return to_i > from_i;
}
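
A brief usage sketch (not part of buf_sparse.c) showing how the public sparse-buffer API above might be exercised. It assumes the rz_buf_new_sparse() constructor declared in rz_buf.h, which creates an RzBuffer backed by buffer_sparse_methods without a base buffer, and it omits error checking.

#include <rz_util.h>

int main(void) {
	RzBuffer *b = rz_buf_new_sparse(0xff); // unpopulated bytes read back as 0xff
	ut8 data[4] = { 1, 2, 3, 4 };
	rz_buf_write_at(b, 0x100, data, sizeof(data)); // creates one chunk covering [0x100, 0x103]
	ut8 out[8] = { 0 };
	rz_buf_read_at(b, 0xfe, out, sizeof(out)); // yields ff ff 01 02 03 04 ff ff
	size_t count;
	const RzBufferSparseChunk *chunks = rz_buf_sparse_get_chunks(b, &count);
	if (chunks && count == 1) {
		// chunks[0].from == 0x100, chunks[0].to == 0x103
	}
	bool pop = rz_buf_sparse_populated_in(b, 0x0, 0x1ff); // true: the range intersects the chunk
	(void)pop;
	rz_buf_free(b);
	return 0;
}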