// SPDX-FileCopyrightText: 2001, 2010 Niels Möller
// SPDX-License-Identifier: LGPL-3.0-only
/* macros.h

   Copyright (C) 2001, 2010 Niels Möller

   This file is part of GNU Nettle.

   GNU Nettle is free software: you can redistribute it and/or
   modify it under the terms of either:

     * the GNU Lesser General Public License as published by the Free
       Software Foundation; either version 3 of the License, or (at your
       option) any later version.

   or

     * the GNU General Public License as published by the Free
       Software Foundation; either version 2 of the License, or (at your
       option) any later version.

   or both in parallel, as here.

   GNU Nettle is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   General Public License for more details.

   You should have received copies of the GNU General Public License and
   the GNU Lesser General Public License along with this program.  If
   not, see http://www.gnu.org/licenses/.
*/
/* Include guard; closed by the matching #endif at end of file. */
#ifndef NETTLE_MACROS_H_INCLUDED
#define NETTLE_MACROS_H_INCLUDED
/* Reads a 64-bit integer, in network (big-endian) byte order.
   p must point to at least 8 readable octets; p is evaluated
   multiple times, so it must be free of side effects. */
#define READ_UINT64(p) \
	(  (((uint64_t) (p)[0]) << 56) \
	 | (((uint64_t) (p)[1]) << 48) \
	 | (((uint64_t) (p)[2]) << 40) \
	 | (((uint64_t) (p)[3]) << 32) \
	 | (((uint64_t) (p)[4]) << 24) \
	 | (((uint64_t) (p)[5]) << 16) \
	 | (((uint64_t) (p)[6]) << 8) \
	 |  ((uint64_t) (p)[7]))
/* Writes a 64-bit integer i to p in network (big-endian) byte order.
   p must point to at least 8 writable octets; both arguments are
   evaluated multiple times. */
#define WRITE_UINT64(p, i) \
	do { \
		(p)[0] = ((i) >> 56) & 0xff; \
		(p)[1] = ((i) >> 48) & 0xff; \
		(p)[2] = ((i) >> 40) & 0xff; \
		(p)[3] = ((i) >> 32) & 0xff; \
		(p)[4] = ((i) >> 24) & 0xff; \
		(p)[5] = ((i) >> 16) & 0xff; \
		(p)[6] = ((i) >> 8) & 0xff; \
		(p)[7] = (i) & 0xff; \
	} while (0)
/* Reads a 32-bit integer, in network (big-endian) byte order.
   p must point to at least 4 readable octets; evaluated multiple times. */
#define READ_UINT32(p) \
	(  (((uint32_t) (p)[0]) << 24) \
	 | (((uint32_t) (p)[1]) << 16) \
	 | (((uint32_t) (p)[2]) << 8) \
	 |  ((uint32_t) (p)[3]))
/* Writes a 32-bit integer i to p in network (big-endian) byte order.
   p must point to at least 4 writable octets; evaluated multiple times. */
#define WRITE_UINT32(p, i) \
	do { \
		(p)[0] = ((i) >> 24) & 0xff; \
		(p)[1] = ((i) >> 16) & 0xff; \
		(p)[2] = ((i) >> 8) & 0xff; \
		(p)[3] = (i) & 0xff; \
	} while (0)
/* Analogous macros, for 24 and 16 bit numbers. */

/* Reads a 24-bit integer, in network (big-endian) byte order.
   p must point to at least 3 readable octets; evaluated multiple times. */
#define READ_UINT24(p) \
	(  (((uint32_t) (p)[0]) << 16) \
	 | (((uint32_t) (p)[1]) << 8) \
	 |  ((uint32_t) (p)[2]))
/* Writes the low 24 bits of i to p in network (big-endian) byte order.
   p must point to at least 3 writable octets; evaluated multiple times. */
#define WRITE_UINT24(p, i) \
	do { \
		(p)[0] = ((i) >> 16) & 0xff; \
		(p)[1] = ((i) >> 8) & 0xff; \
		(p)[2] = (i) & 0xff; \
	} while (0)
/* Reads a 16-bit integer, in network (big-endian) byte order.
   p must point to at least 2 readable octets; evaluated multiple times. */
#define READ_UINT16(p) \
	(  (((uint32_t) (p)[0]) << 8) \
	 |  ((uint32_t) (p)[1]))
/* Writes the low 16 bits of i to p in network (big-endian) byte order.
   p must point to at least 2 writable octets; evaluated multiple times. */
#define WRITE_UINT16(p, i) \
	do { \
		(p)[0] = ((i) >> 8) & 0xff; \
		(p)[1] = (i) & 0xff; \
	} while (0)
/* And the other, little-endian, byte order. */

/* Reads a 64-bit integer in little-endian byte order.
   p must point to at least 8 readable octets; evaluated multiple times. */
#define LE_READ_UINT64(p) \
	(  (((uint64_t) (p)[7]) << 56) \
	 | (((uint64_t) (p)[6]) << 48) \
	 | (((uint64_t) (p)[5]) << 40) \
	 | (((uint64_t) (p)[4]) << 32) \
	 | (((uint64_t) (p)[3]) << 24) \
	 | (((uint64_t) (p)[2]) << 16) \
	 | (((uint64_t) (p)[1]) << 8) \
	 |  ((uint64_t) (p)[0]))
/* Writes a 64-bit integer i to p in little-endian byte order.
   p must point to at least 8 writable octets; evaluated multiple times. */
#define LE_WRITE_UINT64(p, i) \
	do { \
		(p)[7] = ((i) >> 56) & 0xff; \
		(p)[6] = ((i) >> 48) & 0xff; \
		(p)[5] = ((i) >> 40) & 0xff; \
		(p)[4] = ((i) >> 32) & 0xff; \
		(p)[3] = ((i) >> 24) & 0xff; \
		(p)[2] = ((i) >> 16) & 0xff; \
		(p)[1] = ((i) >> 8) & 0xff; \
		(p)[0] = (i) & 0xff; \
	} while (0)
/* Reads a 32-bit integer in little-endian byte order.
   p must point to at least 4 readable octets; evaluated multiple times. */
#define LE_READ_UINT32(p) \
	(  (((uint32_t) (p)[3]) << 24) \
	 | (((uint32_t) (p)[2]) << 16) \
	 | (((uint32_t) (p)[1]) << 8) \
	 |  ((uint32_t) (p)[0]))
/* Writes a 32-bit integer i to p in little-endian byte order.
   p must point to at least 4 writable octets; evaluated multiple times. */
#define LE_WRITE_UINT32(p, i) \
	do { \
		(p)[3] = ((i) >> 24) & 0xff; \
		(p)[2] = ((i) >> 16) & 0xff; \
		(p)[1] = ((i) >> 8) & 0xff; \
		(p)[0] = (i) & 0xff; \
	} while (0)
/* Analogous macros, for 16 bit numbers. */

/* Reads a 16-bit integer in little-endian byte order.
   p must point to at least 2 readable octets; evaluated multiple times. */
#define LE_READ_UINT16(p) \
	(  (((uint32_t) (p)[1]) << 8) \
	 |  ((uint32_t) (p)[0]))
/* Writes the low 16 bits of i to p in little-endian byte order.
   p must point to at least 2 writable octets; evaluated multiple times. */
#define LE_WRITE_UINT16(p, i) \
	do { \
		(p)[1] = ((i) >> 8) & 0xff; \
		(p)[0] = (i) & 0xff; \
	} while (0)
/* Macro to make it easier to loop over several blocks.
   Asserts that length is a whole number of blocks, then iterates a
   following statement, advancing dst and src by blocksize and
   decreasing length by blocksize each time, until length reaches 0.
   All arguments are evaluated multiple times; requires <assert.h>. */
#define FOR_BLOCKS(length, dst, src, blocksize) \
	assert(!((length) % (blocksize))); \
	for (; (length); ((length) -= (blocksize), \
			  (dst) += (blocksize), \
			  (src) += (blocksize)))
/* Rotates the 32-bit value x left by n bits. The masking of the right
   shift count is needed to allow n == 0 (using just 32 - n would shift
   by the full width, which is undefined behaviour). Most uses of this
   macro pass a constant, non-zero rotation count. Arguments are
   evaluated multiple times. */
#define ROTL32(n, x) (((x) << (n)) | ((x) >> ((-(n)) & 31)))
/* Rotates the 64-bit value x left by n bits; the shift count is masked
   so that n == 0 avoids an undefined full-width shift. Arguments are
   evaluated multiple times. */
#define ROTL64(n, x) (((x) << (n)) | ((x) >> ((-(n)) & 63)))
/* Increments a big-endian, multi-octet counter ctr of size octets:
   bumps the last (least significant) octet and propagates carries
   toward the first octet. Requires that size > 0. Arguments are
   evaluated multiple times. */
#define INCREMENT(size, ctr) \
	do { \
		unsigned increment_i = (size) - 1; \
		if (++(ctr)[increment_i] == 0) \
			while (increment_i > 0 \
			       && ++(ctr)[--increment_i] == 0) \
				; \
	} while (0)
/* Helper macros for Merkle-Damgård hash functions. Assumes the context
   struct includes the following fields:

     uint8_t block[...]; // Buffer holding one block
     unsigned int index; // Index into block
*/

/* Increments a two-word (low/high) message-length counter: bumps
   count_low, and carries into count_high exactly when count_low wraps
   to zero. Currently used by sha512 (and sha384) only. */
#define MD_INCR(ctx) ((ctx)->count_high += !++(ctx)->count_low)
/* Feeds (length) octets at (data) into a Merkle-Damgård hash context.
   Takes the compression function f as argument; f((ctx), block) is
   called for each complete block, and (incr) is evaluated after each
   such call to update the processed-length count. Any leftover octets
   are buffered in ctx->block with ctx->index tracking the fill level.
   NOTE: also clobbers length and data. Because of the __md_done label,
   this macro can be expanded at most once per function. Requires
   <string.h> for memcpy. */
#define MD_UPDATE(ctx, length, data, f, incr) \
	do { \
		if ((ctx)->index) \
		{ \
			/* Try to fill partial block */ \
			unsigned __md_left = sizeof((ctx)->block) - (ctx)->index; \
			if ((length) < __md_left) \
			{ \
				memcpy((ctx)->block + (ctx)->index, (data), (length)); \
				(ctx)->index += (length); \
				goto __md_done; /* Finished */ \
			} \
			else \
			{ \
				memcpy((ctx)->block + (ctx)->index, (data), __md_left); \
				\
				f((ctx), (ctx)->block); \
				(incr); \
				\
				(data) += __md_left; \
				(length) -= __md_left; \
			} \
		} \
		while ((length) >= sizeof((ctx)->block)) \
		{ \
			f((ctx), (data)); \
			(incr); \
			\
			(data) += sizeof((ctx)->block); \
			(length) -= sizeof((ctx)->block); \
		} \
		memcpy((ctx)->block, (data), (length)); \
		(ctx)->index = (length); \
	__md_done: \
		; \
	} while (0)
/* Pads the block to a block boundary with the bit pattern 1 0*,
   leaving size octets for the length field at the end. If needed
   (i.e., when the length field does not fit after the 0x80 octet),
   compresses the current block with f and starts a new, zeroed one.
   The caller is expected to store the length into the final (size)
   octets afterwards. Requires <assert.h> and <string.h>. */
#define MD_PAD(ctx, size, f) \
	do { \
		unsigned __md_i; \
		__md_i = (ctx)->index; \
		\
		/* Set the first char of padding to 0x80. This is safe since */ \
		/* there is always at least one byte free. */ \
		assert(__md_i < sizeof((ctx)->block)); \
		(ctx)->block[__md_i++] = 0x80; \
		\
		if (__md_i > (sizeof((ctx)->block) - (size))) \
		{ \
			/* No room for length in this block. Process it and */ \
			/* pad with another one. */ \
			memset((ctx)->block + __md_i, 0, sizeof((ctx)->block) - __md_i); \
			\
			f((ctx), (ctx)->block); \
			__md_i = 0; \
		} \
		memset((ctx)->block + __md_i, 0, \
		       sizeof((ctx)->block) - (size) - __md_i); \
	} while (0)
#endif /* NETTLE_MACROS_H_INCLUDED */
/* subprojects/nettle/macros.h — trailing Doxygen 1.9.1 page footer (extraction artifact). */