IBR-DTNSuite  0.10
mode_hdr.h
Go to the documentation of this file.
1 /*
2  ---------------------------------------------------------------------------
3  Copyright (c) 1998-2006, Brian Gladman, Worcester, UK. All rights reserved.
4 
5  LICENSE TERMS
6 
7  The free distribution and use of this software in both source and binary
8  form is allowed (with or without changes) provided that:
9 
10  1. distributions of this source code include the above copyright
11  notice, this list of conditions and the following disclaimer;
12 
13  2. distributions in binary form include the above copyright
14  notice, this list of conditions and the following disclaimer
15  in the documentation and/or other associated materials;
16 
17  3. the copyright holder's name is not used to endorse products
18  built using this software without specific written permission.
19 
20  ALTERNATIVELY, provided that this notice is retained in full, this product
21  may be distributed under the terms of the GNU General Public License (GPL),
22  in which case the provisions of the GPL apply INSTEAD OF those given above.
23 
24  DISCLAIMER
25 
26  This software is provided 'as is' with no explicit or implied warranties
27  in respect of its properties, including, but not limited to, correctness
28  and/or fitness for purpose.
29  ---------------------------------------------------------------------------
30  Issue Date: 13/10/2006
31 
32  This header file is an INTERNAL file which supports mode implementation
33 */
34 
35 /* This file changed 5 June 2007 to reflect name change
36  of included file from "aes.h" to "gcm_aes.h"
37  Changed by Peter Lovell, SPARTA Inc., for DTN project.
38 */
39 
#ifndef _MODE_HDR_H
#define _MODE_HDR_H

/* This define sets the units in which buffers are processed. This code
   can provide significant speed gains if buffers can be processed in
   32 or 64 bit chunks rather than in bytes. This define sets the units
   in which buffers will be accessed if possible.
   Exactly one arm of the '#if 1 / #elif 0 / #else' chain below is live;
   change the conditions (not the values) to select 64, 32 or 8 bit units.
*/
#if !defined( BFR_UNIT )
# if 1
# define BFR_UNIT 64
# elif 0
# define BFR_UNIT 32
# else
# define BFR_UNIT 8
# endif
#endif

/* Use of inlines is preferred but code blocks can also be expanded inline
   using 'defines'. But the latter approach will typically generate a LOT
   of code and is not recommended.
   (The leading '1 &&' is a manual switch: change it to '0 &&' to force
   the macro-expansion path even when USE_INLINING is not pre-defined.)
*/
#if 1 && !defined( USE_INLINING )
# define USE_INLINING
#endif
65 
66 #include <string.h>
67 #include <limits.h>
68 
/* Map the rotate and byte-swap helpers onto MSVC compiler intrinsics.
   VC8 (1400) and later have 64-bit rotates and byte-swap intrinsics;
   older versions only get the 32-bit rotates (_lrotl/_lrotr).
*/
#if defined( _MSC_VER )
# if _MSC_VER >= 1400
# include <stdlib.h>
# include <intrin.h>
# pragma intrinsic(memset)
# pragma intrinsic(memcpy)
# define rotl32 _rotl
# define rotr32 _rotr
# define rotl64 _rotl64
# define rotr64 _rotr64    /* BUG FIX: was _rotl64 - right rotate must map
                              to the right-rotate intrinsic */
# define bswap_32(x) _byteswap_ulong(x)
# define bswap_64(x) _byteswap_uint64(x)
# else
# define rotl32 _lrotl
# define rotr32 _lrotr
# endif
#endif

/* brg_types.h only defines uint_64t when asked to */
#if BFR_UNIT == 64
# define NEED_UINT_64T
#endif
90 
#include "brg_endian.h"
#include "brg_types.h"
#include "gcm_aes.h"

/* Choose the qualifier used for the inline helper functions below:
   '__inline' for MSVC, 'static inline' for GCC/glibc, plain 'static'
   otherwise.  If USE_INLINING is not defined, mh_inline stays undefined
   and the macro versions of the helpers are used instead.
*/
#if defined( USE_INLINING )
# if defined( _MSC_VER )
# define mh_inline __inline
# elif defined( __GNUC__ ) || defined( __GNU_LIBRARY__ )
# define mh_inline static inline
# else
# define mh_inline static
# endif
#endif

#if defined(__cplusplus)
extern "C" {
#endif

/* cast a pointer to a pointer-to-uint of the given bit width
   (ptr_cast comes from brg_types.h) */
#define ui8_ptr(x) ptr_cast(x, 8)
#define ui16_ptr(x) ptr_cast(x, 16)
#define ui32_ptr(x) ptr_cast(x, 32)
#define ui64_ptr(x) ptr_cast(x, 64)
#define unit_ptr(x) ptr_cast(x, BFR_UNIT)

#define BUF_INC (BFR_UNIT >> 3)           /* buffer step in bytes per unit     */
#define BUF_ADRMASK ((BFR_UNIT >> 3) - 1) /* low-address-bit mask: non-zero on
                                             a pointer means it is unaligned
                                             for BFR_UNIT-wide access         */
/* function pointers might be used for fast XOR operations; the signature
   matches xor_block/xor_block_aligned below (destination, const source) */

typedef void (*xor_function)(void*, const void* q);

/* left and right rotates on 32 and 64 bit variables */

#if defined( mh_inline )
126 #if !defined( rotl32 ) // NOTE: 0 <= n <= 32 ASSUMED
127 mh_inline uint_32t rotl32(uint_32t x, int n)
128 {
129  return (((x) << n) | ((x) >> (32 - n)));
130 }
131 #endif
132 
133 #if !defined( rotr32 ) // NOTE: 0 <= n <= 32 ASSUMED
134 mh_inline uint_32t rotr32(uint_32t x, int n)
135 {
136  return (((x) >> n) | ((x) << (32 - n)));
137 }
138 #endif
139 
140 #if !defined( rotl64 ) // NOTE: 0 <= n <= 64 ASSUMED
141 mh_inline uint_64t rotl64(uint_64t x, int n)
142 {
143  return (((x) << n) | ((x) >> (64 - n)));
144 }
145 #endif
146 
147 #if !defined( rotr64 ) // NOTE: 0 <= n <= 64 ASSUMED
148 mh_inline uint_64t rotr64(uint_64t x, int n)
149 {
150  return (((x) >> n) | ((x) << (64 - n)));
151 }
152 #endif
153 
154 /* byte order inversions for 32 and 64 bit variables */
155 
156 #if !defined(bswap_32)
157 mh_inline uint_32t bswap_32(uint_32t x)
158 {
159  return ((rotr32((x), 24) & 0x00ff00ff) | (rotr32((x), 8) & 0xff00ff00));
160 }
161 #endif
162 
163 #if !defined(bswap_64)
164 mh_inline uint_64t bswap_64(uint_64t x)
165 {
166  return bswap_32((uint_32t)(x >> 32)) | ((uint_64t)bswap_32((uint_32t)x) << 32);
167 }
168 #endif
169 
170 mh_inline void bswap32_block(void* d, const void* s, int n)
171 {
172  while(n--)
173  ((uint_32t*)d)[n] = bswap_32(((uint_32t*)s)[n]);
174 }
175 
176 mh_inline void bswap64_block(void* d, const void* s, int n)
177 {
178  while(n--)
179  ((uint_64t*)d)[n] = bswap_64(((uint_64t*)s)[n]);
180 }
181 
182 #else
183 
184 #if !defined( rotl32 ) // NOTE: 0 <= n <= 32 ASSUMED
185 # define rotl32(x,n) (((x) << n) | ((x) >> (32 - n)))
186 #endif
187 
188 #if !defined( rotr32 ) // NOTE: 0 <= n <= 32 ASSUMED
189 # define rotr32(x,n) (((x) >> n) | ((x) << (32 - n)))
190 #endif
191 
192 #if !defined( rotl64 ) // NOTE: 0 <= n <= 64 ASSUMED
193 # define rotl64(x,n) (((x) << n) | ((x) >> (64 - n)))
194 #endif
195 
196 #if !defined( rotr64 ) // NOTE: 0 <= n <= 64 ASSUMED
197 # define rotr64(x,n) (((x) >> n) | ((x) << (64 - n)))
198 #endif
199 
200 #if !defined(bswap_32)
201 # define bswap_32(x) ((rotr32((x), 24) & 0x00ff00ff) | (rotr32((x), 8) & 0xff00ff00))
202 #endif
203 
204 #if !defined(bswap_64)
205 # define bswap_64(x) (bswap_32((uint_32t)(x >> 32)) | ((uint_64t)bswap_32((uint_32t)x) << 32))
206 #endif
207 
208 #define bswap32_block(d,s,n) \
209  { int _i = (n); while(_i--) ui32_ptr(d)[_i] = bswap_32(ui32_ptr(s)[_i]); }
210 
211 #define bswap64_block(d,s,n) \
212  { int _i = (n); while(_i--) ui64_ptr(d)[_i] = bswap_64(ui64_ptr(s)[_i]); }
213 
214 #endif
215 
/* support for fast aligned buffer move and XOR operations */

#if defined( mh_inline )

/* Copy one 16 byte block from q to p (no alignment requirement;
   regions must not overlap - memcpy semantics) */
mh_inline void move_block(void* p, const void* q)
{
    memcpy(p, q, 16);
}
224 
/* Copy one 16 byte block from q to p in BFR_UNIT-bit units; both
   pointers must be aligned on a BFR_UNIT/8 byte boundary (falls back
   to the byte-wise move_block when BFR_UNIT is 8).
*/
mh_inline void move_block_aligned( void *p, const void *q)
{
#if BFR_UNIT == 8
    move_block(p, q);
#else
    /* two units cover 16 bytes at 64-bit width ... */
    unit_ptr(p)[0] = unit_ptr(q)[0]; unit_ptr(p)[1] = unit_ptr(q)[1];
# if BFR_UNIT == 32
    /* ... four units at 32-bit width */
    unit_ptr(p)[2] = unit_ptr(q)[2]; unit_ptr(p)[3] = unit_ptr(q)[3];
# endif
#endif
}
236 
237 mh_inline void xor_block(void* p, const void* q)
238 {
239  ui8_ptr(p)[ 0] ^= ui8_ptr(q)[ 0]; ui8_ptr(p)[ 1] ^= ui8_ptr(q)[ 1];
240  ui8_ptr(p)[ 2] ^= ui8_ptr(q)[ 2]; ui8_ptr(p)[ 3] ^= ui8_ptr(q)[ 3];
241  ui8_ptr(p)[ 4] ^= ui8_ptr(q)[ 4]; ui8_ptr(p)[ 5] ^= ui8_ptr(q)[ 5];
242  ui8_ptr(p)[ 6] ^= ui8_ptr(q)[ 6]; ui8_ptr(p)[ 7] ^= ui8_ptr(q)[ 7];
243  ui8_ptr(p)[ 8] ^= ui8_ptr(q)[ 8]; ui8_ptr(p)[ 9] ^= ui8_ptr(q)[ 9];
244  ui8_ptr(p)[10] ^= ui8_ptr(q)[10]; ui8_ptr(p)[11] ^= ui8_ptr(q)[11];
245  ui8_ptr(p)[12] ^= ui8_ptr(q)[12]; ui8_ptr(p)[13] ^= ui8_ptr(q)[13];
246  ui8_ptr(p)[14] ^= ui8_ptr(q)[14]; ui8_ptr(p)[15] ^= ui8_ptr(q)[15];
247 }
248 
/* XOR the 16 byte block at q into the 16 byte block at p in
   BFR_UNIT-bit units; both pointers must be aligned on a BFR_UNIT/8
   byte boundary (falls back to byte-wise xor_block when BFR_UNIT is 8).
*/
mh_inline void xor_block_aligned( void *p, const void *q)
{
#if BFR_UNIT == 8
    xor_block(p, q);
#else
    /* two units cover 16 bytes at 64-bit width ... */
    unit_ptr(p)[0] ^= unit_ptr(q)[0]; unit_ptr(p)[1] ^= unit_ptr(q)[1];
# if BFR_UNIT == 32
    /* ... four units at 32-bit width */
    unit_ptr(p)[2] ^= unit_ptr(q)[2]; unit_ptr(p)[3] ^= unit_ptr(q)[3];
# endif
#endif
}
260 
#else  /* no mh_inline: macro versions of the block move/XOR helpers.
          These expand to comma expressions so they remain usable where
          an expression is expected. */

/* copy one 16 byte block from q to p (no alignment requirement) */
#define move_block(p,q) memcpy((p), (q), 16)

/* aligned copy: both pointers must be BFR_UNIT/8 byte aligned */
#if BFR_UNIT == 64
# define move_block_aligned(p,q) \
    ui64_ptr(p)[0] = ui64_ptr(q)[0], ui64_ptr(p)[1] = ui64_ptr(q)[1]
#elif BFR_UNIT == 32
# define move_block_aligned(p,q) \
    ui32_ptr(p)[0] = ui32_ptr(q)[0], ui32_ptr(p)[1] = ui32_ptr(q)[1], \
    ui32_ptr(p)[2] = ui32_ptr(q)[2], ui32_ptr(p)[3] = ui32_ptr(q)[3]
#else
# define move_block_aligned(p,q) move_block(p,q)
#endif

/* XOR the 16 byte block at q into the block at p, byte by byte */
#define xor_block(p,q) \
    ui8_ptr(p)[ 0] ^= ui8_ptr(q)[ 0], ui8_ptr(p)[ 1] ^= ui8_ptr(q)[ 1], \
    ui8_ptr(p)[ 2] ^= ui8_ptr(q)[ 2], ui8_ptr(p)[ 3] ^= ui8_ptr(q)[ 3], \
    ui8_ptr(p)[ 4] ^= ui8_ptr(q)[ 4], ui8_ptr(p)[ 5] ^= ui8_ptr(q)[ 5], \
    ui8_ptr(p)[ 6] ^= ui8_ptr(q)[ 6], ui8_ptr(p)[ 7] ^= ui8_ptr(q)[ 7], \
    ui8_ptr(p)[ 8] ^= ui8_ptr(q)[ 8], ui8_ptr(p)[ 9] ^= ui8_ptr(q)[ 9], \
    ui8_ptr(p)[10] ^= ui8_ptr(q)[10], ui8_ptr(p)[11] ^= ui8_ptr(q)[11], \
    ui8_ptr(p)[12] ^= ui8_ptr(q)[12], ui8_ptr(p)[13] ^= ui8_ptr(q)[13], \
    ui8_ptr(p)[14] ^= ui8_ptr(q)[14], ui8_ptr(p)[15] ^= ui8_ptr(q)[15]

/* aligned XOR: both pointers must be BFR_UNIT/8 byte aligned */
#if BFR_UNIT == 64
# define xor_block_aligned(p,q) \
    ui64_ptr(p)[0] ^= ui64_ptr(q)[0], ui64_ptr(p)[1] ^= ui64_ptr(q)[1]
#elif BFR_UNIT == 32
# define xor_block_aligned(p,q) \
    ui32_ptr(p)[0] ^= ui32_ptr(q)[0], ui32_ptr(p)[1] ^= ui32_ptr(q)[1], \
    ui32_ptr(p)[2] ^= ui32_ptr(q)[2], ui32_ptr(p)[3] ^= ui32_ptr(q)[3]
#else
# define xor_block_aligned(p,q) xor_block(p,q)
#endif

#endif
298 
/* platform byte order to big or little endian order for 32 and 64 bit variables */

/* In-place conversions: each expands to an assignment that byte-swaps x
   when the platform order differs from the target order, and to nothing
   (a no-op) when they already match.  PLATFORM_BYTE_ORDER and
   IS_BIG_ENDIAN come from brg_endian.h.
*/
#if PLATFORM_BYTE_ORDER == IS_BIG_ENDIAN
# define uint_32t_to_le(x) (x) = bswap_32((x))
# define uint_64t_to_le(x) (x) = bswap_64((x))
# define uint_32t_to_be(x)
# define uint_64t_to_be(x)
#else
# define uint_32t_to_le(x)
# define uint_64t_to_le(x)
# define uint_32t_to_be(x) (x) = bswap_32((x))
# define uint_64t_to_be(x) (x) = bswap_64((x))
#endif

#if defined(__cplusplus)
}
#endif

#endif /* _MODE_HDR_H */