IBR-DTNSuite  0.10
gcm.cpp
/*
 ---------------------------------------------------------------------------
 Copyright (c) 1998-2006, Brian Gladman, Worcester, UK. All rights reserved.

 LICENSE TERMS

 The free distribution and use of this software in both source and binary
 form is allowed (with or without changes) provided that:

   1. distributions of this source code include the above copyright
      notice, this list of conditions and the following disclaimer;

   2. distributions in binary form include the above copyright
      notice, this list of conditions and the following disclaimer
      in the documentation and/or other associated materials;

   3. the copyright holder's name is not used to endorse products
      built using this software without specific written permission.

 ALTERNATIVELY, provided that this notice is retained in full, this product
 may be distributed under the terms of the GNU General Public License (GPL),
 in which case the provisions of the GPL apply INSTEAD OF those given above.

 DISCLAIMER

 This software is provided 'as is' with no explicit or implied warranties
 in respect of its properties, including, but not limited to, correctness
 and/or fitness for purpose.
 ---------------------------------------------------------------------------
 Issue Date: 13/06/2006

 My thanks to John Viega and David McGrew for their support in developing
 this code and to David for testing it on a big-endian system.
*/

//#ifdef HAVE_CONFIG_H
//# include <dtn-config.h>
//#endif
#define BSP_ENABLED true

#ifdef BSP_ENABLED

#include "gcm.h"
#include "mode_hdr.h"

#if defined(__cplusplus)
extern "C"
{
#endif

#define BLOCK_SIZE      GCM_BLOCK_SIZE      /* block length                 */
#define BLK_ADR_MASK    (BLOCK_SIZE - 1)    /* mask for 'in block' address  */
#define CTR_POS         12

#define inc_ctr(x)  \
    {   int i = BLOCK_SIZE; while(i-- > CTR_POS && !++(ui8_ptr(x)[i])) ; }
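
/* The inc_ctr() macro above steps the 32-bit counter held big-endian in
   bytes CTR_POS..BLOCK_SIZE-1 (12..15) of the counter block, carrying on
   until a byte increments to a non-zero value.  For example, counter bytes
   { .., 0x00, 0x00, 0x01, 0xff } become { .., 0x00, 0x00, 0x02, 0x00 }. */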

ret_type gcm_init_and_key(                      /* initialise mode and set key  */
            const unsigned char key[],          /* the key value                */
            unsigned long key_len,              /* and its length in bytes      */
            gcm_ctx ctx[1])                     /* the mode context             */
{
    memset(ctx->ghash_h, 0, sizeof(ctx->ghash_h));

    /* set the AES key */
    aes_encrypt_key(key, key_len, ctx->aes);

    /* compute E(0) (for the hash function) */
    aes_encrypt(ui8_ptr(ctx->ghash_h), ui8_ptr(ctx->ghash_h), ctx->aes);

#if defined( TABLES_64K )
    init_64k_table(ui8_ptr(ctx->ghash_h), ctx->gf_t64k);
#elif defined( TABLES_8K )
    init_8k_table(ui8_ptr(ctx->ghash_h), ctx->gf_t8k);
#elif defined( TABLES_4K )
    init_4k_table(ui8_ptr(ctx->ghash_h), ctx->gf_t4k);
#elif defined( TABLES_256 )
    init_256_table(ui8_ptr(ctx->ghash_h), ctx->gf_t256);
#endif
    return RETURN_OK;
}
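
/* At this point ctx->ghash_h holds H = E_K(0^128), the GHASH subkey used by
   the gf_mul_* routines below; the optional TABLES_* builds trade memory for
   speed by precomputing multiples of H.  A minimal usage sketch (assuming a
   caller-supplied 16-byte AES-128 key in 'key'):

       gcm_ctx ctx[1];
       if(gcm_init_and_key(key, 16, ctx) != RETURN_OK)
           ;   // handle the error in a real caller
*/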

#if defined( TABLES_64K )
#define gf_mul_hh(a, ctx, scr)  gf_mul_64k(a, ctx->gf_t64k, scr)
#elif defined( TABLES_8K )
#define gf_mul_hh(a, ctx, scr)  gf_mul_8k(a, ctx->gf_t8k, scr)
#elif defined( TABLES_4K )
#define gf_mul_hh(a, ctx, scr)  gf_mul_4k(a, ctx->gf_t4k, scr)
#elif defined( TABLES_256 )
#define gf_mul_hh(a, ctx, scr)  gf_mul_256(a, ctx->gf_t256, scr)
#else
#define gf_mul_hh(a, ctx, scr)  gf_mul(a, ui8_ptr(ctx->ghash_h))
#endif

ret_type gcm_init_message(                      /* initialise a new message     */
            const unsigned char iv[],           /* the initialisation vector    */
            unsigned long iv_len,               /* and its length in bytes      */
            gcm_ctx ctx[1])                     /* the mode context             */
{   uint_32t i, n_pos = 0, scratch[GF_BYTE_LEN >> 2];
    uint_8t *p;

    memset(ctx->ctr_val, 0, BLOCK_SIZE);
    if(iv_len == CTR_POS)
    {
        memcpy(ctx->ctr_val, iv, CTR_POS); ui8_ptr(ctx->ctr_val)[15] = 0x01;
    }
    else
    {   n_pos = iv_len;
        while(n_pos >= BLOCK_SIZE)
        {
            xor_block_aligned(ctx->ctr_val, iv);
            n_pos -= BLOCK_SIZE;
            iv += BLOCK_SIZE;
            gf_mul_hh(ui8_ptr(ctx->ctr_val), ctx, scratch);
        }

        if(n_pos)
        {
            p = ui8_ptr(ctx->ctr_val);
            while(n_pos-- > 0)
                *p++ ^= *iv++;
            gf_mul_hh(ui8_ptr(ctx->ctr_val), ctx, scratch);
        }
        n_pos = (iv_len << 3);
        for(i = BLOCK_SIZE - 1; n_pos; --i, n_pos >>= 8)
            ui8_ptr(ctx->ctr_val)[i] ^= (unsigned char)n_pos;
        gf_mul_hh(ui8_ptr(ctx->ctr_val), ctx, scratch);
    }

    ctx->y0_val = *ui32_ptr(ui8_ptr(ctx->ctr_val) + CTR_POS);
    inc_ctr(ctx->ctr_val);
    memset(ctx->hdr_ghv, 0, BLOCK_SIZE);
    memset(ctx->txt_ghv, 0, BLOCK_SIZE);
    ctx->hdr_cnt = 0;
    ctx->txt_ccnt = ctx->txt_acnt = 0;
    return RETURN_OK;
}
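
/* The IV handling above follows the GCM specification: a 12-byte (96-bit) IV
   is used directly as the counter block with the counter field set to 1,
   while any other IV length is hashed under GHASH, finishing with a block
   that carries the IV bit length.  A minimal sketch of starting a message
   with a 12-byte IV (the caller must supply a nonce that never repeats for
   the same key):

       unsigned char iv[12];    // filled with a fresh, unique nonce
       gcm_init_message(iv, sizeof(iv), ctx);
*/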

ret_type gcm_auth_header(                       /* authenticate the header      */
            const unsigned char hdr[],          /* the header buffer            */
            unsigned long hdr_len,              /* and its length in bytes      */
            gcm_ctx ctx[1])                     /* the mode context             */
{   uint_32t cnt = 0, b_pos = (uint_32t)ctx->hdr_cnt & BLK_ADR_MASK;
    uint_32t scratch[GF_BYTE_LEN >> 2];

    if(!hdr_len)
        return RETURN_OK;

    if(ctx->hdr_cnt && b_pos == 0)
        gf_mul_hh(ui8_ptr(ctx->hdr_ghv), ctx, scratch);

    while(cnt < hdr_len && (b_pos & BUF_ADRMASK))
        ui8_ptr(ctx->hdr_ghv)[b_pos++] ^= hdr[cnt++];

    if(!(b_pos & BUF_ADRMASK) && !((hdr + cnt - ui8_ptr(ctx->hdr_ghv)) & BUF_ADRMASK))
    {
        while(cnt + BUF_INC <= hdr_len && b_pos <= BLOCK_SIZE - BUF_INC)
        {
            *unit_ptr(ui8_ptr(ctx->hdr_ghv) + b_pos) ^= *unit_ptr(hdr + cnt);
            cnt += BUF_INC; b_pos += BUF_INC;
        }

        while(cnt + BLOCK_SIZE <= hdr_len)
        {
            gf_mul_hh(ui8_ptr(ctx->hdr_ghv), ctx, scratch);
            xor_block_aligned(ctx->hdr_ghv, hdr + cnt);
            cnt += BLOCK_SIZE;
        }
    }
    else
    {
        while(cnt < hdr_len && b_pos < BLOCK_SIZE)
            ui8_ptr(ctx->hdr_ghv)[b_pos++] ^= hdr[cnt++];

        while(cnt + BLOCK_SIZE <= hdr_len)
        {
            gf_mul_hh(ui8_ptr(ctx->hdr_ghv), ctx, scratch);
            xor_block(ctx->hdr_ghv, hdr + cnt);
            cnt += BLOCK_SIZE;
        }
    }

    while(cnt < hdr_len)
    {
        if(b_pos == BLOCK_SIZE)
        {
            gf_mul_hh(ui8_ptr(ctx->hdr_ghv), ctx, scratch);
            b_pos = 0;
        }
        ui8_ptr(ctx->hdr_ghv)[b_pos++] ^= hdr[cnt++];
    }

    ctx->hdr_cnt += cnt;
    return RETURN_OK;
}
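
/* The header (additional authenticated data) is folded into ctx->hdr_ghv one
   block at a time; the aligned path XORs whole machine words while the
   fall-back path works byte by byte, so callers may supply the header in
   arbitrary pieces.  A sketch of incremental use (assuming 'hdr' holds
   hdr_len > 10 bytes):

       gcm_auth_header(hdr, 10, ctx);               // first fragment
       gcm_auth_header(hdr + 10, hdr_len - 10, ctx);
*/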

ret_type gcm_auth_data(                         /* authenticate ciphertext data */
            const unsigned char data[],         /* the data buffer              */
            unsigned long data_len,             /* and its length in bytes      */
            gcm_ctx ctx[1])                     /* the mode context             */
{   uint_32t cnt = 0, b_pos = (uint_32t)(ctx->txt_acnt & BLK_ADR_MASK);
    uint_32t scratch[GF_BYTE_LEN >> 2];

    if(!data_len)
        return RETURN_OK;

    if(ctx->txt_acnt && b_pos == 0)
        gf_mul_hh(ui8_ptr(ctx->txt_ghv), ctx, scratch);

    while(cnt < data_len && (b_pos & BUF_ADRMASK))
        ui8_ptr(ctx->txt_ghv)[b_pos++] ^= data[cnt++];

    if(!(b_pos & BUF_ADRMASK) && !((data + cnt - ui8_ptr(ctx->txt_ghv)) & BUF_ADRMASK))
    {
        while(cnt + BUF_INC <= data_len && b_pos <= BLOCK_SIZE - BUF_INC)
        {
            *unit_ptr(ui8_ptr(ctx->txt_ghv) + b_pos) ^= *unit_ptr(data + cnt);
            cnt += BUF_INC; b_pos += BUF_INC;
        }

        while(cnt + BLOCK_SIZE <= data_len)
        {
            gf_mul_hh(ui8_ptr(ctx->txt_ghv), ctx, scratch);
            xor_block_aligned(ctx->txt_ghv, data + cnt);
            cnt += BLOCK_SIZE;
        }
    }
    else
    {
        while(cnt < data_len && b_pos < BLOCK_SIZE)
            ui8_ptr(ctx->txt_ghv)[b_pos++] ^= data[cnt++];

        while(cnt + BLOCK_SIZE <= data_len)
        {
            gf_mul_hh(ui8_ptr(ctx->txt_ghv), ctx, scratch);
            xor_block(ctx->txt_ghv, data + cnt);
            cnt += BLOCK_SIZE;
        }
    }

    while(cnt < data_len)
    {
        if(b_pos == BLOCK_SIZE)
        {
            gf_mul_hh(ui8_ptr(ctx->txt_ghv), ctx, scratch);
            b_pos = 0;
        }
        ui8_ptr(ctx->txt_ghv)[b_pos++] ^= data[cnt++];
    }

    ctx->txt_acnt += cnt;
    return RETURN_OK;
}

ret_type gcm_crypt_data(                        /* encrypt or decrypt data      */
            unsigned char data[],               /* the data buffer              */
            unsigned long data_len,             /* and its length in bytes      */
            gcm_ctx ctx[1])                     /* the mode context             */
{   uint_32t cnt = 0, b_pos = (uint_32t)(ctx->txt_ccnt & BLK_ADR_MASK);

    if(!data_len)
        return RETURN_OK;

    if(b_pos == 0)
    {
        aes_encrypt(ui8_ptr(ctx->ctr_val), ui8_ptr(ctx->enc_ctr), ctx->aes);
        inc_ctr(ctx->ctr_val);
    }

    while(cnt < data_len && (b_pos & BUF_ADRMASK))
        data[cnt++] ^= ui8_ptr(ctx->enc_ctr)[b_pos++];

    if(!(b_pos & BUF_ADRMASK) && !((data + cnt - ui8_ptr(ctx->enc_ctr)) & BUF_ADRMASK))
    {
        while(cnt + BUF_INC <= data_len && b_pos <= BLOCK_SIZE - BUF_INC)
        {
            *unit_ptr(data + cnt) ^= *unit_ptr(ui8_ptr(ctx->enc_ctr) + b_pos);
            cnt += BUF_INC; b_pos += BUF_INC;
        }

        while(cnt + BLOCK_SIZE <= data_len)
        {
            aes_encrypt(ui8_ptr(ctx->ctr_val), ui8_ptr(ctx->enc_ctr), ctx->aes);
            inc_ctr(ctx->ctr_val);
            xor_block_aligned(data + cnt, ctx->enc_ctr);
            cnt += BLOCK_SIZE;
        }
    }
    else
    {
        while(cnt < data_len && b_pos < BLOCK_SIZE)
            data[cnt++] ^= ui8_ptr(ctx->enc_ctr)[b_pos++];

        while(cnt + BLOCK_SIZE <= data_len)
        {
            aes_encrypt(ui8_ptr(ctx->ctr_val), ui8_ptr(ctx->enc_ctr), ctx->aes);
            inc_ctr(ctx->ctr_val);
            xor_block(data + cnt, ctx->enc_ctr);
            cnt += BLOCK_SIZE;
        }
    }

    while(cnt < data_len)
    {
        if(b_pos == BLOCK_SIZE)
        {
            aes_encrypt(ui8_ptr(ctx->ctr_val), ui8_ptr(ctx->enc_ctr), ctx->aes);
            inc_ctr(ctx->ctr_val);
            b_pos = 0;
        }
        data[cnt++] ^= ui8_ptr(ctx->enc_ctr)[b_pos++];
    }

    ctx->txt_ccnt += cnt;
    return RETURN_OK;
}
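
/* Because CTR mode only XORs the data with an AES-encrypted counter block,
   gcm_crypt_data() serves for both encryption and decryption; only its order
   relative to gcm_auth_data() differs, since GCM always authenticates the
   ciphertext, never the plaintext.  In symbols, C_i = P_i XOR E_K(Y_i) and
   P_i = C_i XOR E_K(Y_i). */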

ret_type gcm_compute_tag(                       /* compute authentication tag   */
            unsigned char tag[],                /* the buffer for the tag       */
            unsigned long tag_len,              /* and its length in bytes      */
            gcm_ctx ctx[1])                     /* the mode context             */
{   uint_32t i, ln, scratch[GF_BYTE_LEN >> 2];
    uint_8t tbuf[BLOCK_SIZE];

    if(ctx->txt_acnt != ctx->txt_ccnt && ctx->txt_ccnt > 0)
        return RETURN_ERROR;

    gf_mul_hh(ui8_ptr(ctx->hdr_ghv), ctx, scratch);
    gf_mul_hh(ui8_ptr(ctx->txt_ghv), ctx, scratch);

#if 1   /* alternative versions of the exponentiation operation */
    if(ctx->hdr_cnt && (ln = (uint_32t)((ctx->txt_acnt + BLOCK_SIZE - 1) / BLOCK_SIZE)))
    {
        memcpy(tbuf, ctx->ghash_h, BLOCK_SIZE);
        for( ; ; )
        {
            if(ln & 1) gf_mul(ui8_ptr(ctx->hdr_ghv), tbuf);
            if(!(ln >>= 1)) break;
            gf_mul(tbuf, tbuf);
        }
    }
#else   /* this one seems slower on x86 and x86_64 :-( */
    if(ctx->hdr_cnt && (ln = (uint_32t)((ctx->txt_acnt + BLOCK_SIZE - 1) / BLOCK_SIZE)))
    {
        i = ln | ln >> 1; i |= i >> 2; i |= i >> 4;
        i |= i >> 8; i |= i >> 16; i = i & ~(i >> 1);
        memset(tbuf, 0, BLOCK_SIZE);
        tbuf[0] = 0x80;
        while(i)
        {
            gf_mul(tbuf, tbuf);
            if(i & ln)
                gf_mul_hh(tbuf, ctx, scratch);
            i >>= 1;
        }
        gf_mul(ui8_ptr(ctx->hdr_ghv), tbuf);
    }
#endif
    i = BLOCK_SIZE; ln = (uint_32t)(ctx->txt_acnt << 3);
    while(i-- > 0)
    {
        ui8_ptr(ctx->hdr_ghv)[i] ^= ui8_ptr(ctx->txt_ghv)[i] ^ (unsigned char)ln;
        ln = (i == 8 ? (uint_32t)(ctx->hdr_cnt << 3) : ln >> 8);
    }

    gf_mul_hh(ui8_ptr(ctx->hdr_ghv), ctx, scratch);

    *ui32_ptr(ui8_ptr(ctx->ctr_val) + CTR_POS) = ctx->y0_val;
    aes_encrypt(ui8_ptr(ctx->ctr_val), ui8_ptr(ctx->enc_ctr), ctx->aes);
    for(i = 0; i < (unsigned int)tag_len; ++i)
        tag[i] = ui8_ptr(ctx->hdr_ghv)[i] ^ ui8_ptr(ctx->enc_ctr)[i];

    return (ctx->txt_ccnt == ctx->txt_acnt ? RETURN_OK : RETURN_WARN);
}
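
/* Tag computation above: the header hash is aligned with the text hash by
   multiplying it through the appropriate power of H (the exponentiation
   loop), the final GHASH block mixes in the header and ciphertext bit
   lengths, and the tag is formed as GHASH XOR E_K(Y0), where Y0 is the
   original counter value restored from ctx->y0_val.  RETURN_WARN signals
   that the encrypted and authenticated byte counts differ. */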

ret_type gcm_end(                               /* clean up and end operation   */
            gcm_ctx ctx[1])                     /* the mode context             */
{
    memset(ctx, 0, sizeof(gcm_ctx));
    return RETURN_OK;
}

ret_type gcm_encrypt(                           /* encrypt & authenticate data  */
            unsigned char data[],               /* the data buffer              */
            unsigned long data_len,             /* and its length in bytes      */
            gcm_ctx ctx[1])                     /* the mode context             */
{
    gcm_crypt_data(data, data_len, ctx);
    gcm_auth_data(data, data_len, ctx);
    return RETURN_OK;
}

ret_type gcm_decrypt(                           /* authenticate & decrypt data  */
            unsigned char data[],               /* the data buffer              */
            unsigned long data_len,             /* and its length in bytes      */
            gcm_ctx ctx[1])                     /* the mode context             */
{
    gcm_auth_data(data, data_len, ctx);
    gcm_crypt_data(data, data_len, ctx);
    return RETURN_OK;
}

ret_type gcm_encrypt_message(                   /* encrypt an entire message    */
            const unsigned char iv[],           /* the initialisation vector    */
            unsigned long iv_len,               /* and its length in bytes      */
            const unsigned char hdr[],          /* the header buffer            */
            unsigned long hdr_len,              /* and its length in bytes      */
            unsigned char msg[],                /* the message buffer           */
            unsigned long msg_len,              /* and its length in bytes      */
            unsigned char tag[],                /* the buffer for the tag       */
            unsigned long tag_len,              /* and its length in bytes      */
            gcm_ctx ctx[1])                     /* the mode context             */
{
    gcm_init_message(iv, iv_len, ctx);
    gcm_auth_header(hdr, hdr_len, ctx);
    gcm_encrypt(msg, msg_len, ctx);
    return gcm_compute_tag(tag, tag_len, ctx) ? RETURN_ERROR : RETURN_OK;
}
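
/* A minimal all-in-one encryption sketch (buffer names and sizes are the
   caller's; the key must already have been set with gcm_init_and_key(), and
   the message is encrypted in place):

       unsigned char iv[12], tag[16];
       // fill iv with a fresh nonce, then:
       if(gcm_encrypt_message(iv, sizeof(iv), hdr, hdr_len,
                              msg, msg_len, tag, sizeof(tag), ctx) != RETURN_OK)
           ;   // handle failure
*/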

ret_type gcm_decrypt_message(                   /* decrypt an entire message    */
            const unsigned char iv[],           /* the initialisation vector    */
            unsigned long iv_len,               /* and its length in bytes      */
            const unsigned char hdr[],          /* the header buffer            */
            unsigned long hdr_len,              /* and its length in bytes      */
            unsigned char msg[],                /* the message buffer           */
            unsigned long msg_len,              /* and its length in bytes      */
            const unsigned char tag[],          /* the buffer for the tag       */
            unsigned long tag_len,              /* and its length in bytes      */
            gcm_ctx ctx[1])                     /* the mode context             */
{   uint_8t local_tag[BLOCK_SIZE];
    ret_type rr;

    gcm_init_message(iv, iv_len, ctx);
    gcm_auth_header(hdr, hdr_len, ctx);
    gcm_decrypt(msg, msg_len, ctx);
    rr = gcm_compute_tag(local_tag, tag_len, ctx);
    return (rr != RETURN_OK || memcmp(tag, local_tag, tag_len)) ? RETURN_ERROR : RETURN_OK;
}
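
/* Note that the message is decrypted in place before the tag is verified, so
   the caller must treat the buffer contents as untrusted and discard them
   when RETURN_ERROR is returned.  A minimal sketch:

       if(gcm_decrypt_message(iv, 12, hdr, hdr_len,
                              msg, msg_len, tag, 16, ctx) != RETURN_OK)
       {
           // authentication failed: wipe and reject the recovered plaintext
       }
*/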

#if defined(__cplusplus)
}
#endif

#endif /* BSP_ENABLED */