1 /* gf128mul.c - GF(2^128) multiplication functions
3 * Copyright (c) 2003, Dr Brian Gladman, Worcester, UK.
4 * Copyright (c) 2006, Rik Snel <rsnel@cube.dyndns.org>
6 * Based on Dr Brian Gladman's (GPL'd) work published at
7 * http://gladman.plushost.co.uk/oldsite/cryptography_technology/index.php
8 * See the original copyright notice below.
10 * This program is free software; you can redistribute it and/or modify it
11 * under the terms of the GNU General Public License as published by the Free
12 * Software Foundation; either version 2 of the License, or (at your option)
17 ---------------------------------------------------------------------------
18 Copyright (c) 2003, Dr Brian Gladman, Worcester, UK. All rights reserved.
22 The free distribution and use of this software in both source and binary
23 form is allowed (with or without changes) provided that:
25 1. distributions of this source code include the above copyright
26 notice, this list of conditions and the following disclaimer;
28 2. distributions in binary form include the above copyright
29 notice, this list of conditions and the following disclaimer
30 in the documentation and/or other associated materials;
32 3. the copyright holder's name is not used to endorse products
33 built using this software without specific written permission.
35 ALTERNATIVELY, provided that this notice is retained in full, this product
36 may be distributed under the terms of the GNU General Public License (GPL),
37 in which case the provisions of the GPL apply INSTEAD OF those given above.
41 This software is provided 'as is' with no explicit or implied warranties
42 in respect of its properties, including, but not limited to, correctness
43 and/or fitness for purpose.
44 ---------------------------------------------------------------------------
   This file provides fast multiplication in GF(2^128) as required by several
   cryptographic authentication modes
*/
51 #include <crypto/gf128mul.h>
52 #include <linux/kernel.h>
53 #include <linux/module.h>
54 #include <linux/slab.h>
/*
 * Expand q(i) for every i in 0..255 inside a brace-enclosed initializer.
 * Used below to build the 256-entry reduction tables from the xda_le()
 * and xda_be() per-byte reduction macros.
 *
 * Fix: the closing '}' of the initializer list was missing, leaving the
 * macro (and every table built from it) syntactically unterminated.
 */
#define gf128mul_dat(q) { \
	q(0x00), q(0x01), q(0x02), q(0x03), q(0x04), q(0x05), q(0x06), q(0x07),\
	q(0x08), q(0x09), q(0x0a), q(0x0b), q(0x0c), q(0x0d), q(0x0e), q(0x0f),\
	q(0x10), q(0x11), q(0x12), q(0x13), q(0x14), q(0x15), q(0x16), q(0x17),\
	q(0x18), q(0x19), q(0x1a), q(0x1b), q(0x1c), q(0x1d), q(0x1e), q(0x1f),\
	q(0x20), q(0x21), q(0x22), q(0x23), q(0x24), q(0x25), q(0x26), q(0x27),\
	q(0x28), q(0x29), q(0x2a), q(0x2b), q(0x2c), q(0x2d), q(0x2e), q(0x2f),\
	q(0x30), q(0x31), q(0x32), q(0x33), q(0x34), q(0x35), q(0x36), q(0x37),\
	q(0x38), q(0x39), q(0x3a), q(0x3b), q(0x3c), q(0x3d), q(0x3e), q(0x3f),\
	q(0x40), q(0x41), q(0x42), q(0x43), q(0x44), q(0x45), q(0x46), q(0x47),\
	q(0x48), q(0x49), q(0x4a), q(0x4b), q(0x4c), q(0x4d), q(0x4e), q(0x4f),\
	q(0x50), q(0x51), q(0x52), q(0x53), q(0x54), q(0x55), q(0x56), q(0x57),\
	q(0x58), q(0x59), q(0x5a), q(0x5b), q(0x5c), q(0x5d), q(0x5e), q(0x5f),\
	q(0x60), q(0x61), q(0x62), q(0x63), q(0x64), q(0x65), q(0x66), q(0x67),\
	q(0x68), q(0x69), q(0x6a), q(0x6b), q(0x6c), q(0x6d), q(0x6e), q(0x6f),\
	q(0x70), q(0x71), q(0x72), q(0x73), q(0x74), q(0x75), q(0x76), q(0x77),\
	q(0x78), q(0x79), q(0x7a), q(0x7b), q(0x7c), q(0x7d), q(0x7e), q(0x7f),\
	q(0x80), q(0x81), q(0x82), q(0x83), q(0x84), q(0x85), q(0x86), q(0x87),\
	q(0x88), q(0x89), q(0x8a), q(0x8b), q(0x8c), q(0x8d), q(0x8e), q(0x8f),\
	q(0x90), q(0x91), q(0x92), q(0x93), q(0x94), q(0x95), q(0x96), q(0x97),\
	q(0x98), q(0x99), q(0x9a), q(0x9b), q(0x9c), q(0x9d), q(0x9e), q(0x9f),\
	q(0xa0), q(0xa1), q(0xa2), q(0xa3), q(0xa4), q(0xa5), q(0xa6), q(0xa7),\
	q(0xa8), q(0xa9), q(0xaa), q(0xab), q(0xac), q(0xad), q(0xae), q(0xaf),\
	q(0xb0), q(0xb1), q(0xb2), q(0xb3), q(0xb4), q(0xb5), q(0xb6), q(0xb7),\
	q(0xb8), q(0xb9), q(0xba), q(0xbb), q(0xbc), q(0xbd), q(0xbe), q(0xbf),\
	q(0xc0), q(0xc1), q(0xc2), q(0xc3), q(0xc4), q(0xc5), q(0xc6), q(0xc7),\
	q(0xc8), q(0xc9), q(0xca), q(0xcb), q(0xcc), q(0xcd), q(0xce), q(0xcf),\
	q(0xd0), q(0xd1), q(0xd2), q(0xd3), q(0xd4), q(0xd5), q(0xd6), q(0xd7),\
	q(0xd8), q(0xd9), q(0xda), q(0xdb), q(0xdc), q(0xdd), q(0xde), q(0xdf),\
	q(0xe0), q(0xe1), q(0xe2), q(0xe3), q(0xe4), q(0xe5), q(0xe6), q(0xe7),\
	q(0xe8), q(0xe9), q(0xea), q(0xeb), q(0xec), q(0xed), q(0xee), q(0xef),\
	q(0xf0), q(0xf1), q(0xf2), q(0xf3), q(0xf4), q(0xf5), q(0xf6), q(0xf7),\
	q(0xf8), q(0xf9), q(0xfa), q(0xfb), q(0xfc), q(0xfd), q(0xfe), q(0xff) \
}
/*
 * Given a value i in 0..255 as the byte overflow when a field element
 * in GF(2^128) is multiplied by x^8, the following macro returns the
 * 16-bit value that must be XOR-ed into the low-degree end of the
 * product to reduce it modulo the irreducible polynomial x^128 + x^7 +
 * x^2 + x + 1.
 *
 * There are two versions of the macro, and hence two tables: one for
 * the "be" convention where the highest-order bit is the coefficient of
 * the highest-degree polynomial term, and one for the "le" convention
 * where the highest-order bit is the coefficient of the lowest-degree
 * polynomial term. In both cases the values are stored in CPU byte
 * endianness such that the coefficients are ordered consistently across
 * bytes, i.e. in the "be" table bits 15..0 of the stored value
 * correspond to the coefficients of x^15..x^0, and in the "le" table
 * bits 15..0 correspond to the coefficients of x^0..x^15.
 *
 * Therefore, provided that the appropriate byte endianness conversions
 * are done by the multiplication functions (and these must be in place
 * anyway to support both little endian and big endian CPUs), the "be"
 * table can be used for multiplications of both "bbe" and "ble"
 * elements, and the "le" table can be used for multiplications of both
 * "lle" and "lbe" elements.
 */
/*
 * Per-byte reduction constants, computed at compile time.  xda_be(i)
 * XORs together the reduction value of each set bit of i under the
 * "be" bit convention; xda_le(i) does the same for the "le" convention.
 *
 * Fix: the closing ')' terminating each macro's expression was missing,
 * leaving both macros syntactically unterminated.
 */
#define xda_be(i) ( \
	(i & 0x80 ? 0x4380 : 0) ^ (i & 0x40 ? 0x21c0 : 0) ^ \
	(i & 0x20 ? 0x10e0 : 0) ^ (i & 0x10 ? 0x0870 : 0) ^ \
	(i & 0x08 ? 0x0438 : 0) ^ (i & 0x04 ? 0x021c : 0) ^ \
	(i & 0x02 ? 0x010e : 0) ^ (i & 0x01 ? 0x0087 : 0) \
)

#define xda_le(i) ( \
	(i & 0x80 ? 0xe100 : 0) ^ (i & 0x40 ? 0x7080 : 0) ^ \
	(i & 0x20 ? 0x3840 : 0) ^ (i & 0x10 ? 0x1c20 : 0) ^ \
	(i & 0x08 ? 0x0e10 : 0) ^ (i & 0x04 ? 0x0708 : 0) ^ \
	(i & 0x02 ? 0x0384 : 0) ^ (i & 0x01 ? 0x01c2 : 0) \
)
/*
 * Reduction tables: entry i is the 16-bit polynomial value to XOR into
 * the low-degree end of a product when byte i overflows during a
 * multiplication by x^8, one table per bit convention.
 */
static const u16 gf128mul_table_le[256] = gf128mul_dat(xda_le);
static const u16 gf128mul_table_be[256] = gf128mul_dat(xda_be);
/* These functions multiply a field element by x, by x^4 and by x^8
 * in the polynomial field representation. They use 64-bit word operations
 * to gain speed but compensate for machine endianness and hence work
 * correctly on both styles of machine.
 */
139 static void gf128mul_x_lle(be128 *r, const be128 *x)
141 u64 a = be64_to_cpu(x->a);
142 u64 b = be64_to_cpu(x->b);
143 u64 _tt = gf128mul_table_le[(b << 7) & 0xff];
145 r->b = cpu_to_be64((b >> 1) | (a << 63));
146 r->a = cpu_to_be64((a >> 1) ^ (_tt << 48));
149 static void gf128mul_x_bbe(be128 *r, const be128 *x)
151 u64 a = be64_to_cpu(x->a);
152 u64 b = be64_to_cpu(x->b);
153 u64 _tt = gf128mul_table_be[a >> 63];
155 r->a = cpu_to_be64((a << 1) | (b >> 63));
156 r->b = cpu_to_be64((b << 1) ^ _tt);
159 void gf128mul_x_ble(be128 *r, const be128 *x)
161 u64 a = le64_to_cpu(x->a);
162 u64 b = le64_to_cpu(x->b);
163 u64 _tt = gf128mul_table_be[b >> 63];
165 r->a = cpu_to_le64((a << 1) ^ _tt);
166 r->b = cpu_to_le64((b << 1) | (a >> 63));
168 EXPORT_SYMBOL(gf128mul_x_ble);
170 static void gf128mul_x8_lle(be128 *x)
172 u64 a = be64_to_cpu(x->a);
173 u64 b = be64_to_cpu(x->b);
174 u64 _tt = gf128mul_table_le[b & 0xff];
176 x->b = cpu_to_be64((b >> 8) | (a << 56));
177 x->a = cpu_to_be64((a >> 8) ^ (_tt << 48));
180 static void gf128mul_x8_bbe(be128 *x)
182 u64 a = be64_to_cpu(x->a);
183 u64 b = be64_to_cpu(x->b);
184 u64 _tt = gf128mul_table_be[a >> 56];
186 x->a = cpu_to_be64((a << 8) | (b >> 56));
187 x->b = cpu_to_be64((b << 8) ^ _tt);
/*
 * gf128mul_lle() - multiply *r by *b in GF(2^128) under the "lle"
 * convention, storing the product back into *r.
 *
 * Strategy: precompute p[i] = element * x^i for i in 0..7, then for
 * each byte of *b XOR in the multiples selected by that byte's bits.
 *
 * NOTE(review): this excerpt appears truncated — the function braces,
 * the local declarations (an 8-entry be128 p[] and int i), the p[0]
 * initialisation, the per-bit "if (ch & 0x80) ... if (ch & 0x01)"
 * guards in front of the be128_xor() calls, and the byte loop with its
 * x^8 multiplication step are not visible.  Verify against upstream
 * before relying on this text.
 */
void gf128mul_lle(be128 *r, const be128 *b)
	/* p[i + 1] = p[i] * x */
	for (i = 0; i < 7; ++i)
		gf128mul_x_lle(&p[i + 1], &p[i]);
	/* accumulate the product starting from zero */
	memset(r, 0, sizeof(*r));
		/* lle consumes the bytes of *b from index 15 downwards */
		u8 ch = ((u8 *)b)[15 - i];
		be128_xor(r, r, &p[0]);
		be128_xor(r, r, &p[1]);
		be128_xor(r, r, &p[2]);
		be128_xor(r, r, &p[3]);
		be128_xor(r, r, &p[4]);
		be128_xor(r, r, &p[5]);
		be128_xor(r, r, &p[6]);
		be128_xor(r, r, &p[7]);
EXPORT_SYMBOL(gf128mul_lle);
/*
 * gf128mul_bbe() - multiply *r by *b in GF(2^128) under the "bbe"
 * convention, storing the product back into *r.
 *
 * Mirror image of gf128mul_lle(): p[i] holds the element times x^i,
 * bytes of *b are consumed from index 0 upwards, and the bit-to-table
 * mapping is reversed (bit 0 selects p[7], bit 7 selects p[0]).
 *
 * NOTE(review): this excerpt appears truncated — braces, local
 * declarations, p[0] initialisation, the per-bit "if (ch & ...)"
 * guards before each be128_xor(), and the loop/x^8 step are missing.
 */
void gf128mul_bbe(be128 *r, const be128 *b)
	/* p[i + 1] = p[i] * x */
	for (i = 0; i < 7; ++i)
		gf128mul_x_bbe(&p[i + 1], &p[i]);
	/* accumulate the product starting from zero */
	memset(r, 0, sizeof(*r));
		/* bbe consumes the bytes of *b from index 0 upwards */
		u8 ch = ((u8 *)b)[i];
		be128_xor(r, r, &p[7]);
		be128_xor(r, r, &p[6]);
		be128_xor(r, r, &p[5]);
		be128_xor(r, r, &p[4]);
		be128_xor(r, r, &p[3]);
		be128_xor(r, r, &p[2]);
		be128_xor(r, r, &p[1]);
		be128_xor(r, r, &p[0]);
EXPORT_SYMBOL(gf128mul_bbe);
/* This version uses 64k bytes of table space.
   A 16 byte buffer has to be multiplied by a 16 byte key
   value in GF(128). If we consider a GF(128) value in
   the buffer's lowest byte, we can construct a table of
   the 256 16 byte values that result from the 256 values
   of this byte. This requires 4096 bytes. But we also
   need tables for each of the 16 higher bytes in the
   buffer as well, which makes 64 kbytes in total.
*/
275 /* additional explanation
276 * t[0][BYTE] contains g*BYTE
277 * t[1][BYTE] contains g*x^8*BYTE
279 * t[15][BYTE] contains g*x^120*BYTE */
/*
 * gf128mul_init_64k_lle() - allocate and populate the 64 Kbyte lle
 * multiplication table set for the fixed element *g.
 *
 * After initialisation t->t[i]->t[BYTE] holds g * x^(8*i) * BYTE, so a
 * full multiply reduces to 16 lookups plus XORs (gf128mul_64k_lle()).
 * Caller owns the result and frees it with gf128mul_free_64k().
 *
 * NOTE(review): this excerpt appears truncated — the NULL checks after
 * each kzalloc() (with gf128mul_free_64k() presumably on the failure
 * path), the loop headers scoping the j/k fill loops to each i, and
 * the final "return t;" are not visible.
 */
struct gf128mul_64k *gf128mul_init_64k_lle(const be128 *g)
	struct gf128mul_64k *t;
	t = kzalloc(sizeof(*t), GFP_KERNEL);
	for (i = 0; i < 16; i++) {
		t->t[i] = kzalloc(sizeof(*t->t[i]), GFP_KERNEL);
		/* NOTE(review): likely guarded by "if (!t->t[i])" upstream */
		gf128mul_free_64k(t);
	/* table 0: seed index 128 with g, then halve the index while
	 * multiplying by x (lle indices shrink as the degree grows) */
	t->t[0]->t[128] = *g;
	for (j = 64; j > 0; j >>= 1)
		gf128mul_x_lle(&t->t[0]->t[j], &t->t[0]->t[j + j]);
	/* composite indices by linearity: t[j + k] = t[j] + t[k] */
	for (j = 2; j < 256; j += j)
		for (k = 1; k < j; ++k)
			be128_xor(&t->t[i]->t[j + k],
				  &t->t[i]->t[j], &t->t[i]->t[k]);
	/* each higher table is the previous one multiplied by x^8 */
	for (j = 128; j > 0; j >>= 1) {
		t->t[i]->t[j] = t->t[i - 1]->t[j];
		gf128mul_x8_lle(&t->t[i]->t[j]);
EXPORT_SYMBOL(gf128mul_init_64k_lle);
/*
 * gf128mul_init_64k_bbe() - allocate and populate the 64 Kbyte bbe
 * multiplication table set for the fixed element *g; bbe counterpart
 * of gf128mul_init_64k_lle().  Freed with gf128mul_free_64k().
 *
 * NOTE(review): this excerpt appears truncated — the NULL checks after
 * each kzalloc(), the seeding of t->t[0]->t[1] with *g before the
 * doubling loop, the loop headers scoping the j/k loops per i, and the
 * final "return t;" are not visible.
 */
struct gf128mul_64k *gf128mul_init_64k_bbe(const be128 *g)
	struct gf128mul_64k *t;
	t = kzalloc(sizeof(*t), GFP_KERNEL);
	for (i = 0; i < 16; i++) {
		t->t[i] = kzalloc(sizeof(*t->t[i]), GFP_KERNEL);
		/* NOTE(review): likely guarded by "if (!t->t[i])" upstream */
		gf128mul_free_64k(t);
	/* table 0: bbe indices grow with the degree, so double upwards */
	for (j = 1; j <= 64; j <<= 1)
		gf128mul_x_bbe(&t->t[0]->t[j + j], &t->t[0]->t[j]);
	/* composite indices by linearity: t[j + k] = t[j] + t[k] */
	for (j = 2; j < 256; j += j)
		for (k = 1; k < j; ++k)
			be128_xor(&t->t[i]->t[j + k],
				  &t->t[i]->t[j], &t->t[i]->t[k]);
	/* each higher table is the previous one multiplied by x^8 */
	for (j = 128; j > 0; j >>= 1) {
		t->t[i]->t[j] = t->t[i - 1]->t[j];
		gf128mul_x8_bbe(&t->t[i]->t[j]);
EXPORT_SYMBOL(gf128mul_init_64k_bbe);
/*
 * gf128mul_free_64k() - release a table set allocated by one of the
 * gf128mul_init_64k_*() functions.
 *
 * NOTE(review): the loop body freeing each t->t[i] and the final free
 * of t itself are not visible in this excerpt (upstream uses
 * kfree_sensitive() since the tables are key material) — confirm.
 */
void gf128mul_free_64k(struct gf128mul_64k *t)
	for (i = 0; i < 16; i++)
EXPORT_SYMBOL(gf128mul_free_64k);
/*
 * gf128mul_64k_lle() - multiply *a in place by the element whose
 * 64 Kbyte table set @t was built by gf128mul_init_64k_lle().
 *
 * One lookup per byte of *a: byte i selects from table i (which folds
 * in the x^(8*i) factor), all XOR-ed together.
 *
 * NOTE(review): this excerpt appears truncated — the declarations of
 * ap (byte view of a), the local accumulator r and int i, and the
 * final store of the accumulator back into *a are not visible.
 */
void gf128mul_64k_lle(be128 *a, struct gf128mul_64k *t)
	*r = t->t[0]->t[ap[0]];
	for (i = 1; i < 16; ++i)
		be128_xor(r, r, &t->t[i]->t[ap[i]]);
EXPORT_SYMBOL(gf128mul_64k_lle);
/*
 * gf128mul_64k_bbe() - multiply *a in place by the element whose
 * 64 Kbyte table set @t was built by gf128mul_init_64k_bbe().
 *
 * Same scheme as the lle variant but the bytes of *a are consumed in
 * the opposite order (ap[15] pairs with table 0).
 *
 * NOTE(review): this excerpt appears truncated — the declarations of
 * ap, the accumulator r and int i, and the final store back into *a
 * are not visible.
 */
void gf128mul_64k_bbe(be128 *a, struct gf128mul_64k *t)
	*r = t->t[0]->t[ap[15]];
	for (i = 1; i < 16; ++i)
		be128_xor(r, r, &t->t[i]->t[ap[15 - i]]);
EXPORT_SYMBOL(gf128mul_64k_bbe);
/* This version uses 4k bytes of table space.
   A 16 byte buffer has to be multiplied by a 16 byte key
   value in GF(128). If we consider a GF(128) value in a
   single byte, we can construct a table of the 256 16 byte
   values that result from the 256 values of this byte.
   This requires 4096 bytes. If we take the highest byte in
   the buffer and use this table to get the result, we then
   have to multiply by x^120 to get the final value. For the
   next highest byte the result has to be multiplied by x^112
   and so on. But we can do this by accumulating the result
   in an accumulator starting with the result for the top
   byte. We repeatedly multiply the accumulator value by
   x^8 and then add in (i.e. xor) the 16 bytes of the next
   lower byte in the buffer, stopping when we reach the
   lowest byte. This requires a 4096 byte table.
*/
/*
 * gf128mul_init_4k_lle() - allocate and populate a 4 Kbyte lle table
 * for the fixed element *g: t->t[BYTE] = g * BYTE.  Caller frees with
 * gf128mul_free_4k() (kfree-based upstream — confirm).
 *
 * NOTE(review): this excerpt appears truncated — the NULL check after
 * kzalloc(), the seeding of t->t[128] with *g before the halving loop,
 * and the final "return t;" are not visible.
 */
struct gf128mul_4k *gf128mul_init_4k_lle(const be128 *g)
	struct gf128mul_4k *t;
	t = kzalloc(sizeof(*t), GFP_KERNEL);
	/* power-of-two indices by repeated multiplication by x */
	for (j = 64; j > 0; j >>= 1)
		gf128mul_x_lle(&t->t[j], &t->t[j+j]);
	/* composite indices by linearity: t[j + k] = t[j] + t[k] */
	for (j = 2; j < 256; j += j)
		for (k = 1; k < j; ++k)
			be128_xor(&t->t[j + k], &t->t[j], &t->t[k]);
EXPORT_SYMBOL(gf128mul_init_4k_lle);
/*
 * gf128mul_init_4k_bbe() - allocate and populate a 4 Kbyte bbe table
 * for the fixed element *g; bbe counterpart of gf128mul_init_4k_lle().
 *
 * NOTE(review): this excerpt appears truncated — the NULL check after
 * kzalloc(), the seeding of t->t[1] with *g before the doubling loop,
 * and the final "return t;" are not visible.
 */
struct gf128mul_4k *gf128mul_init_4k_bbe(const be128 *g)
	struct gf128mul_4k *t;
	t = kzalloc(sizeof(*t), GFP_KERNEL);
	/* power-of-two indices by repeated multiplication by x */
	for (j = 1; j <= 64; j <<= 1)
		gf128mul_x_bbe(&t->t[j + j], &t->t[j]);
	/* composite indices by linearity: t[j + k] = t[j] + t[k] */
	for (j = 2; j < 256; j += j)
		for (k = 1; k < j; ++k)
			be128_xor(&t->t[j + k], &t->t[j], &t->t[k]);
EXPORT_SYMBOL(gf128mul_init_4k_bbe);
/*
 * gf128mul_4k_lle() - multiply *a in place by the element whose
 * 4 Kbyte table @t was built by gf128mul_init_4k_lle().
 *
 * NOTE(review): this excerpt appears truncated — the declarations of
 * ap/r/i, the byte loop (which walks *a from the low-index end and
 * multiplies the accumulator by x^8 between bytes), and the final
 * store back into *a are not visible; only one loop-body XOR remains.
 */
void gf128mul_4k_lle(be128 *a, struct gf128mul_4k *t)
	be128_xor(r, r, &t->t[ap[i]]);
EXPORT_SYMBOL(gf128mul_4k_lle);
/*
 * gf128mul_4k_bbe() - multiply *a in place by the element whose
 * 4 Kbyte table @t was built by gf128mul_init_4k_bbe().
 *
 * NOTE(review): this excerpt appears truncated — the declarations of
 * ap/r/i, the byte loop (walking *a from the high-index end with an
 * x^8 accumulator step between bytes), and the final store back into
 * *a are not visible; only one loop-body XOR remains.
 */
void gf128mul_4k_bbe(be128 *a, struct gf128mul_4k *t)
	be128_xor(r, r, &t->t[ap[i]]);
EXPORT_SYMBOL(gf128mul_4k_bbe);
/* Kernel module metadata. */
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Functions for multiplying elements of GF(2^128)");