/** \file fec.c Forward error correction based on Vandermonde matrices. */

/*
 * 980624
 * (C) 1997-98 Luigi Rizzo (luigi@iet.unipi.it)
 *
 * Portions derived from code by Phil Karn (karn@ka9q.ampr.org),
 * Robert Morelos-Zaragoza (robert@spectra.eng.hawaii.edu) and Hari
 * Thirumoorthy (harit@spectra.eng.hawaii.edu), Aug 1995
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above
 *    copyright notice, this list of conditions and the following
 *    disclaimer in the documentation and/or other materials
 *    provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
 * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
 * PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY,
 * OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA,
 * OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR
 * TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY
 * OF SUCH DAMAGE.
 */


#include "para.h"
#include "error.h"
#include "portable_io.h"
#include "string.h"
#include "fec.h"

#define GF_BITS 8 /* code over GF(256) */
#define GF_SIZE ((1 << GF_BITS) - 1)

/*
 * To speed up computations, we have tables for the logarithm, exponent and
 * inverse of a number. We use a table for multiplication as well (it takes
 * 64K, no big deal even on a PDA, especially because it can be pre-initialized
 * and put into a ROM!). The macro gf_mul(x,y) takes care of multiplications.
 */
static unsigned char gf_exp[2 * GF_SIZE]; /* index->poly form conversion table */
static int gf_log[GF_SIZE + 1]; /* poly->index form conversion table */
static unsigned char inverse[GF_SIZE + 1]; /* inverse of a field element */
static unsigned char gf_mul_table[GF_SIZE + 1][GF_SIZE + 1];

/* Multiply two numbers. */
#define gf_mul(x,y) gf_mul_table[x][y]
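/*
 * For instance, gf_mul(0, y) and gf_mul(x, 0) are always zero, and
 * gf_mul(2, 3) is 6 because x * (x + 1) = x^2 + x requires no reduction
 * modulo the field polynomial.
 */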

/* Compute x % GF_SIZE without a slow divide. */
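/*
 * This works because 2^GF_BITS is congruent to 1 modulo GF_SIZE, so the high
 * bits of x may be folded back into the low bits without changing the
 * residue.
 */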
static inline unsigned char modnn(int x)
{
        while (x >= GF_SIZE) {
                x -= GF_SIZE;
                x = (x >> GF_BITS) + (x & GF_SIZE);
        }
        return x;
}

static void init_mul_table(void)
{
        int i, j;
        for (i = 0; i < GF_SIZE + 1; i++)
                for (j = 0; j < GF_SIZE + 1; j++)
                        gf_mul_table[i][j] =
                                gf_exp[modnn(gf_log[i] + gf_log[j])];

        for (j = 0; j < GF_SIZE + 1; j++)
                gf_mul_table[0][j] = gf_mul_table[j][0] = 0;
}

static unsigned char *alloc_matrix(int rows, int cols)
{
        return para_malloc(rows * cols);
}

/*
 * Initialize the data structures used for computations in GF.
 *
 * This generates GF(2**GF_BITS) from the irreducible polynomial p(X) in
 * p[0]..p[m].
 *
 * Lookup tables:
 *   index->polynomial form: gf_exp[] contains j = \alpha^i;
 *   polynomial form -> index form: gf_log[j = \alpha^i] = i
 * \alpha = x is the primitive element of GF(2^m)
 *
 * For efficiency, gf_exp[] has size 2*GF_SIZE, so that a simple
 * multiplication of two numbers can be resolved without calling modnn.
 */
static void generate_gf(void)
{
        int i;
        unsigned char mask = 1;
        char *pp = "101110001"; /* The primitive polynomial 1+x^2+x^3+x^4+x^8 */
        gf_exp[GF_BITS] = 0; /* will be updated at the end of the 1st loop */

        /*
         * first, generate the (polynomial representation of) powers of \alpha,
         * which are stored in gf_exp[i] = \alpha ** i .
         * At the same time build gf_log[gf_exp[i]] = i .
         * The first GF_BITS powers are simply bits shifted to the left.
         */
        for (i = 0; i < GF_BITS; i++, mask <<= 1) {
                gf_exp[i] = mask;
                gf_log[gf_exp[i]] = i;
                /*
                 * If pp[i] == 1 then \alpha ** i occurs in poly-repr
                 * gf_exp[GF_BITS] = \alpha ** GF_BITS
                 */
                if (pp[i] == '1')
                        gf_exp[GF_BITS] ^= mask;
        }
        /*
         * now gf_exp[GF_BITS] = \alpha ** GF_BITS is complete, so we can also
         * record its logarithm.
         */
        gf_log[gf_exp[GF_BITS]] = GF_BITS;
        /*
         * Poly-repr of \alpha ** (i+1) is given by poly-repr of \alpha ** i
         * shifted left one-bit and accounting for any \alpha ** GF_BITS term
         * that may occur when poly-repr of \alpha ** i is shifted.
         */
        mask = 1 << (GF_BITS - 1);
        for (i = GF_BITS + 1; i < GF_SIZE; i++) {
                if (gf_exp[i - 1] >= mask)
                        gf_exp[i] =
                                gf_exp[GF_BITS] ^ ((gf_exp[i - 1] ^ mask) << 1);
                else
                        gf_exp[i] = gf_exp[i - 1] << 1;
                gf_log[gf_exp[i]] = i;
        }
        /*
         * log(0) is not defined, so use a special value.
         */
        gf_log[0] = GF_SIZE;
        /* set the extended gf_exp values for fast multiply */
        for (i = 0; i < GF_SIZE; i++)
                gf_exp[i + GF_SIZE] = gf_exp[i];

        inverse[0] = 0; /* 0 has no inverse. */
        inverse[1] = 1;
        for (i = 2; i <= GF_SIZE; i++)
                inverse[i] = gf_exp[GF_SIZE - gf_log[i]];
}

/*
 * Compute dst[] = dst[] + c * src[]
 *
 * This is used often, so better optimize it! Currently the loop is unrolled 16
 * times. The case c=0 is also optimized, whereas c=1 is not.
 */
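/*
 * Note that addition in GF(2^m) is bitwise XOR, so "dst[] + c * src[]" boils
 * down to dst[i] ^= gf_mul(c, src[i]) for each byte.
 */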
#define UNROLL 16
static void addmul(unsigned char *dst1, const unsigned char *src1,
                unsigned char c, int sz)
{
        if (c == 0)
                return;
        unsigned char *dst = dst1, *lim = &dst[sz - UNROLL + 1],
                *col = gf_mul_table[c];
        const unsigned char *src = src1;

        for (; dst < lim; dst += UNROLL, src += UNROLL) {
                dst[0] ^= col[src[0]];
                dst[1] ^= col[src[1]];
                dst[2] ^= col[src[2]];
                dst[3] ^= col[src[3]];
                dst[4] ^= col[src[4]];
                dst[5] ^= col[src[5]];
                dst[6] ^= col[src[6]];
                dst[7] ^= col[src[7]];
                dst[8] ^= col[src[8]];
                dst[9] ^= col[src[9]];
                dst[10] ^= col[src[10]];
                dst[11] ^= col[src[11]];
                dst[12] ^= col[src[12]];
                dst[13] ^= col[src[13]];
                dst[14] ^= col[src[14]];
                dst[15] ^= col[src[15]];
        }
        lim += UNROLL - 1;
        for (; dst < lim; dst++, src++) /* final components */
                *dst ^= col[*src];
}

/*
 * Compute C = AB where A is n*k, B is k*m, C is n*m.
 */
static void matmul(unsigned char *a, unsigned char *b, unsigned char *c,
                int n, int k, int m)
{
        int row, col, i;

        for (row = 0; row < n; row++) {
                for (col = 0; col < m; col++) {
                        unsigned char *pa = &a[row * k], *pb = &b[col], acc = 0;
                        for (i = 0; i < k; i++, pa++, pb += m)
                                acc ^= gf_mul(*pa, *pb);
                        c[row * m + col] = acc;
                }
        }
}

#define FEC_SWAP(a,b) {typeof(a) tmp = a; a = b; b = tmp;}

/*
 * Compute the inverse of a matrix.
 *
 * k is the dimension of the square matrix 'src' (Gauss-Jordan, adapted from
 * Numerical Recipes in C). Returns negative on errors.
 */
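/*
 * In short: for each column a pivot is chosen, the pivot row is scaled by the
 * pivot's inverse, and multiples of it are subtracted (XORed) from all other
 * rows. The final loop undoes the column swaps made while pivoting.
 */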
static int invert_mat(unsigned char *src, int k)
{
        int irow, icol, row, col, ix, error;
        int *indxc = para_malloc(k * sizeof(int));
        int *indxr = para_malloc(k * sizeof(int));
        int *ipiv = para_malloc(k * sizeof(int)); /* elements used as pivots */
        unsigned char c, *p, *id_row = alloc_matrix(1, k),
                *temp_row = alloc_matrix(1, k);

        memset(id_row, 0, k);
        memset(ipiv, 0, k * sizeof(int));

        for (col = 0; col < k; col++) {
                unsigned char *pivot_row;
                /*
                 * Zeroing column 'col', look for a non-zero element.
                 * First try on the diagonal, if it fails, look elsewhere.
                 */
                irow = icol = -1;
                if (ipiv[col] != 1 && src[col * k + col] != 0) {
                        irow = col;
                        icol = col;
                        goto found_piv;
                }
                for (row = 0; row < k; row++) {
                        if (ipiv[row] != 1) {
                                for (ix = 0; ix < k; ix++) {
                                        if (ipiv[ix] == 0) {
                                                if (src[row * k + ix] != 0) {
                                                        irow = row;
                                                        icol = ix;
                                                        goto found_piv;
                                                }
                                        } else if (ipiv[ix] > 1) {
                                                error = -E_FEC_PIVOT;
                                                goto fail;
                                        }
                                }
                        }
                }
                error = -E_FEC_PIVOT;
                if (icol == -1)
                        goto fail;
found_piv:
                ++(ipiv[icol]);
                /*
                 * swap rows irow and icol, so afterwards the diagonal element
                 * will be correct. Rarely done, not worth optimizing.
                 */
                if (irow != icol)
                        for (ix = 0; ix < k; ix++)
                                FEC_SWAP(src[irow * k + ix], src[icol * k + ix]);
                indxr[col] = irow;
                indxc[col] = icol;
                pivot_row = &src[icol * k];
                error = -E_FEC_SINGULAR;
                c = pivot_row[icol];
                if (c == 0)
                        goto fail;
                if (c != 1) { /* otherwise this is a NOP */
                        /*
                         * this is done often, but optimizing is not so
                         * fruitful, at least in the obvious ways (unrolling)
                         */
                        c = inverse[c];
                        pivot_row[icol] = 1;
                        for (ix = 0; ix < k; ix++)
                                pivot_row[ix] = gf_mul(c, pivot_row[ix]);
                }
                /*
                 * From all other rows, remove multiples of the pivot row to
                 * zero the relevant entry. Since the inverse is built up in
                 * place (as in Numerical Recipes), the pivot column is not
                 * literally zeroed but ends up holding the corresponding
                 * column of the inverse. (Here, if we knew that pivot_row is
                 * the identity, we could optimize the addmul.)
                 */
                id_row[icol] = 1;
                if (memcmp(pivot_row, id_row, k) != 0) {
                        for (p = src, ix = 0; ix < k; ix++, p += k) {
                                if (ix != icol) {
                                        c = p[icol];
                                        p[icol] = 0;
                                        addmul(p, pivot_row, c, k);
                                }
                        }
                }
                id_row[icol] = 0;
        }
        for (col = k - 1; col >= 0; col--) {
                if (indxr[col] < 0 || indxr[col] >= k)
                        PARA_CRIT_LOG("AARGH, indxr[col] %d\n", indxr[col]);
                else if (indxc[col] < 0 || indxc[col] >= k)
                        PARA_CRIT_LOG("AARGH, indxc[col] %d\n", indxc[col]);
                else if (indxr[col] != indxc[col]) {
                        for (row = 0; row < k; row++) {
                                FEC_SWAP(src[row * k + indxr[col]],
                                        src[row * k + indxc[col]]);
                        }
                }
        }
        error = 0;
fail:
        free(indxc);
        free(indxr);
        free(ipiv);
        free(id_row);
        free(temp_row);
        return error;
}

/*
 * Invert a Vandermonde matrix.
 *
 * It assumes that the matrix is not singular and _IS_ a Vandermonde matrix.
 * Only uses the second column of the matrix, containing the p_i's.
 *
 * Algorithm borrowed from "Numerical recipes in C" -- sec.2.8, but largely
 * revised for GF purposes.
 */
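/*
 * Row i of the matrix is expected to be (1, p_i, p_i^2, ..., p_i^(k-1)),
 * which is why reading the second column is enough to recover all the p_i.
 */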
static void invert_vdm(unsigned char *src, int k)
{
        int i, j, row, col;
        unsigned char *b, *c, *p, t, xx;

        if (k == 1) /* degenerate */
                return;
        /*
         * c holds the coefficients of P(x) = Prod (x - p_i), i = 0..k-1,
         * b holds the coefficients for the matrix inversion.
         */
        c = para_malloc(k);
        b = para_malloc(k);
        p = para_malloc(k);

        for (j = 1, i = 0; i < k; i++, j += k) {
                c[i] = 0;
                p[i] = src[j];
        }
        /*
         * Construct the coefficients recursively. We know c[k] = 1
         * (implicitly) and start with P_0 = x - p_0, then at each stage
         * multiply by x - p_i, generating P_i = x P_{i-1} - p_i P_{i-1}.
         * After k steps we are done.
         */
        c[k - 1] = p[0]; /* really -p(0), but x = -x in GF(2^m) */
        for (i = 1; i < k; i++) {
                unsigned char p_i = p[i];
                for (j = k - 1 - (i - 1); j < k - 1; j++)
                        c[j] ^= gf_mul(p_i, c[j + 1]);
                c[k - 1] ^= p_i;
        }

        for (row = 0; row < k; row++) {
                /*
                 * Synthetic division: compute in b[] the coefficients of
                 * P(x) / (x - p_row), and in t the value of that quotient at
                 * x = p_row, i.e. the product of (p_row - p_j) over all
                 * j != row. Column 'row' of the inverse is then b[] / t.
                 */
                xx = p[row];
                t = 1;
                b[k - 1] = 1; /* this is in fact c[k] */
                for (i = k - 2; i >= 0; i--) {
                        b[i] = c[i + 1] ^ gf_mul(xx, b[i + 1]);
                        t = gf_mul(xx, t) ^ b[i];
                }
                for (col = 0; col < k; col++)
                        src[col * k + row] = gf_mul(inverse[t], b[col]);
        }
        free(c);
        free(b);
        free(p);
}

static int fec_initialized;

static void init_fec(void)
{
        generate_gf();
        init_mul_table();
        fec_initialized = 1;
}

/** Internal FEC parameters. */
struct fec_parms {
        /** Number of data slices. */
        int k;
        /** Number of slices (including redundant slices). */
        int n;
        /** The n * k encoding matrix, computed by fec_new(). */
        unsigned char *enc_matrix;
};
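/*
 * The code is systematic: slices 0..k-1 of an encoded group are identical to
 * the k data slices, while slices k..n-1 carry redundancy. Any k of the n
 * slices suffice to reconstruct the original data.
 */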

/**
 * Deallocate a fec params structure.
 *
 * \param p The structure to free.
 */
void fec_free(struct fec_parms *p)
{
        if (!p)
                return;
        free(p->enc_matrix);
        free(p);
}

/**
 * Create a new encoder and return an opaque descriptor to it.
 *
 * \param k Number of input slices.
 * \param n Number of output slices.
 * \param result On success the fec descriptor is returned here.
 *
 * \return Standard.
 *
 * This creates the n * k encoding matrix. It is computed starting from a
 * Vandermonde matrix and then transformed into a systematic matrix.
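 *
 * A minimal usage sketch with made-up parameters (groups of 5 slices, 3 of
 * which carry data):
 *
 *      struct fec_parms *p;
 *      int ret = fec_new(3, 5, &p);
 *      if (ret < 0)
 *              return ret;
 *      // ... fec_encode() / fec_decode() ...
 *      fec_free(p);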
 */
int fec_new(int k, int n, struct fec_parms **result)
{
        int row, col;
        unsigned char *p, *tmp_m;
        struct fec_parms *parms;

        if (!fec_initialized)
                init_fec();

        if (k < 1 || k > GF_SIZE + 1 || n > GF_SIZE + 1 || k > n)
                return -E_FEC_PARMS;
        parms = para_malloc(sizeof(struct fec_parms));
        parms->k = k;
        parms->n = n;
        parms->enc_matrix = alloc_matrix(n, k);
        tmp_m = alloc_matrix(n, k);
        /*
         * Fill the matrix with powers of field elements, starting from 0.
         * The first row is special: it cannot be computed via the exp table.
         */
        tmp_m[0] = 1;
        for (col = 1; col < k; col++)
                tmp_m[col] = 0;
        for (p = tmp_m + k, row = 0; row < n - 1; row++, p += k) {
                for (col = 0; col < k; col++)
                        p[col] = gf_exp[modnn(row * col)];
        }

        /*
         * Quick code to build the systematic matrix: invert the top k*k
         * Vandermonde matrix, right-multiply the bottom n-k rows by the
         * inverse, and construct the identity matrix at the top.
         */
        invert_vdm(tmp_m, k); /* much faster than invert_mat */
        matmul(tmp_m + k * k, tmp_m, parms->enc_matrix + k * k, n - k, k, k);
        /*
         * The upper matrix is the identity, so do not bother with a slow
         * multiply.
         */
        memset(parms->enc_matrix, 0, k * k);
        for (p = parms->enc_matrix, col = 0; col < k; col++, p += k + 1)
                *p = 1;
        free(tmp_m);
        *result = parms;
        return 0;
}

/**
 * Compute one encoded slice of the given input.
 *
 * \param parms The fec parameters returned earlier by fec_new().
 * \param src The \a k data slices to encode.
 * \param dst Result pointer.
 * \param idx The index of the slice to compute.
 * \param sz The size of the input data packets.
 *
 * Encode the \a k slices of size \a sz given by \a src and store the output
 * slice number \a idx in \a dst.
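 *
 * To encode a complete group, a caller would typically loop over all n slice
 * indices (a sketch, not taken from an actual caller):
 *
 *      for (i = 0; i < n; i++)
 *              fec_encode(parms, src, dst[i], i, sz);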
 */
void fec_encode(struct fec_parms *parms, const unsigned char * const *src,
                unsigned char *dst, int idx, int sz)
{
        int i, k = parms->k;
        unsigned char *p;

        assert(idx < parms->n);

        if (idx < k) {
                memcpy(dst, src[idx], sz);
                return;
        }
        p = &(parms->enc_matrix[idx * k]);
        memset(dst, 0, sz);
        for (i = 0; i < k; i++)
                addmul(dst, src[i], p[i], sz);
}

/* Move the received slices into their correct positions. */
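/*
 * Example (k = 3): idx = {1, 0, 4} becomes {0, 1, 4} and the first two data
 * pointers are swapped accordingly, so that each data slice i < k ends up at
 * position i.
 */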
static int shuffle(unsigned char **data, int *idx, int k)
{
        int i;

        for (i = 0; i < k;) {
                if (idx[i] >= k || idx[i] == i)
                        i++;
                else { /* put index and data at the right position */
                        int c = idx[i];

                        if (idx[c] == c) /* conflict */
                                return -E_FEC_BAD_IDX;
                        FEC_SWAP(idx[i], idx[c]);
                        FEC_SWAP(data[i], data[c]);
                }
        }
        return 0;
}

/*
 * Construct the decoding matrix given the indices. The encoding matrix must
 * already be allocated.
 */
static int build_decode_matrix(struct fec_parms *parms, int *idx,
                unsigned char **result)
{
        int ret = -E_FEC_BAD_IDX, i, k = parms->k;
        unsigned char *p, *matrix = alloc_matrix(k, k);

        for (i = 0, p = matrix; i < k; i++, p += k) {
                if (idx[i] >= parms->n) /* invalid index */
                        goto err;
                if (idx[i] < k) {
                        memset(p, 0, k);
                        p[i] = 1;
                } else
                        memcpy(p, &(parms->enc_matrix[idx[i] * k]), k);
        }
        ret = invert_mat(matrix, k);
        if (ret < 0)
                goto err;
        *result = matrix;
        return 0;
err:
        free(matrix);
        *result = NULL;
        return ret;
}

/**
 * Decode one slice from the group of received slices.
 *
 * \param parms Pointer to fec params structure.
 * \param data Pointers to received packets.
 * \param idx Pointer to packet indices (gets modified).
 * \param sz Size of each packet.
 *
 * \return Zero on success, negative on errors.
 *
 * The \a data vector of received slices and the indices of slices are used to
 * produce the correct output slice. The data slices are modified in-place.
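 *
 * For example (hypothetical values, k = 3): if \a data holds the slices with
 * indices {0, 4, 2}, the missing data slice 1 is recomputed from the
 * redundant slice, so that afterwards data[0..2] contain the original data
 * slices in order.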
 */
int fec_decode(struct fec_parms *parms, unsigned char **data, int *idx,
                int sz)
{
        unsigned char *m_dec, **slice;
        int ret, row, col, k = parms->k;

        ret = shuffle(data, idx, k);
        if (ret < 0)
                return ret;
        ret = build_decode_matrix(parms, idx, &m_dec);
        if (ret < 0)
                return ret;
        /* do the actual decoding */
        slice = para_malloc(k * sizeof(unsigned char *));
        for (row = 0; row < k; row++) {
                if (idx[row] >= k) {
                        slice[row] = para_calloc(sz);
                        for (col = 0; col < k; col++)
                                addmul(slice[row], data[col],
                                        m_dec[row * k + col], sz);
                }
        }
        /* move slices to their final destination */
        for (row = 0; row < k; row++) {
                if (idx[row] >= k) {
                        memcpy(data[row], slice[row], sz);
                        free(slice[row]);
                }
        }
        free(slice);
        free(m_dec);
        return 0;
}