author    Jay Berkenbilt <ejb@ql.org>    2012-12-29 13:06:25 +0100
committer Jay Berkenbilt <ejb@ql.org>    2012-12-31 11:36:51 +0100
commit    c9da66a018b6381eadfa3570d511b2a2341ebae8 (patch)
tree      92a82327dd72cd93839741fb529f3a51c800ba0d
parent    3680922ae5efbf544afcfa3dd72e2f1db336c45f (diff)
Incorporate sha2 code from sphlib 3.0
Changes from upstream are limited to changing #include paths so that I can place the header files and included "c" files in a subdirectory. I didn't keep sphlib's unit tests in the tree but instead verified them by running them manually. I will implement the same tests using the Pl_SHA2 pipeline, except that sphlib's sha2 implementation supports partial bytes, which neither qpdf nor our tests will exercise.
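As a point of reference, here is a minimal standalone sketch (illustrative only, not part of this commit) of exercising the incorporated API the way such a test would; the expected output is the FIPS 180-2 SHA-256 test vector for "abc":

    #include <stdio.h>
    #include "sph/sph_sha2.h"

    int main(void)
    {
        sph_sha256_context ctx;
        unsigned char digest[32];
        int i;

        sph_sha256_init(&ctx);
        sph_sha256(&ctx, "abc", 3);     /* absorb the message bytes */
        sph_sha256_close(&ctx, digest); /* pad, write 32 bytes, reinit */
        for (i = 0; i < 32; i++)
            printf("%02x", digest[i]);
        printf("\n");
        /* expected: ba7816bf8f01cfea414140de5dae2223
                     b00361a396177a9cb410ff61f20015ad */
        return 0;
    }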
-rw-r--r--   README                       27
-rw-r--r--   libqpdf/build.mk             15
-rw-r--r--   libqpdf/sha2.c              690
-rw-r--r--   libqpdf/sha2big.c           247
-rw-r--r--   libqpdf/sph/md_helper.c     346
-rw-r--r--   libqpdf/sph/sph_sha2.h      370
-rw-r--r--   libqpdf/sph/sph_types.h    1976
7 files changed, 3668 insertions(+), 3 deletions(-)
diff --git a/README b/README
index 2d82a22a..77e44600 100644
--- a/README
+++ b/README
@@ -48,6 +48,33 @@ obtained from
http://www.efgh.com/software/rijndael.htm
http://www.efgh.com/software/rijndael.txt
+The embedded sha2 code comes from sphlib 3.0
+
+ http://www.saphir2.com/sphlib/
+
+That code has the following license:
+
+ Copyright (c) 2007-2011 Projet RNRT SAPHIR
+
+ Permission is hereby granted, free of charge, to any person obtaining
+ a copy of this software and associated documentation files (the
+ "Software"), to deal in the Software without restriction, including
+ without limitation the rights to use, copy, modify, merge, publish,
+ distribute, sublicense, and/or sell copies of the Software, and to
+ permit persons to whom the Software is furnished to do so, subject to
+ the following conditions:
+
+ The above copyright notice and this permission notice shall be included
+ in all copies or substantial portions of the Software.
+
+ THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+ IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
+ CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+
Building on UNIX/Linux
======================
diff --git a/libqpdf/build.mk b/libqpdf/build.mk
index 0ad96a2d..4e0311b0 100644
--- a/libqpdf/build.mk
+++ b/libqpdf/build.mk
@@ -54,18 +54,27 @@ SRCS_libqpdf = \
libqpdf/QUtil.cc \
libqpdf/RC4.cc \
libqpdf/qpdf-c.cc \
- libqpdf/rijndael.cc
+ libqpdf/rijndael.cc \
+ libqpdf/sha2.c \
+ libqpdf/sha2big.c
# -----
-OBJS_libqpdf = $(call src_to_lobj,$(SRCS_libqpdf))
+CCSRCS_libqpdf = $(filter %.cc,$(SRCS_libqpdf))
+CSRCS_libqpdf = $(filter %.c,$(SRCS_libqpdf))
+
+CCOBJS_libqpdf = $(call src_to_lobj,$(CCSRCS_libqpdf))
+COBJS_libqpdf = $(call c_src_to_lobj,$(CSRCS_libqpdf))
+OBJS_libqpdf = $(CCOBJS_libqpdf) $(COBJS_libqpdf)
ifeq ($(GENDEPS),1)
-include $(call lobj_to_dep,$(OBJS_libqpdf))
endif
-$(OBJS_libqpdf): libqpdf/$(OUTPUT_DIR)/%.$(LOBJ): libqpdf/%.cc
+$(CCOBJS_libqpdf): libqpdf/$(OUTPUT_DIR)/%.$(LOBJ): libqpdf/%.cc
$(call libcompile,$<,$(INCLUDES_libqpdf))
+$(COBJS_libqpdf): libqpdf/$(OUTPUT_DIR)/%.$(LOBJ): libqpdf/%.c
+ $(call c_libcompile,$<,$(INCLUDES_libqpdf))
# Last three arguments to makelib are CURRENT,REVISION,AGE.
#
diff --git a/libqpdf/sha2.c b/libqpdf/sha2.c
new file mode 100644
index 00000000..45fdd7e6
--- /dev/null
+++ b/libqpdf/sha2.c
@@ -0,0 +1,690 @@
+/* $Id: sha2.c 227 2010-06-16 17:28:38Z tp $ */
+/*
+ * SHA-224 / SHA-256 implementation.
+ *
+ * ==========================(LICENSE BEGIN)============================
+ *
+ * Copyright (c) 2007-2010 Projet RNRT SAPHIR
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining
+ * a copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sublicense, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be
+ * included in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+ * IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
+ * CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * ===========================(LICENSE END)=============================
+ *
+ * @author Thomas Pornin <thomas.pornin@cryptolog.com>
+ */
+
+#include <stddef.h>
+#include <string.h>
+
+#include "sph/sph_sha2.h"
+
+#if SPH_SMALL_FOOTPRINT && !defined SPH_SMALL_FOOTPRINT_SHA2
+#define SPH_SMALL_FOOTPRINT_SHA2 1
+#endif
+
+#define CH(X, Y, Z) ((((Y) ^ (Z)) & (X)) ^ (Z))
+#define MAJ(X, Y, Z) (((Y) & (Z)) | (((Y) | (Z)) & (X)))
+
+#define ROTR SPH_ROTR32
+
+#define BSG2_0(x) (ROTR(x, 2) ^ ROTR(x, 13) ^ ROTR(x, 22))
+#define BSG2_1(x) (ROTR(x, 6) ^ ROTR(x, 11) ^ ROTR(x, 25))
+#define SSG2_0(x) (ROTR(x, 7) ^ ROTR(x, 18) ^ SPH_T32((x) >> 3))
+#define SSG2_1(x) (ROTR(x, 17) ^ ROTR(x, 19) ^ SPH_T32((x) >> 10))
+
+static const sph_u32 H224[8] = {
+ SPH_C32(0xC1059ED8), SPH_C32(0x367CD507), SPH_C32(0x3070DD17),
+ SPH_C32(0xF70E5939), SPH_C32(0xFFC00B31), SPH_C32(0x68581511),
+ SPH_C32(0x64F98FA7), SPH_C32(0xBEFA4FA4)
+};
+
+static const sph_u32 H256[8] = {
+ SPH_C32(0x6A09E667), SPH_C32(0xBB67AE85), SPH_C32(0x3C6EF372),
+ SPH_C32(0xA54FF53A), SPH_C32(0x510E527F), SPH_C32(0x9B05688C),
+ SPH_C32(0x1F83D9AB), SPH_C32(0x5BE0CD19)
+};
+
+/*
+ * The SHA2_ROUND_BODY defines the body for a SHA-224 / SHA-256
+ * compression function implementation. The "in" parameter should
+ * evaluate, when applied to a numerical input parameter from 0 to 15,
+ * to an expression which yields the corresponding input block. The "r"
+ * parameter should evaluate to an array or pointer expression
+ * designating the array of 8 words which contains the input and output
+ * of the compression function.
+ */
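+
+/*
+ * Illustration: "in" is typically instantiated as a macro over a
+ * 16-word message block, e.g.
+ *
+ *   #define MY_IN(x)   msg[x]
+ *   SHA2_ROUND_BODY(MY_IN, state);
+ *
+ * "MY_IN", "msg", and "state" are hypothetical names; sha2_round() and
+ * sph_sha224_comp() below show this file's real instantiations.
+ */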
+
+#if SPH_SMALL_FOOTPRINT_SHA2
+
+static const sph_u32 K[64] = {
+ SPH_C32(0x428A2F98), SPH_C32(0x71374491),
+ SPH_C32(0xB5C0FBCF), SPH_C32(0xE9B5DBA5),
+ SPH_C32(0x3956C25B), SPH_C32(0x59F111F1),
+ SPH_C32(0x923F82A4), SPH_C32(0xAB1C5ED5),
+ SPH_C32(0xD807AA98), SPH_C32(0x12835B01),
+ SPH_C32(0x243185BE), SPH_C32(0x550C7DC3),
+ SPH_C32(0x72BE5D74), SPH_C32(0x80DEB1FE),
+ SPH_C32(0x9BDC06A7), SPH_C32(0xC19BF174),
+ SPH_C32(0xE49B69C1), SPH_C32(0xEFBE4786),
+ SPH_C32(0x0FC19DC6), SPH_C32(0x240CA1CC),
+ SPH_C32(0x2DE92C6F), SPH_C32(0x4A7484AA),
+ SPH_C32(0x5CB0A9DC), SPH_C32(0x76F988DA),
+ SPH_C32(0x983E5152), SPH_C32(0xA831C66D),
+ SPH_C32(0xB00327C8), SPH_C32(0xBF597FC7),
+ SPH_C32(0xC6E00BF3), SPH_C32(0xD5A79147),
+ SPH_C32(0x06CA6351), SPH_C32(0x14292967),
+ SPH_C32(0x27B70A85), SPH_C32(0x2E1B2138),
+ SPH_C32(0x4D2C6DFC), SPH_C32(0x53380D13),
+ SPH_C32(0x650A7354), SPH_C32(0x766A0ABB),
+ SPH_C32(0x81C2C92E), SPH_C32(0x92722C85),
+ SPH_C32(0xA2BFE8A1), SPH_C32(0xA81A664B),
+ SPH_C32(0xC24B8B70), SPH_C32(0xC76C51A3),
+ SPH_C32(0xD192E819), SPH_C32(0xD6990624),
+ SPH_C32(0xF40E3585), SPH_C32(0x106AA070),
+ SPH_C32(0x19A4C116), SPH_C32(0x1E376C08),
+ SPH_C32(0x2748774C), SPH_C32(0x34B0BCB5),
+ SPH_C32(0x391C0CB3), SPH_C32(0x4ED8AA4A),
+ SPH_C32(0x5B9CCA4F), SPH_C32(0x682E6FF3),
+ SPH_C32(0x748F82EE), SPH_C32(0x78A5636F),
+ SPH_C32(0x84C87814), SPH_C32(0x8CC70208),
+ SPH_C32(0x90BEFFFA), SPH_C32(0xA4506CEB),
+ SPH_C32(0xBEF9A3F7), SPH_C32(0xC67178F2)
+};
+
+#define SHA2_MEXP1(in, pc) do { \
+ W[pc] = in(pc); \
+ } while (0)
+
+#define SHA2_MEXP2(in, pc) do { \
+ W[(pc) & 0x0F] = SPH_T32(SSG2_1(W[((pc) - 2) & 0x0F]) \
+ + W[((pc) - 7) & 0x0F] \
+ + SSG2_0(W[((pc) - 15) & 0x0F]) + W[(pc) & 0x0F]); \
+ } while (0)
+
+#define SHA2_STEPn(n, a, b, c, d, e, f, g, h, in, pc) do { \
+ sph_u32 t1, t2; \
+ SHA2_MEXP ## n(in, pc); \
+ t1 = SPH_T32(h + BSG2_1(e) + CH(e, f, g) \
+ + K[pcount + (pc)] + W[(pc) & 0x0F]); \
+ t2 = SPH_T32(BSG2_0(a) + MAJ(a, b, c)); \
+ d = SPH_T32(d + t1); \
+ h = SPH_T32(t1 + t2); \
+ } while (0)
+
+#define SHA2_STEP1(a, b, c, d, e, f, g, h, in, pc) \
+ SHA2_STEPn(1, a, b, c, d, e, f, g, h, in, pc)
+#define SHA2_STEP2(a, b, c, d, e, f, g, h, in, pc) \
+ SHA2_STEPn(2, a, b, c, d, e, f, g, h, in, pc)
+
+#define SHA2_ROUND_BODY(in, r) do { \
+ sph_u32 A, B, C, D, E, F, G, H; \
+ sph_u32 W[16]; \
+ unsigned pcount; \
+ \
+ A = (r)[0]; \
+ B = (r)[1]; \
+ C = (r)[2]; \
+ D = (r)[3]; \
+ E = (r)[4]; \
+ F = (r)[5]; \
+ G = (r)[6]; \
+ H = (r)[7]; \
+ pcount = 0; \
+ SHA2_STEP1(A, B, C, D, E, F, G, H, in, 0); \
+ SHA2_STEP1(H, A, B, C, D, E, F, G, in, 1); \
+ SHA2_STEP1(G, H, A, B, C, D, E, F, in, 2); \
+ SHA2_STEP1(F, G, H, A, B, C, D, E, in, 3); \
+ SHA2_STEP1(E, F, G, H, A, B, C, D, in, 4); \
+ SHA2_STEP1(D, E, F, G, H, A, B, C, in, 5); \
+ SHA2_STEP1(C, D, E, F, G, H, A, B, in, 6); \
+ SHA2_STEP1(B, C, D, E, F, G, H, A, in, 7); \
+ SHA2_STEP1(A, B, C, D, E, F, G, H, in, 8); \
+ SHA2_STEP1(H, A, B, C, D, E, F, G, in, 9); \
+ SHA2_STEP1(G, H, A, B, C, D, E, F, in, 10); \
+ SHA2_STEP1(F, G, H, A, B, C, D, E, in, 11); \
+ SHA2_STEP1(E, F, G, H, A, B, C, D, in, 12); \
+ SHA2_STEP1(D, E, F, G, H, A, B, C, in, 13); \
+ SHA2_STEP1(C, D, E, F, G, H, A, B, in, 14); \
+ SHA2_STEP1(B, C, D, E, F, G, H, A, in, 15); \
+ for (pcount = 16; pcount < 64; pcount += 16) { \
+ SHA2_STEP2(A, B, C, D, E, F, G, H, in, 0); \
+ SHA2_STEP2(H, A, B, C, D, E, F, G, in, 1); \
+ SHA2_STEP2(G, H, A, B, C, D, E, F, in, 2); \
+ SHA2_STEP2(F, G, H, A, B, C, D, E, in, 3); \
+ SHA2_STEP2(E, F, G, H, A, B, C, D, in, 4); \
+ SHA2_STEP2(D, E, F, G, H, A, B, C, in, 5); \
+ SHA2_STEP2(C, D, E, F, G, H, A, B, in, 6); \
+ SHA2_STEP2(B, C, D, E, F, G, H, A, in, 7); \
+ SHA2_STEP2(A, B, C, D, E, F, G, H, in, 8); \
+ SHA2_STEP2(H, A, B, C, D, E, F, G, in, 9); \
+ SHA2_STEP2(G, H, A, B, C, D, E, F, in, 10); \
+ SHA2_STEP2(F, G, H, A, B, C, D, E, in, 11); \
+ SHA2_STEP2(E, F, G, H, A, B, C, D, in, 12); \
+ SHA2_STEP2(D, E, F, G, H, A, B, C, in, 13); \
+ SHA2_STEP2(C, D, E, F, G, H, A, B, in, 14); \
+ SHA2_STEP2(B, C, D, E, F, G, H, A, in, 15); \
+ } \
+ (r)[0] = SPH_T32((r)[0] + A); \
+ (r)[1] = SPH_T32((r)[1] + B); \
+ (r)[2] = SPH_T32((r)[2] + C); \
+ (r)[3] = SPH_T32((r)[3] + D); \
+ (r)[4] = SPH_T32((r)[4] + E); \
+ (r)[5] = SPH_T32((r)[5] + F); \
+ (r)[6] = SPH_T32((r)[6] + G); \
+ (r)[7] = SPH_T32((r)[7] + H); \
+ } while (0)
+
+#else
+
+#define SHA2_ROUND_BODY(in, r) do { \
+ sph_u32 A, B, C, D, E, F, G, H, T1, T2; \
+ sph_u32 W00, W01, W02, W03, W04, W05, W06, W07; \
+ sph_u32 W08, W09, W10, W11, W12, W13, W14, W15; \
+ \
+ A = (r)[0]; \
+ B = (r)[1]; \
+ C = (r)[2]; \
+ D = (r)[3]; \
+ E = (r)[4]; \
+ F = (r)[5]; \
+ G = (r)[6]; \
+ H = (r)[7]; \
+ W00 = in(0); \
+ T1 = SPH_T32(H + BSG2_1(E) + CH(E, F, G) \
+ + SPH_C32(0x428A2F98) + W00); \
+ T2 = SPH_T32(BSG2_0(A) + MAJ(A, B, C)); \
+ D = SPH_T32(D + T1); \
+ H = SPH_T32(T1 + T2); \
+ W01 = in(1); \
+ T1 = SPH_T32(G + BSG2_1(D) + CH(D, E, F) \
+ + SPH_C32(0x71374491) + W01); \
+ T2 = SPH_T32(BSG2_0(H) + MAJ(H, A, B)); \
+ C = SPH_T32(C + T1); \
+ G = SPH_T32(T1 + T2); \
+ W02 = in(2); \
+ T1 = SPH_T32(F + BSG2_1(C) + CH(C, D, E) \
+ + SPH_C32(0xB5C0FBCF) + W02); \
+ T2 = SPH_T32(BSG2_0(G) + MAJ(G, H, A)); \
+ B = SPH_T32(B + T1); \
+ F = SPH_T32(T1 + T2); \
+ W03 = in(3); \
+ T1 = SPH_T32(E + BSG2_1(B) + CH(B, C, D) \
+ + SPH_C32(0xE9B5DBA5) + W03); \
+ T2 = SPH_T32(BSG2_0(F) + MAJ(F, G, H)); \
+ A = SPH_T32(A + T1); \
+ E = SPH_T32(T1 + T2); \
+ W04 = in(4); \
+ T1 = SPH_T32(D + BSG2_1(A) + CH(A, B, C) \
+ + SPH_C32(0x3956C25B) + W04); \
+ T2 = SPH_T32(BSG2_0(E) + MAJ(E, F, G)); \
+ H = SPH_T32(H + T1); \
+ D = SPH_T32(T1 + T2); \
+ W05 = in(5); \
+ T1 = SPH_T32(C + BSG2_1(H) + CH(H, A, B) \
+ + SPH_C32(0x59F111F1) + W05); \
+ T2 = SPH_T32(BSG2_0(D) + MAJ(D, E, F)); \
+ G = SPH_T32(G + T1); \
+ C = SPH_T32(T1 + T2); \
+ W06 = in(6); \
+ T1 = SPH_T32(B + BSG2_1(G) + CH(G, H, A) \
+ + SPH_C32(0x923F82A4) + W06); \
+ T2 = SPH_T32(BSG2_0(C) + MAJ(C, D, E)); \
+ F = SPH_T32(F + T1); \
+ B = SPH_T32(T1 + T2); \
+ W07 = in(7); \
+ T1 = SPH_T32(A + BSG2_1(F) + CH(F, G, H) \
+ + SPH_C32(0xAB1C5ED5) + W07); \
+ T2 = SPH_T32(BSG2_0(B) + MAJ(B, C, D)); \
+ E = SPH_T32(E + T1); \
+ A = SPH_T32(T1 + T2); \
+ W08 = in(8); \
+ T1 = SPH_T32(H + BSG2_1(E) + CH(E, F, G) \
+ + SPH_C32(0xD807AA98) + W08); \
+ T2 = SPH_T32(BSG2_0(A) + MAJ(A, B, C)); \
+ D = SPH_T32(D + T1); \
+ H = SPH_T32(T1 + T2); \
+ W09 = in(9); \
+ T1 = SPH_T32(G + BSG2_1(D) + CH(D, E, F) \
+ + SPH_C32(0x12835B01) + W09); \
+ T2 = SPH_T32(BSG2_0(H) + MAJ(H, A, B)); \
+ C = SPH_T32(C + T1); \
+ G = SPH_T32(T1 + T2); \
+ W10 = in(10); \
+ T1 = SPH_T32(F + BSG2_1(C) + CH(C, D, E) \
+ + SPH_C32(0x243185BE) + W10); \
+ T2 = SPH_T32(BSG2_0(G) + MAJ(G, H, A)); \
+ B = SPH_T32(B + T1); \
+ F = SPH_T32(T1 + T2); \
+ W11 = in(11); \
+ T1 = SPH_T32(E + BSG2_1(B) + CH(B, C, D) \
+ + SPH_C32(0x550C7DC3) + W11); \
+ T2 = SPH_T32(BSG2_0(F) + MAJ(F, G, H)); \
+ A = SPH_T32(A + T1); \
+ E = SPH_T32(T1 + T2); \
+ W12 = in(12); \
+ T1 = SPH_T32(D + BSG2_1(A) + CH(A, B, C) \
+ + SPH_C32(0x72BE5D74) + W12); \
+ T2 = SPH_T32(BSG2_0(E) + MAJ(E, F, G)); \
+ H = SPH_T32(H + T1); \
+ D = SPH_T32(T1 + T2); \
+ W13 = in(13); \
+ T1 = SPH_T32(C + BSG2_1(H) + CH(H, A, B) \
+ + SPH_C32(0x80DEB1FE) + W13); \
+ T2 = SPH_T32(BSG2_0(D) + MAJ(D, E, F)); \
+ G = SPH_T32(G + T1); \
+ C = SPH_T32(T1 + T2); \
+ W14 = in(14); \
+ T1 = SPH_T32(B + BSG2_1(G) + CH(G, H, A) \
+ + SPH_C32(0x9BDC06A7) + W14); \
+ T2 = SPH_T32(BSG2_0(C) + MAJ(C, D, E)); \
+ F = SPH_T32(F + T1); \
+ B = SPH_T32(T1 + T2); \
+ W15 = in(15); \
+ T1 = SPH_T32(A + BSG2_1(F) + CH(F, G, H) \
+ + SPH_C32(0xC19BF174) + W15); \
+ T2 = SPH_T32(BSG2_0(B) + MAJ(B, C, D)); \
+ E = SPH_T32(E + T1); \
+ A = SPH_T32(T1 + T2); \
+ W00 = SPH_T32(SSG2_1(W14) + W09 + SSG2_0(W01) + W00); \
+ T1 = SPH_T32(H + BSG2_1(E) + CH(E, F, G) \
+ + SPH_C32(0xE49B69C1) + W00); \
+ T2 = SPH_T32(BSG2_0(A) + MAJ(A, B, C)); \
+ D = SPH_T32(D + T1); \
+ H = SPH_T32(T1 + T2); \
+ W01 = SPH_T32(SSG2_1(W15) + W10 + SSG2_0(W02) + W01); \
+ T1 = SPH_T32(G + BSG2_1(D) + CH(D, E, F) \
+ + SPH_C32(0xEFBE4786) + W01); \
+ T2 = SPH_T32(BSG2_0(H) + MAJ(H, A, B)); \
+ C = SPH_T32(C + T1); \
+ G = SPH_T32(T1 + T2); \
+ W02 = SPH_T32(SSG2_1(W00) + W11 + SSG2_0(W03) + W02); \
+ T1 = SPH_T32(F + BSG2_1(C) + CH(C, D, E) \
+ + SPH_C32(0x0FC19DC6) + W02); \
+ T2 = SPH_T32(BSG2_0(G) + MAJ(G, H, A)); \
+ B = SPH_T32(B + T1); \
+ F = SPH_T32(T1 + T2); \
+ W03 = SPH_T32(SSG2_1(W01) + W12 + SSG2_0(W04) + W03); \
+ T1 = SPH_T32(E + BSG2_1(B) + CH(B, C, D) \
+ + SPH_C32(0x240CA1CC) + W03); \
+ T2 = SPH_T32(BSG2_0(F) + MAJ(F, G, H)); \
+ A = SPH_T32(A + T1); \
+ E = SPH_T32(T1 + T2); \
+ W04 = SPH_T32(SSG2_1(W02) + W13 + SSG2_0(W05) + W04); \
+ T1 = SPH_T32(D + BSG2_1(A) + CH(A, B, C) \
+ + SPH_C32(0x2DE92C6F) + W04); \
+ T2 = SPH_T32(BSG2_0(E) + MAJ(E, F, G)); \
+ H = SPH_T32(H + T1); \
+ D = SPH_T32(T1 + T2); \
+ W05 = SPH_T32(SSG2_1(W03) + W14 + SSG2_0(W06) + W05); \
+ T1 = SPH_T32(C + BSG2_1(H) + CH(H, A, B) \
+ + SPH_C32(0x4A7484AA) + W05); \
+ T2 = SPH_T32(BSG2_0(D) + MAJ(D, E, F)); \
+ G = SPH_T32(G + T1); \
+ C = SPH_T32(T1 + T2); \
+ W06 = SPH_T32(SSG2_1(W04) + W15 + SSG2_0(W07) + W06); \
+ T1 = SPH_T32(B + BSG2_1(G) + CH(G, H, A) \
+ + SPH_C32(0x5CB0A9DC) + W06); \
+ T2 = SPH_T32(BSG2_0(C) + MAJ(C, D, E)); \
+ F = SPH_T32(F + T1); \
+ B = SPH_T32(T1 + T2); \
+ W07 = SPH_T32(SSG2_1(W05) + W00 + SSG2_0(W08) + W07); \
+ T1 = SPH_T32(A + BSG2_1(F) + CH(F, G, H) \
+ + SPH_C32(0x76F988DA) + W07); \
+ T2 = SPH_T32(BSG2_0(B) + MAJ(B, C, D)); \
+ E = SPH_T32(E + T1); \
+ A = SPH_T32(T1 + T2); \
+ W08 = SPH_T32(SSG2_1(W06) + W01 + SSG2_0(W09) + W08); \
+ T1 = SPH_T32(H + BSG2_1(E) + CH(E, F, G) \
+ + SPH_C32(0x983E5152) + W08); \
+ T2 = SPH_T32(BSG2_0(A) + MAJ(A, B, C)); \
+ D = SPH_T32(D + T1); \
+ H = SPH_T32(T1 + T2); \
+ W09 = SPH_T32(SSG2_1(W07) + W02 + SSG2_0(W10) + W09); \
+ T1 = SPH_T32(G + BSG2_1(D) + CH(D, E, F) \
+ + SPH_C32(0xA831C66D) + W09); \
+ T2 = SPH_T32(BSG2_0(H) + MAJ(H, A, B)); \
+ C = SPH_T32(C + T1); \
+ G = SPH_T32(T1 + T2); \
+ W10 = SPH_T32(SSG2_1(W08) + W03 + SSG2_0(W11) + W10); \
+ T1 = SPH_T32(F + BSG2_1(C) + CH(C, D, E) \
+ + SPH_C32(0xB00327C8) + W10); \
+ T2 = SPH_T32(BSG2_0(G) + MAJ(G, H, A)); \
+ B = SPH_T32(B + T1); \
+ F = SPH_T32(T1 + T2); \
+ W11 = SPH_T32(SSG2_1(W09) + W04 + SSG2_0(W12) + W11); \
+ T1 = SPH_T32(E + BSG2_1(B) + CH(B, C, D) \
+ + SPH_C32(0xBF597FC7) + W11); \
+ T2 = SPH_T32(BSG2_0(F) + MAJ(F, G, H)); \
+ A = SPH_T32(A + T1); \
+ E = SPH_T32(T1 + T2); \
+ W12 = SPH_T32(SSG2_1(W10) + W05 + SSG2_0(W13) + W12); \
+ T1 = SPH_T32(D + BSG2_1(A) + CH(A, B, C) \
+ + SPH_C32(0xC6E00BF3) + W12); \
+ T2 = SPH_T32(BSG2_0(E) + MAJ(E, F, G)); \
+ H = SPH_T32(H + T1); \
+ D = SPH_T32(T1 + T2); \
+ W13 = SPH_T32(SSG2_1(W11) + W06 + SSG2_0(W14) + W13); \
+ T1 = SPH_T32(C + BSG2_1(H) + CH(H, A, B) \
+ + SPH_C32(0xD5A79147) + W13); \
+ T2 = SPH_T32(BSG2_0(D) + MAJ(D, E, F)); \
+ G = SPH_T32(G + T1); \
+ C = SPH_T32(T1 + T2); \
+ W14 = SPH_T32(SSG2_1(W12) + W07 + SSG2_0(W15) + W14); \
+ T1 = SPH_T32(B + BSG2_1(G) + CH(G, H, A) \
+ + SPH_C32(0x06CA6351) + W14); \
+ T2 = SPH_T32(BSG2_0(C) + MAJ(C, D, E)); \
+ F = SPH_T32(F + T1); \
+ B = SPH_T32(T1 + T2); \
+ W15 = SPH_T32(SSG2_1(W13) + W08 + SSG2_0(W00) + W15); \
+ T1 = SPH_T32(A + BSG2_1(F) + CH(F, G, H) \
+ + SPH_C32(0x14292967) + W15); \
+ T2 = SPH_T32(BSG2_0(B) + MAJ(B, C, D)); \
+ E = SPH_T32(E + T1); \
+ A = SPH_T32(T1 + T2); \
+ W00 = SPH_T32(SSG2_1(W14) + W09 + SSG2_0(W01) + W00); \
+ T1 = SPH_T32(H + BSG2_1(E) + CH(E, F, G) \
+ + SPH_C32(0x27B70A85) + W00); \
+ T2 = SPH_T32(BSG2_0(A) + MAJ(A, B, C)); \
+ D = SPH_T32(D + T1); \
+ H = SPH_T32(T1 + T2); \
+ W01 = SPH_T32(SSG2_1(W15) + W10 + SSG2_0(W02) + W01); \
+ T1 = SPH_T32(G + BSG2_1(D) + CH(D, E, F) \
+ + SPH_C32(0x2E1B2138) + W01); \
+ T2 = SPH_T32(BSG2_0(H) + MAJ(H, A, B)); \
+ C = SPH_T32(C + T1); \
+ G = SPH_T32(T1 + T2); \
+ W02 = SPH_T32(SSG2_1(W00) + W11 + SSG2_0(W03) + W02); \
+ T1 = SPH_T32(F + BSG2_1(C) + CH(C, D, E) \
+ + SPH_C32(0x4D2C6DFC) + W02); \
+ T2 = SPH_T32(BSG2_0(G) + MAJ(G, H, A)); \
+ B = SPH_T32(B + T1); \
+ F = SPH_T32(T1 + T2); \
+ W03 = SPH_T32(SSG2_1(W01) + W12 + SSG2_0(W04) + W03); \
+ T1 = SPH_T32(E + BSG2_1(B) + CH(B, C, D) \
+ + SPH_C32(0x53380D13) + W03); \
+ T2 = SPH_T32(BSG2_0(F) + MAJ(F, G, H)); \
+ A = SPH_T32(A + T1); \
+ E = SPH_T32(T1 + T2); \
+ W04 = SPH_T32(SSG2_1(W02) + W13 + SSG2_0(W05) + W04); \
+ T1 = SPH_T32(D + BSG2_1(A) + CH(A, B, C) \
+ + SPH_C32(0x650A7354) + W04); \
+ T2 = SPH_T32(BSG2_0(E) + MAJ(E, F, G)); \
+ H = SPH_T32(H + T1); \
+ D = SPH_T32(T1 + T2); \
+ W05 = SPH_T32(SSG2_1(W03) + W14 + SSG2_0(W06) + W05); \
+ T1 = SPH_T32(C + BSG2_1(H) + CH(H, A, B) \
+ + SPH_C32(0x766A0ABB) + W05); \
+ T2 = SPH_T32(BSG2_0(D) + MAJ(D, E, F)); \
+ G = SPH_T32(G + T1); \
+ C = SPH_T32(T1 + T2); \
+ W06 = SPH_T32(SSG2_1(W04) + W15 + SSG2_0(W07) + W06); \
+ T1 = SPH_T32(B + BSG2_1(G) + CH(G, H, A) \
+ + SPH_C32(0x81C2C92E) + W06); \
+ T2 = SPH_T32(BSG2_0(C) + MAJ(C, D, E)); \
+ F = SPH_T32(F + T1); \
+ B = SPH_T32(T1 + T2); \
+ W07 = SPH_T32(SSG2_1(W05) + W00 + SSG2_0(W08) + W07); \
+ T1 = SPH_T32(A + BSG2_1(F) + CH(F, G, H) \
+ + SPH_C32(0x92722C85) + W07); \
+ T2 = SPH_T32(BSG2_0(B) + MAJ(B, C, D)); \
+ E = SPH_T32(E + T1); \
+ A = SPH_T32(T1 + T2); \
+ W08 = SPH_T32(SSG2_1(W06) + W01 + SSG2_0(W09) + W08); \
+ T1 = SPH_T32(H + BSG2_1(E) + CH(E, F, G) \
+ + SPH_C32(0xA2BFE8A1) + W08); \
+ T2 = SPH_T32(BSG2_0(A) + MAJ(A, B, C)); \
+ D = SPH_T32(D + T1); \
+ H = SPH_T32(T1 + T2); \
+ W09 = SPH_T32(SSG2_1(W07) + W02 + SSG2_0(W10) + W09); \
+ T1 = SPH_T32(G + BSG2_1(D) + CH(D, E, F) \
+ + SPH_C32(0xA81A664B) + W09); \
+ T2 = SPH_T32(BSG2_0(H) + MAJ(H, A, B)); \
+ C = SPH_T32(C + T1); \
+ G = SPH_T32(T1 + T2); \
+ W10 = SPH_T32(SSG2_1(W08) + W03 + SSG2_0(W11) + W10); \
+ T1 = SPH_T32(F + BSG2_1(C) + CH(C, D, E) \
+ + SPH_C32(0xC24B8B70) + W10); \
+ T2 = SPH_T32(BSG2_0(G) + MAJ(G, H, A)); \
+ B = SPH_T32(B + T1); \
+ F = SPH_T32(T1 + T2); \
+ W11 = SPH_T32(SSG2_1(W09) + W04 + SSG2_0(W12) + W11); \
+ T1 = SPH_T32(E + BSG2_1(B) + CH(B, C, D) \
+ + SPH_C32(0xC76C51A3) + W11); \
+ T2 = SPH_T32(BSG2_0(F) + MAJ(F, G, H)); \
+ A = SPH_T32(A + T1); \
+ E = SPH_T32(T1 + T2); \
+ W12 = SPH_T32(SSG2_1(W10) + W05 + SSG2_0(W13) + W12); \
+ T1 = SPH_T32(D + BSG2_1(A) + CH(A, B, C) \
+ + SPH_C32(0xD192E819) + W12); \
+ T2 = SPH_T32(BSG2_0(E) + MAJ(E, F, G)); \
+ H = SPH_T32(H + T1); \
+ D = SPH_T32(T1 + T2); \
+ W13 = SPH_T32(SSG2_1(W11) + W06 + SSG2_0(W14) + W13); \
+ T1 = SPH_T32(C + BSG2_1(H) + CH(H, A, B) \
+ + SPH_C32(0xD6990624) + W13); \
+ T2 = SPH_T32(BSG2_0(D) + MAJ(D, E, F)); \
+ G = SPH_T32(G + T1); \
+ C = SPH_T32(T1 + T2); \
+ W14 = SPH_T32(SSG2_1(W12) + W07 + SSG2_0(W15) + W14); \
+ T1 = SPH_T32(B + BSG2_1(G) + CH(G, H, A) \
+ + SPH_C32(0xF40E3585) + W14); \
+ T2 = SPH_T32(BSG2_0(C) + MAJ(C, D, E)); \
+ F = SPH_T32(F + T1); \
+ B = SPH_T32(T1 + T2); \
+ W15 = SPH_T32(SSG2_1(W13) + W08 + SSG2_0(W00) + W15); \
+ T1 = SPH_T32(A + BSG2_1(F) + CH(F, G, H) \
+ + SPH_C32(0x106AA070) + W15); \
+ T2 = SPH_T32(BSG2_0(B) + MAJ(B, C, D)); \
+ E = SPH_T32(E + T1); \
+ A = SPH_T32(T1 + T2); \
+ W00 = SPH_T32(SSG2_1(W14) + W09 + SSG2_0(W01) + W00); \
+ T1 = SPH_T32(H + BSG2_1(E) + CH(E, F, G) \
+ + SPH_C32(0x19A4C116) + W00); \
+ T2 = SPH_T32(BSG2_0(A) + MAJ(A, B, C)); \
+ D = SPH_T32(D + T1); \
+ H = SPH_T32(T1 + T2); \
+ W01 = SPH_T32(SSG2_1(W15) + W10 + SSG2_0(W02) + W01); \
+ T1 = SPH_T32(G + BSG2_1(D) + CH(D, E, F) \
+ + SPH_C32(0x1E376C08) + W01); \
+ T2 = SPH_T32(BSG2_0(H) + MAJ(H, A, B)); \
+ C = SPH_T32(C + T1); \
+ G = SPH_T32(T1 + T2); \
+ W02 = SPH_T32(SSG2_1(W00) + W11 + SSG2_0(W03) + W02); \
+ T1 = SPH_T32(F + BSG2_1(C) + CH(C, D, E) \
+ + SPH_C32(0x2748774C) + W02); \
+ T2 = SPH_T32(BSG2_0(G) + MAJ(G, H, A)); \
+ B = SPH_T32(B + T1); \
+ F = SPH_T32(T1 + T2); \
+ W03 = SPH_T32(SSG2_1(W01) + W12 + SSG2_0(W04) + W03); \
+ T1 = SPH_T32(E + BSG2_1(B) + CH(B, C, D) \
+ + SPH_C32(0x34B0BCB5) + W03); \
+ T2 = SPH_T32(BSG2_0(F) + MAJ(F, G, H)); \
+ A = SPH_T32(A + T1); \
+ E = SPH_T32(T1 + T2); \
+ W04 = SPH_T32(SSG2_1(W02) + W13 + SSG2_0(W05) + W04); \
+ T1 = SPH_T32(D + BSG2_1(A) + CH(A, B, C) \
+ + SPH_C32(0x391C0CB3) + W04); \
+ T2 = SPH_T32(BSG2_0(E) + MAJ(E, F, G)); \
+ H = SPH_T32(H + T1); \
+ D = SPH_T32(T1 + T2); \
+ W05 = SPH_T32(SSG2_1(W03) + W14 + SSG2_0(W06) + W05); \
+ T1 = SPH_T32(C + BSG2_1(H) + CH(H, A, B) \
+ + SPH_C32(0x4ED8AA4A) + W05); \
+ T2 = SPH_T32(BSG2_0(D) + MAJ(D, E, F)); \
+ G = SPH_T32(G + T1); \
+ C = SPH_T32(T1 + T2); \
+ W06 = SPH_T32(SSG2_1(W04) + W15 + SSG2_0(W07) + W06); \
+ T1 = SPH_T32(B + BSG2_1(G) + CH(G, H, A) \
+ + SPH_C32(0x5B9CCA4F) + W06); \
+ T2 = SPH_T32(BSG2_0(C) + MAJ(C, D, E)); \
+ F = SPH_T32(F + T1); \
+ B = SPH_T32(T1 + T2); \
+ W07 = SPH_T32(SSG2_1(W05) + W00 + SSG2_0(W08) + W07); \
+ T1 = SPH_T32(A + BSG2_1(F) + CH(F, G, H) \
+ + SPH_C32(0x682E6FF3) + W07); \
+ T2 = SPH_T32(BSG2_0(B) + MAJ(B, C, D)); \
+ E = SPH_T32(E + T1); \
+ A = SPH_T32(T1 + T2); \
+ W08 = SPH_T32(SSG2_1(W06) + W01 + SSG2_0(W09) + W08); \
+ T1 = SPH_T32(H + BSG2_1(E) + CH(E, F, G) \
+ + SPH_C32(0x748F82EE) + W08); \
+ T2 = SPH_T32(BSG2_0(A) + MAJ(A, B, C)); \
+ D = SPH_T32(D + T1); \
+ H = SPH_T32(T1 + T2); \
+ W09 = SPH_T32(SSG2_1(W07) + W02 + SSG2_0(W10) + W09); \
+ T1 = SPH_T32(G + BSG2_1(D) + CH(D, E, F) \
+ + SPH_C32(0x78A5636F) + W09); \
+ T2 = SPH_T32(BSG2_0(H) + MAJ(H, A, B)); \
+ C = SPH_T32(C + T1); \
+ G = SPH_T32(T1 + T2); \
+ W10 = SPH_T32(SSG2_1(W08) + W03 + SSG2_0(W11) + W10); \
+ T1 = SPH_T32(F + BSG2_1(C) + CH(C, D, E) \
+ + SPH_C32(0x84C87814) + W10); \
+ T2 = SPH_T32(BSG2_0(G) + MAJ(G, H, A)); \
+ B = SPH_T32(B + T1); \
+ F = SPH_T32(T1 + T2); \
+ W11 = SPH_T32(SSG2_1(W09) + W04 + SSG2_0(W12) + W11); \
+ T1 = SPH_T32(E + BSG2_1(B) + CH(B, C, D) \
+ + SPH_C32(0x8CC70208) + W11); \
+ T2 = SPH_T32(BSG2_0(F) + MAJ(F, G, H)); \
+ A = SPH_T32(A + T1); \
+ E = SPH_T32(T1 + T2); \
+ W12 = SPH_T32(SSG2_1(W10) + W05 + SSG2_0(W13) + W12); \
+ T1 = SPH_T32(D + BSG2_1(A) + CH(A, B, C) \
+ + SPH_C32(0x90BEFFFA) + W12); \
+ T2 = SPH_T32(BSG2_0(E) + MAJ(E, F, G)); \
+ H = SPH_T32(H + T1); \
+ D = SPH_T32(T1 + T2); \
+ W13 = SPH_T32(SSG2_1(W11) + W06 + SSG2_0(W14) + W13); \
+ T1 = SPH_T32(C + BSG2_1(H) + CH(H, A, B) \
+ + SPH_C32(0xA4506CEB) + W13); \
+ T2 = SPH_T32(BSG2_0(D) + MAJ(D, E, F)); \
+ G = SPH_T32(G + T1); \
+ C = SPH_T32(T1 + T2); \
+ W14 = SPH_T32(SSG2_1(W12) + W07 + SSG2_0(W15) + W14); \
+ T1 = SPH_T32(B + BSG2_1(G) + CH(G, H, A) \
+ + SPH_C32(0xBEF9A3F7) + W14); \
+ T2 = SPH_T32(BSG2_0(C) + MAJ(C, D, E)); \
+ F = SPH_T32(F + T1); \
+ B = SPH_T32(T1 + T2); \
+ W15 = SPH_T32(SSG2_1(W13) + W08 + SSG2_0(W00) + W15); \
+ T1 = SPH_T32(A + BSG2_1(F) + CH(F, G, H) \
+ + SPH_C32(0xC67178F2) + W15); \
+ T2 = SPH_T32(BSG2_0(B) + MAJ(B, C, D)); \
+ E = SPH_T32(E + T1); \
+ A = SPH_T32(T1 + T2); \
+ (r)[0] = SPH_T32((r)[0] + A); \
+ (r)[1] = SPH_T32((r)[1] + B); \
+ (r)[2] = SPH_T32((r)[2] + C); \
+ (r)[3] = SPH_T32((r)[3] + D); \
+ (r)[4] = SPH_T32((r)[4] + E); \
+ (r)[5] = SPH_T32((r)[5] + F); \
+ (r)[6] = SPH_T32((r)[6] + G); \
+ (r)[7] = SPH_T32((r)[7] + H); \
+ } while (0)
+
+#endif
+
+/*
+ * One round of SHA-224 / SHA-256. The data must be aligned for 32-bit access.
+ */
+static void
+sha2_round(const unsigned char *data, sph_u32 r[8])
+{
+#define SHA2_IN(x) sph_dec32be_aligned(data + (4 * (x)))
+ SHA2_ROUND_BODY(SHA2_IN, r);
+#undef SHA2_IN
+}
+
+/* see sph_sha2.h */
+void
+sph_sha224_init(void *cc)
+{
+ sph_sha224_context *sc;
+
+ sc = cc;
+ memcpy(sc->val, H224, sizeof H224);
+#if SPH_64
+ sc->count = 0;
+#else
+ sc->count_high = sc->count_low = 0;
+#endif
+}
+
+/* see sph_sha2.h */
+void
+sph_sha256_init(void *cc)
+{
+ sph_sha256_context *sc;
+
+ sc = cc;
+ memcpy(sc->val, H256, sizeof H256);
+#if SPH_64
+ sc->count = 0;
+#else
+ sc->count_high = sc->count_low = 0;
+#endif
+}
+
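+/*
+ * Including md_helper.c with RFUN, HASH, and BE32 defined instantiates
+ * the streaming function sph_sha224() together with the static helpers
+ * sha224_close() and sha224_addbits_and_close() used below.
+ */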
+#define RFUN sha2_round
+#define HASH sha224
+#define BE32 1
+#include "sph/md_helper.c"
+
+/* see sph_sha2.h */
+void
+sph_sha224_close(void *cc, void *dst)
+{
+ sha224_close(cc, dst, 7);
+ sph_sha224_init(cc);
+}
+
+/* see sph_sha2.h */
+void
+sph_sha224_addbits_and_close(void *cc, unsigned ub, unsigned n, void *dst)
+{
+ sha224_addbits_and_close(cc, ub, n, dst, 7);
+ sph_sha224_init(cc);
+}
+
+/* see sph_sha2.h */
+void
+sph_sha256_close(void *cc, void *dst)
+{
+ sha224_close(cc, dst, 8);
+ sph_sha256_init(cc);
+}
+
+/* see sph_sha2.h */
+void
+sph_sha256_addbits_and_close(void *cc, unsigned ub, unsigned n, void *dst)
+{
+ sha224_addbits_and_close(cc, ub, n, dst, 8);
+ sph_sha256_init(cc);
+}
+
+/* see sph_sha2.h */
+void
+sph_sha224_comp(const sph_u32 msg[16], sph_u32 val[8])
+{
+#define SHA2_IN(x) msg[x]
+ SHA2_ROUND_BODY(SHA2_IN, val);
+#undef SHA2_IN
+}
diff --git a/libqpdf/sha2big.c b/libqpdf/sha2big.c
new file mode 100644
index 00000000..e4aadbdb
--- /dev/null
+++ b/libqpdf/sha2big.c
@@ -0,0 +1,247 @@
+/* $Id: sha2big.c 216 2010-06-08 09:46:57Z tp $ */
+/*
+ * SHA-384 / SHA-512 implementation.
+ *
+ * ==========================(LICENSE BEGIN)============================
+ *
+ * Copyright (c) 2007-2010 Projet RNRT SAPHIR
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining
+ * a copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sublicense, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be
+ * included in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+ * IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
+ * CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * ===========================(LICENSE END)=============================
+ *
+ * @author Thomas Pornin <thomas.pornin@cryptolog.com>
+ */
+
+#include <stddef.h>
+#include <string.h>
+
+#include "sph/sph_sha2.h"
+
+#if SPH_64
+
+#define CH(X, Y, Z) ((((Y) ^ (Z)) & (X)) ^ (Z))
+#define MAJ(X, Y, Z) (((X) & (Y)) | (((X) | (Y)) & (Z)))
+
+#define ROTR64 SPH_ROTR64
+
+#define BSG5_0(x) (ROTR64(x, 28) ^ ROTR64(x, 34) ^ ROTR64(x, 39))
+#define BSG5_1(x) (ROTR64(x, 14) ^ ROTR64(x, 18) ^ ROTR64(x, 41))
+#define SSG5_0(x) (ROTR64(x, 1) ^ ROTR64(x, 8) ^ SPH_T64((x) >> 7))
+#define SSG5_1(x) (ROTR64(x, 19) ^ ROTR64(x, 61) ^ SPH_T64((x) >> 6))
+
+static const sph_u64 K512[80] = {
+ SPH_C64(0x428A2F98D728AE22), SPH_C64(0x7137449123EF65CD),
+ SPH_C64(0xB5C0FBCFEC4D3B2F), SPH_C64(0xE9B5DBA58189DBBC),
+ SPH_C64(0x3956C25BF348B538), SPH_C64(0x59F111F1B605D019),
+ SPH_C64(0x923F82A4AF194F9B), SPH_C64(0xAB1C5ED5DA6D8118),
+ SPH_C64(0xD807AA98A3030242), SPH_C64(0x12835B0145706FBE),
+ SPH_C64(0x243185BE4EE4B28C), SPH_C64(0x550C7DC3D5FFB4E2),
+ SPH_C64(0x72BE5D74F27B896F), SPH_C64(0x80DEB1FE3B1696B1),
+ SPH_C64(0x9BDC06A725C71235), SPH_C64(0xC19BF174CF692694),
+ SPH_C64(0xE49B69C19EF14AD2), SPH_C64(0xEFBE4786384F25E3),
+ SPH_C64(0x0FC19DC68B8CD5B5), SPH_C64(0x240CA1CC77AC9C65),
+ SPH_C64(0x2DE92C6F592B0275), SPH_C64(0x4A7484AA6EA6E483),
+ SPH_C64(0x5CB0A9DCBD41FBD4), SPH_C64(0x76F988DA831153B5),
+ SPH_C64(0x983E5152EE66DFAB), SPH_C64(0xA831C66D2DB43210),
+ SPH_C64(0xB00327C898FB213F), SPH_C64(0xBF597FC7BEEF0EE4),
+ SPH_C64(0xC6E00BF33DA88FC2), SPH_C64(0xD5A79147930AA725),
+ SPH_C64(0x06CA6351E003826F), SPH_C64(0x142929670A0E6E70),
+ SPH_C64(0x27B70A8546D22FFC), SPH_C64(0x2E1B21385C26C926),
+ SPH_C64(0x4D2C6DFC5AC42AED), SPH_C64(0x53380D139D95B3DF),
+ SPH_C64(0x650A73548BAF63DE), SPH_C64(0x766A0ABB3C77B2A8),
+ SPH_C64(0x81C2C92E47EDAEE6), SPH_C64(0x92722C851482353B),
+ SPH_C64(0xA2BFE8A14CF10364), SPH_C64(0xA81A664BBC423001),
+ SPH_C64(0xC24B8B70D0F89791), SPH_C64(0xC76C51A30654BE30),
+ SPH_C64(0xD192E819D6EF5218), SPH_C64(0xD69906245565A910),
+ SPH_C64(0xF40E35855771202A), SPH_C64(0x106AA07032BBD1B8),
+ SPH_C64(0x19A4C116B8D2D0C8), SPH_C64(0x1E376C085141AB53),
+ SPH_C64(0x2748774CDF8EEB99), SPH_C64(0x34B0BCB5E19B48A8),
+ SPH_C64(0x391C0CB3C5C95A63), SPH_C64(0x4ED8AA4AE3418ACB),
+ SPH_C64(0x5B9CCA4F7763E373), SPH_C64(0x682E6FF3D6B2B8A3),
+ SPH_C64(0x748F82EE5DEFB2FC), SPH_C64(0x78A5636F43172F60),
+ SPH_C64(0x84C87814A1F0AB72), SPH_C64(0x8CC702081A6439EC),
+ SPH_C64(0x90BEFFFA23631E28), SPH_C64(0xA4506CEBDE82BDE9),
+ SPH_C64(0xBEF9A3F7B2C67915), SPH_C64(0xC67178F2E372532B),
+ SPH_C64(0xCA273ECEEA26619C), SPH_C64(0xD186B8C721C0C207),
+ SPH_C64(0xEADA7DD6CDE0EB1E), SPH_C64(0xF57D4F7FEE6ED178),
+ SPH_C64(0x06F067AA72176FBA), SPH_C64(0x0A637DC5A2C898A6),
+ SPH_C64(0x113F9804BEF90DAE), SPH_C64(0x1B710B35131C471B),
+ SPH_C64(0x28DB77F523047D84), SPH_C64(0x32CAAB7B40C72493),
+ SPH_C64(0x3C9EBE0A15C9BEBC), SPH_C64(0x431D67C49C100D4C),
+ SPH_C64(0x4CC5D4BECB3E42B6), SPH_C64(0x597F299CFC657E2A),
+ SPH_C64(0x5FCB6FAB3AD6FAEC), SPH_C64(0x6C44198C4A475817)
+};
+
+static const sph_u64 H384[8] = {
+ SPH_C64(0xCBBB9D5DC1059ED8), SPH_C64(0x629A292A367CD507),
+ SPH_C64(0x9159015A3070DD17), SPH_C64(0x152FECD8F70E5939),
+ SPH_C64(0x67332667FFC00B31), SPH_C64(0x8EB44A8768581511),
+ SPH_C64(0xDB0C2E0D64F98FA7), SPH_C64(0x47B5481DBEFA4FA4)
+};
+
+static const sph_u64 H512[8] = {
+ SPH_C64(0x6A09E667F3BCC908), SPH_C64(0xBB67AE8584CAA73B),
+ SPH_C64(0x3C6EF372FE94F82B), SPH_C64(0xA54FF53A5F1D36F1),
+ SPH_C64(0x510E527FADE682D1), SPH_C64(0x9B05688C2B3E6C1F),
+ SPH_C64(0x1F83D9ABFB41BD6B), SPH_C64(0x5BE0CD19137E2179)
+};
+
+/*
+ * This macro defines the body for a SHA-384 / SHA-512 compression function
+ * implementation. The "in" parameter should evaluate, when applied to a
+ * numerical input parameter from 0 to 15, to an expression which yields
+ * the corresponding input block. The "r" parameter should evaluate to
+ * an array or pointer expression designating the array of 8 words which
+ * contains the input and output of the compression function.
+ *
+ * SHA-512 is hard for the compiler. If the loop is completely unrolled,
+ * then the code will be quite huge (possibly more than 100 kB), and the
+ * performance will be degraded due to cache misses on the code. We
+ * unroll only eight steps, which avoids all needless copies when
+ * 64-bit registers are swapped.
+ */
+
+#define SHA3_STEP(A, B, C, D, E, F, G, H, i) do { \
+ sph_u64 T1, T2; \
+ T1 = SPH_T64(H + BSG5_1(E) + CH(E, F, G) + K512[i] + W[i]); \
+ T2 = SPH_T64(BSG5_0(A) + MAJ(A, B, C)); \
+ D = SPH_T64(D + T1); \
+ H = SPH_T64(T1 + T2); \
+ } while (0)
+
+#define SHA3_ROUND_BODY(in, r) do { \
+ int i; \
+ sph_u64 A, B, C, D, E, F, G, H; \
+ sph_u64 W[80]; \
+ \
+ for (i = 0; i < 16; i ++) \
+ W[i] = in(i); \
+ for (i = 16; i < 80; i ++) \
+ W[i] = SPH_T64(SSG5_1(W[i - 2]) + W[i - 7] \
+ + SSG5_0(W[i - 15]) + W[i - 16]); \
+ A = (r)[0]; \
+ B = (r)[1]; \
+ C = (r)[2]; \
+ D = (r)[3]; \
+ E = (r)[4]; \
+ F = (r)[5]; \
+ G = (r)[6]; \
+ H = (r)[7]; \
+ for (i = 0; i < 80; i += 8) { \
+ SHA3_STEP(A, B, C, D, E, F, G, H, i + 0); \
+ SHA3_STEP(H, A, B, C, D, E, F, G, i + 1); \
+ SHA3_STEP(G, H, A, B, C, D, E, F, i + 2); \
+ SHA3_STEP(F, G, H, A, B, C, D, E, i + 3); \
+ SHA3_STEP(E, F, G, H, A, B, C, D, i + 4); \
+ SHA3_STEP(D, E, F, G, H, A, B, C, i + 5); \
+ SHA3_STEP(C, D, E, F, G, H, A, B, i + 6); \
+ SHA3_STEP(B, C, D, E, F, G, H, A, i + 7); \
+ } \
+ (r)[0] = SPH_T64((r)[0] + A); \
+ (r)[1] = SPH_T64((r)[1] + B); \
+ (r)[2] = SPH_T64((r)[2] + C); \
+ (r)[3] = SPH_T64((r)[3] + D); \
+ (r)[4] = SPH_T64((r)[4] + E); \
+ (r)[5] = SPH_T64((r)[5] + F); \
+ (r)[6] = SPH_T64((r)[6] + G); \
+ (r)[7] = SPH_T64((r)[7] + H); \
+ } while (0)
+
+/*
+ * One round of SHA-384 / SHA-512. The data must be aligned for 64-bit access.
+ */
+static void
+sha3_round(const unsigned char *data, sph_u64 r[8])
+{
+#define SHA3_IN(x) sph_dec64be_aligned(data + (8 * (x)))
+ SHA3_ROUND_BODY(SHA3_IN, r);
+#undef SHA3_IN
+}
+
+/* see sph_sha2.h */
+void
+sph_sha384_init(void *cc)
+{
+ sph_sha384_context *sc;
+
+ sc = cc;
+ memcpy(sc->val, H384, sizeof H384);
+ sc->count = 0;
+}
+
+/* see sph_sha2.h */
+void
+sph_sha512_init(void *cc)
+{
+ sph_sha512_context *sc;
+
+ sc = cc;
+ memcpy(sc->val, H512, sizeof H512);
+ sc->count = 0;
+}
+
+#define RFUN sha3_round
+#define HASH sha384
+#define BE64 1
+#include "sph/md_helper.c"
+
+/* see sph_sha2.h */
+void
+sph_sha384_close(void *cc, void *dst)
+{
+ sha384_close(cc, dst, 6);
+ sph_sha384_init(cc);
+}
+
+/* see sph_sha2.h */
+void
+sph_sha384_addbits_and_close(void *cc, unsigned ub, unsigned n, void *dst)
+{
+ sha384_addbits_and_close(cc, ub, n, dst, 6);
+ sph_sha384_init(cc);
+}
+
+/* see sph_sha2.h */
+void
+sph_sha512_close(void *cc, void *dst)
+{
+ sha384_close(cc, dst, 8);
+ sph_sha512_init(cc);
+}
+
+/* see sph_sha2.h */
+void
+sph_sha512_addbits_and_close(void *cc, unsigned ub, unsigned n, void *dst)
+{
+ sha384_addbits_and_close(cc, ub, n, dst, 8);
+ sph_sha512_init(cc);
+}
+
+/* see sph_sha2.h */
+void
+sph_sha384_comp(const sph_u64 msg[16], sph_u64 val[8])
+{
+#define SHA3_IN(x) msg[x]
+ SHA3_ROUND_BODY(SHA3_IN, val);
+#undef SHA3_IN
+}
+
+#endif
diff --git a/libqpdf/sph/md_helper.c b/libqpdf/sph/md_helper.c
new file mode 100644
index 00000000..5384f03f
--- /dev/null
+++ b/libqpdf/sph/md_helper.c
@@ -0,0 +1,346 @@
+/* $Id: md_helper.c 216 2010-06-08 09:46:57Z tp $ */
+/*
+ * This file contains some functions which implement the external data
+ * handling and padding for Merkle-Damgard hash functions which follow
+ * the conventions set out by MD4 (little-endian) or SHA-1 (big-endian).
+ *
+ * API: this file is meant to be included, not compiled as a stand-alone
+ * file. Some macros must be defined:
+ * RFUN name for the round function
+ * HASH "short name" for the hash function
+ * BE32 defined for big-endian, 32-bit based (e.g. SHA-1)
+ * LE32 defined for little-endian, 32-bit based (e.g. MD5)
+ * BE64 defined for big-endian, 64-bit based (e.g. SHA-512)
+ * LE64 defined for little-endian, 64-bit based (no example yet)
+ * PW01 if defined, append 0x01 instead of 0x80 (for Tiger)
+ * BLEN if defined, length of a message block (in bytes)
+ * PLW1 if defined, length is defined on one 64-bit word only (for Tiger)
+ * PLW4 if defined, length is defined on four 64-bit words (for WHIRLPOOL)
+ * SVAL if defined, reference to the context state information
+ *
+ * BLEN is used when a message block is not 16 (32-bit or 64-bit) words:
+ * this is used for instance for Tiger, which works on 64-bit words but
+ * uses 512-bit message blocks (eight 64-bit words). PLW1 and PLW4 are
+ * ignored if 32-bit words are used; if 64-bit words are used and PLW1 is
+ * set, then only one word (64 bits) will be used to encode the input
+ * message length (in bits), otherwise two words will be used (as in
+ * SHA-384 and SHA-512). If 64-bit words are used and PLW4 is defined (but
+ * not PLW1), four 64-bit words will be used to encode the message length
+ * (in bits). Note that regardless of those settings, only 64-bit message
+ * lengths are supported (in bits): messages longer than 2 exabytes will be
+ * improperly hashed (this is unlikely to happen soon: 2 exabytes is about
+ * 2 million terabytes, which is huge).
+ *
+ * If CLOSE_ONLY is defined, then this file defines only the sph_XXX_close()
+ * function. This is used for Tiger2, which is identical to Tiger except
+ * when it comes to the padding (Tiger2 uses the standard 0x80 byte instead
+ * of the 0x01 from original Tiger).
+ *
+ * The RFUN function is invoked with two arguments, the first pointing to
+ * aligned data (as a "const void *"), the second being state information
+ * from the context structure. By default, this state information is the
+ * "val" field from the context, and this field is assumed to be an array
+ * of words ("sph_u32" or "sph_u64", depending on BE32/LE32/BE64/LE64).
+ * The "val" field can have any type, except
+ * for the output encoding which assumes that it is an array of "sph_u32"
+ * values. By defining NO_OUTPUT, this last step is deactivated; the
+ * includer code is then responsible for writing out the hash result. When
+ * NO_OUTPUT is defined, the third parameter to the "close()" function is
+ * ignored.
+ *
+ * ==========================(LICENSE BEGIN)============================
+ *
+ * Copyright (c) 2007-2010 Projet RNRT SAPHIR
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining
+ * a copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sublicense, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be
+ * included in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+ * IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
+ * CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * ===========================(LICENSE END)=============================
+ *
+ * @author Thomas Pornin <thomas.pornin@cryptolog.com>
+ */
+
+#ifdef _MSC_VER
+#pragma warning (disable: 4146)
+#endif
+
+#undef SPH_XCAT
+#define SPH_XCAT(a, b) SPH_XCAT_(a, b)
+#undef SPH_XCAT_
+#define SPH_XCAT_(a, b) a ## b
+
+#undef SPH_BLEN
+#undef SPH_WLEN
+#if defined BE64 || defined LE64
+#define SPH_BLEN 128U
+#define SPH_WLEN 8U
+#else
+#define SPH_BLEN 64U
+#define SPH_WLEN 4U
+#endif
+
+#ifdef BLEN
+#undef SPH_BLEN
+#define SPH_BLEN BLEN
+#endif
+
+#undef SPH_MAXPAD
+#if defined PLW1
+#define SPH_MAXPAD (SPH_BLEN - SPH_WLEN)
+#elif defined PLW4
+#define SPH_MAXPAD (SPH_BLEN - (SPH_WLEN << 2))
+#else
+#define SPH_MAXPAD (SPH_BLEN - (SPH_WLEN << 1))
+#endif
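+
+/*
+ * For example, SHA-224/256 (SPH_BLEN 64, SPH_WLEN 4, neither PLW1 nor
+ * PLW4) gives SPH_MAXPAD = 64 - 8 = 56: the last eight bytes of the
+ * final padded block carry the encoded message bit length.
+ */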
+
+#undef SPH_VAL
+#undef SPH_NO_OUTPUT
+#ifdef SVAL
+#define SPH_VAL SVAL
+#define SPH_NO_OUTPUT 1
+#else
+#define SPH_VAL sc->val
+#endif
+
+#ifndef CLOSE_ONLY
+
+#ifdef SPH_UPTR
+static void
+SPH_XCAT(HASH, _short)(void *cc, const void *data, size_t len)
+#else
+void
+SPH_XCAT(sph_, HASH)(void *cc, const void *data, size_t len)
+#endif
+{
+ SPH_XCAT(sph_, SPH_XCAT(HASH, _context)) *sc;
+ unsigned current;
+
+ sc = cc;
+#if SPH_64
+ current = (unsigned)sc->count & (SPH_BLEN - 1U);
+#else
+ current = (unsigned)sc->count_low & (SPH_BLEN - 1U);
+#endif
+ while (len > 0) {
+ unsigned clen;
+#if !SPH_64
+ sph_u32 clow, clow2;
+#endif
+
+ clen = SPH_BLEN - current;
+ if (clen > len)
+ clen = len;
+ memcpy(sc->buf + current, data, clen);
+ data = (const unsigned char *)data + clen;
+ current += clen;
+ len -= clen;
+ if (current == SPH_BLEN) {
+ RFUN(sc->buf, SPH_VAL);
+ current = 0;
+ }
+#if SPH_64
+ sc->count += clen;
+#else
+ clow = sc->count_low;
+ clow2 = SPH_T32(clow + clen);
+ sc->count_low = clow2;
+ if (clow2 < clow)
+ sc->count_high ++;
+#endif
+ }
+}
+
+#ifdef SPH_UPTR
+void
+SPH_XCAT(sph_, HASH)(void *cc, const void *data, size_t len)
+{
+ SPH_XCAT(sph_, SPH_XCAT(HASH, _context)) *sc;
+ unsigned current;
+ size_t orig_len;
+#if !SPH_64
+ sph_u32 clow, clow2;
+#endif
+
+ if (len < (2 * SPH_BLEN)) {
+ SPH_XCAT(HASH, _short)(cc, data, len);
+ return;
+ }
+ sc = cc;
+#if SPH_64
+ current = (unsigned)sc->count & (SPH_BLEN - 1U);
+#else
+ current = (unsigned)sc->count_low & (SPH_BLEN - 1U);
+#endif
+ if (current > 0) {
+ unsigned t;
+
+ t = SPH_BLEN - current;
+ SPH_XCAT(HASH, _short)(cc, data, t);
+ data = (const unsigned char *)data + t;
+ len -= t;
+ }
+#if !SPH_UNALIGNED
+ if (((SPH_UPTR)data & (SPH_WLEN - 1U)) != 0) {
+ SPH_XCAT(HASH, _short)(cc, data, len);
+ return;
+ }
+#endif
+ orig_len = len;
+ while (len >= SPH_BLEN) {
+ RFUN(data, SPH_VAL);
+ len -= SPH_BLEN;
+ data = (const unsigned char *)data + SPH_BLEN;
+ }
+ if (len > 0)
+ memcpy(sc->buf, data, len);
+#if SPH_64
+ sc->count += (sph_u64)orig_len;
+#else
+ clow = sc->count_low;
+ clow2 = SPH_T32(clow + orig_len);
+ sc->count_low = clow2;
+ if (clow2 < clow)
+ sc->count_high ++;
+ /*
+ * This code handles the improbable situation where "size_t" is
+ * greater than 32 bits, and yet we do not have a 64-bit type.
+ */
+ orig_len >>= 12;
+ orig_len >>= 10;
+ orig_len >>= 10;
+ sc->count_high += orig_len;
+#endif
+}
+#endif
+
+#endif
+
+/*
+ * Perform padding and produce result. The context is NOT reinitialized
+ * by this function.
+ */
+static void
+SPH_XCAT(HASH, _addbits_and_close)(void *cc,
+ unsigned ub, unsigned n, void *dst, unsigned rnum)
+{
+ SPH_XCAT(sph_, SPH_XCAT(HASH, _context)) *sc;
+ unsigned current, u;
+#if !SPH_64
+ sph_u32 low, high;
+#endif
+
+ sc = cc;
+#if SPH_64
+ current = (unsigned)sc->count & (SPH_BLEN - 1U);
+#else
+ current = (unsigned)sc->count_low & (SPH_BLEN - 1U);
+#endif
+#ifdef PW01
+ sc->buf[current ++] = (0x100 | (ub & 0xFF)) >> (8 - n);
+#else
+ {
+ unsigned z;
+
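+		/*
+		 * Keep the top n bits of ub and set the next lower bit to 1:
+		 * the standard MD padding marker (0x80 when n == 0).
+		 */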
+ z = 0x80 >> n;
+ sc->buf[current ++] = ((ub & -z) | z) & 0xFF;
+ }
+#endif
+ if (current > SPH_MAXPAD) {
+ memset(sc->buf + current, 0, SPH_BLEN - current);
+ RFUN(sc->buf, SPH_VAL);
+ memset(sc->buf, 0, SPH_MAXPAD);
+ } else {
+ memset(sc->buf + current, 0, SPH_MAXPAD - current);
+ }
+#if defined BE64
+#if defined PLW1
+ sph_enc64be_aligned(sc->buf + SPH_MAXPAD,
+ SPH_T64(sc->count << 3) + (sph_u64)n);
+#elif defined PLW4
+ memset(sc->buf + SPH_MAXPAD, 0, 2 * SPH_WLEN);
+ sph_enc64be_aligned(sc->buf + SPH_MAXPAD + 2 * SPH_WLEN,
+ sc->count >> 61);
+ sph_enc64be_aligned(sc->buf + SPH_MAXPAD + 3 * SPH_WLEN,
+ SPH_T64(sc->count << 3) + (sph_u64)n);
+#else
+ sph_enc64be_aligned(sc->buf + SPH_MAXPAD, sc->count >> 61);
+ sph_enc64be_aligned(sc->buf + SPH_MAXPAD + SPH_WLEN,
+ SPH_T64(sc->count << 3) + (sph_u64)n);
+#endif
+#elif defined LE64
+#if defined PLW1
+ sph_enc64le_aligned(sc->buf + SPH_MAXPAD,
+ SPH_T64(sc->count << 3) + (sph_u64)n);
+#elif defined PLW4
+ sph_enc64le_aligned(sc->buf + SPH_MAXPAD,
+ SPH_T64(sc->count << 3) + (sph_u64)n);
+ sph_enc64le_aligned(sc->buf + SPH_MAXPAD + SPH_WLEN, sc->count >> 61);
+ memset(sc->buf + SPH_MAXPAD + 2 * SPH_WLEN, 0, 2 * SPH_WLEN);
+#else
+ sph_enc64le_aligned(sc->buf + SPH_MAXPAD,
+ SPH_T64(sc->count << 3) + (sph_u64)n);
+ sph_enc64le_aligned(sc->buf + SPH_MAXPAD + SPH_WLEN, sc->count >> 61);
+#endif
+#else
+#if SPH_64
+#ifdef BE32
+ sph_enc64be_aligned(sc->buf + SPH_MAXPAD,
+ SPH_T64(sc->count << 3) + (sph_u64)n);
+#else
+ sph_enc64le_aligned(sc->buf + SPH_MAXPAD,
+ SPH_T64(sc->count << 3) + (sph_u64)n);
+#endif
+#else
+ low = sc->count_low;
+ high = SPH_T32((sc->count_high << 3) | (low >> 29));
+ low = SPH_T32(low << 3) + (sph_u32)n;
+#ifdef BE32
+ sph_enc32be(sc->buf + SPH_MAXPAD, high);
+ sph_enc32be(sc->buf + SPH_MAXPAD + SPH_WLEN, low);
+#else
+ sph_enc32le(sc->buf + SPH_MAXPAD, low);
+ sph_enc32le(sc->buf + SPH_MAXPAD + SPH_WLEN, high);
+#endif
+#endif
+#endif
+ RFUN(sc->buf, SPH_VAL);
+#ifdef SPH_NO_OUTPUT
+ (void)dst;
+ (void)rnum;
+ (void)u;
+#else
+ for (u = 0; u < rnum; u ++) {
+#if defined BE64
+ sph_enc64be((unsigned char *)dst + 8 * u, sc->val[u]);
+#elif defined LE64
+ sph_enc64le((unsigned char *)dst + 8 * u, sc->val[u]);
+#elif defined BE32
+ sph_enc32be((unsigned char *)dst + 4 * u, sc->val[u]);
+#else
+ sph_enc32le((unsigned char *)dst + 4 * u, sc->val[u]);
+#endif
+ }
+#endif
+}
+
+static void
+SPH_XCAT(HASH, _close)(void *cc, void *dst, unsigned rnum)
+{
+ SPH_XCAT(HASH, _addbits_and_close)(cc, 0, 0, dst, rnum);
+}
diff --git a/libqpdf/sph/sph_sha2.h b/libqpdf/sph/sph_sha2.h
new file mode 100644
index 00000000..d5bda731
--- /dev/null
+++ b/libqpdf/sph/sph_sha2.h
@@ -0,0 +1,370 @@
+/* $Id: sph_sha2.h 216 2010-06-08 09:46:57Z tp $ */
+/**
+ * SHA-224, SHA-256, SHA-384 and SHA-512 interface.
+ *
+ * SHA-256 has been published in FIPS 180-2, now amended with a change
+ * notice to include SHA-224 as well (which is a simple variation on
+ * SHA-256). SHA-384 and SHA-512 are also defined in FIPS 180-2. FIPS
+ * standards can be found at:
+ * http://csrc.nist.gov/publications/fips/
+ *
+ * ==========================(LICENSE BEGIN)============================
+ *
+ * Copyright (c) 2007-2010 Projet RNRT SAPHIR
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining
+ * a copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sublicense, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be
+ * included in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+ * IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
+ * CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * ===========================(LICENSE END)=============================
+ *
+ * @file sph_sha2.h
+ * @author Thomas Pornin <thomas.pornin@cryptolog.com>
+ */
+
+#ifndef SPH_SHA2_H__
+#define SPH_SHA2_H__
+
+#include <stddef.h>
+#include "sph_types.h"
+
+/**
+ * Output size (in bits) for SHA-224.
+ */
+#define SPH_SIZE_sha224 224
+
+/**
+ * Output size (in bits) for SHA-256.
+ */
+#define SPH_SIZE_sha256 256
+
+/**
+ * This structure is a context for SHA-224 computations: it contains the
+ * intermediate values and some data from the last entered block. Once
+ * a SHA-224 computation has been performed, the context can be reused for
+ * another computation.
+ *
+ * The contents of this structure are private. A running SHA-224 computation
+ * can be cloned by copying the context (e.g. with a simple
+ * <code>memcpy()</code>).
+ */
+typedef struct {
+#ifndef DOXYGEN_IGNORE
+ unsigned char buf[64]; /* first field, for alignment */
+ sph_u32 val[8];
+#if SPH_64
+ sph_u64 count;
+#else
+ sph_u32 count_high, count_low;
+#endif
+#endif
+} sph_sha224_context;
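+
+/*
+ * Cloning sketch (illustrative; "c1" stands for any in-progress
+ * context):
+ *
+ *   sph_sha224_context c2;
+ *   memcpy(&c2, &c1, sizeof c2);
+ *
+ * Both copies can then be advanced independently.
+ */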
+
+/**
+ * This structure is a context for SHA-256 computations. It is identical
+ * to the SHA-224 context. However, a context is initialized for SHA-224
+ * <strong>or</strong> SHA-256, but not both (the internal IV is not the
+ * same).
+ */
+typedef sph_sha224_context sph_sha256_context;
+
+/**
+ * Initialize a SHA-224 context. This process performs no memory allocation.
+ *
+ * @param cc the SHA-224 context (pointer to
+ * a <code>sph_sha224_context</code>)
+ */
+void sph_sha224_init(void *cc);
+
+/**
+ * Process some data bytes. It is acceptable that <code>len</code> is zero
+ * (in which case this function does nothing).
+ *
+ * @param cc the SHA-224 context
+ * @param data the input data
+ * @param len the input data length (in bytes)
+ */
+void sph_sha224(void *cc, const void *data, size_t len);
+
+/**
+ * Terminate the current SHA-224 computation and output the result into the
+ * provided buffer. The destination buffer must be wide enough to
+ * accommodate the result (28 bytes). The context is automatically
+ * reinitialized.
+ *
+ * @param cc the SHA-224 context
+ * @param dst the destination buffer
+ */
+void sph_sha224_close(void *cc, void *dst);
+
+/**
+ * Add a few additional bits (0 to 7) to the current computation, then
+ * terminate it and output the result in the provided buffer, which must
+ * be wide enough to accommodate the result (28 bytes). If bit number i
+ * in <code>ub</code> has value 2^i, then the extra bits are those
+ * numbered 7 down to 8-n (this is the big-endian convention at the byte
+ * level). The context is automatically reinitialized.
+ *
+ * @param cc the SHA-224 context
+ * @param ub the extra bits
+ * @param n the number of extra bits (0 to 7)
+ * @param dst the destination buffer
+ */
+void sph_sha224_addbits_and_close(void *cc, unsigned ub, unsigned n, void *dst);
+
+/**
+ * Apply the SHA-224 compression function on the provided data. The
+ * <code>msg</code> parameter contains the 16 32-bit input blocks,
+ * as numerical values (hence after the big-endian decoding). The
+ * <code>val</code> parameter contains the 8 32-bit input blocks for
+ * the compression function; the output is written in place in this
+ * array.
+ *
+ * @param msg the message block (16 values)
+ * @param val the function 256-bit input and output
+ */
+void sph_sha224_comp(const sph_u32 msg[16], sph_u32 val[8]);
+
+/**
+ * Initialize a SHA-256 context. This process performs no memory allocation.
+ *
+ * @param cc the SHA-256 context (pointer to
+ * a <code>sph_sha256_context</code>)
+ */
+void sph_sha256_init(void *cc);
+
+#ifdef DOXYGEN_IGNORE
+/**
+ * Process some data bytes, for SHA-256. This function is identical to
+ * <code>sph_sha224()</code>.
+ *
+ * @param cc the SHA-256 context
+ * @param data the input data
+ * @param len the input data length (in bytes)
+ */
+void sph_sha256(void *cc, const void *data, size_t len);
+#endif
+
+#ifndef DOXYGEN_IGNORE
+#define sph_sha256 sph_sha224
+#endif
+
+/**
+ * Terminate the current SHA-256 computation and output the result into the
+ * provided buffer. The destination buffer must be wide enough to
+ * accommodate the result (32 bytes). The context is automatically
+ * reinitialized.
+ *
+ * @param cc the SHA-256 context
+ * @param dst the destination buffer
+ */
+void sph_sha256_close(void *cc, void *dst);
+
+/**
+ * Add a few additional bits (0 to 7) to the current computation, then
+ * terminate it and output the result in the provided buffer, which must
+ * be wide enough to accommodate the result (32 bytes). If bit number i
+ * in <code>ub</code> has value 2^i, then the extra bits are those
+ * numbered 7 down to 8-n (this is the big-endian convention at the byte
+ * level). The context is automatically reinitialized.
+ *
+ * @param cc the SHA-256 context
+ * @param ub the extra bits
+ * @param n the number of extra bits (0 to 7)
+ * @param dst the destination buffer
+ */
+void sph_sha256_addbits_and_close(void *cc, unsigned ub, unsigned n, void *dst);
+
+#ifdef DOXYGEN_IGNORE
+/**
+ * Apply the SHA-256 compression function on the provided data. This
+ * function is identical to <code>sph_sha224_comp()</code>.
+ *
+ * @param msg the message block (16 values)
+ * @param val the function 256-bit input and output
+ */
+void sph_sha256_comp(const sph_u32 msg[16], sph_u32 val[8]);
+#endif
+
+#ifndef DOXYGEN_IGNORE
+#define sph_sha256_comp sph_sha224_comp
+#endif
+
+#if SPH_64
+
+/**
+ * Output size (in bits) for SHA-384.
+ */
+#define SPH_SIZE_sha384 384
+
+/**
+ * Output size (in bits) for SHA-512.
+ */
+#define SPH_SIZE_sha512 512
+
+/**
+ * This structure is a context for SHA-384 computations: it contains the
+ * intermediate values and some data from the last entered block. Once
+ * a SHA-384 computation has been performed, the context can be reused for
+ * another computation.
+ *
+ * The contents of this structure are private. A running SHA-384 computation
+ * can be cloned by copying the context (e.g. with a simple
+ * <code>memcpy()</code>).
+ */
+typedef struct {
+#ifndef DOXYGEN_IGNORE
+ unsigned char buf[128]; /* first field, for alignment */
+ sph_u64 val[8];
+ sph_u64 count;
+#endif
+} sph_sha384_context;
+
+/**
+ * Initialize a SHA-384 context. This process performs no memory allocation.
+ *
+ * @param cc the SHA-384 context (pointer to
+ * a <code>sph_sha384_context</code>)
+ */
+void sph_sha384_init(void *cc);
+
+/**
+ * Process some data bytes. It is acceptable that <code>len</code> is zero
+ * (in which case this function does nothing).
+ *
+ * @param cc the SHA-384 context
+ * @param data the input data
+ * @param len the input data length (in bytes)
+ */
+void sph_sha384(void *cc, const void *data, size_t len);
+
+/**
+ * Terminate the current SHA-384 computation and output the result into the
+ * provided buffer. The destination buffer must be wide enough to
+ * accommodate the result (48 bytes). The context is automatically
+ * reinitialized.
+ *
+ * @param cc the SHA-384 context
+ * @param dst the destination buffer
+ */
+void sph_sha384_close(void *cc, void *dst);
+
+/**
+ * Add a few additional bits (0 to 7) to the current computation, then
+ * terminate it and output the result in the provided buffer, which must
+ * be wide enough to accommodate the result (48 bytes). If bit number i
+ * in <code>ub</code> has value 2^i, then the extra bits are those
+ * numbered 7 down to 8-n (this is the big-endian convention at the byte
+ * level). The context is automatically reinitialized.
+ *
+ * @param cc the SHA-384 context
+ * @param ub the extra bits
+ * @param n the number of extra bits (0 to 7)
+ * @param dst the destination buffer
+ */
+void sph_sha384_addbits_and_close(void *cc, unsigned ub, unsigned n, void *dst);
+
+/**
+ * Apply the SHA-384 compression function on the provided data. The
+ * <code>msg</code> parameter contains the 16 64-bit input blocks,
+ * as numerical values (hence after the big-endian decoding). The
+ * <code>val</code> parameter contains the 8 64-bit input blocks for
+ * the compression function; the output is written in place in this
+ * array.
+ *
+ * @param msg the message block (16 values)
+ * @param val the function 512-bit input and output
+ */
+void sph_sha384_comp(const sph_u64 msg[16], sph_u64 val[8]);
+
+/**
+ * This structure is a context for SHA-512 computations. It is identical
+ * to the SHA-384 context. However, a context is initialized for SHA-384
+ * <strong>or</strong> SHA-512, but not both (the internal IV is not the
+ * same).
+ */
+typedef sph_sha384_context sph_sha512_context;
+
+/**
+ * Initialize a SHA-512 context. This process performs no memory allocation.
+ *
+ * @param cc the SHA-512 context (pointer to
+ * a <code>sph_sha512_context</code>)
+ */
+void sph_sha512_init(void *cc);
+
+#ifdef DOXYGEN_IGNORE
+/**
+ * Process some data bytes, for SHA-512. This function is identical to
+ * <code>sph_sha384()</code>.
+ *
+ * @param cc    the SHA-512 context
+ * @param data the input data
+ * @param len the input data length (in bytes)
+ */
+void sph_sha512(void *cc, const void *data, size_t len);
+#endif
+
+#ifndef DOXYGEN_IGNORE
+#define sph_sha512 sph_sha384
+#endif
+
+/**
+ * Terminate the current SHA-512 computation and output the result into the
+ * provided buffer. The destination buffer must be wide enough to
+ * accommodate the result (64 bytes). The context is automatically
+ * reinitialized.
+ *
+ * @param cc the SHA-512 context
+ * @param dst the destination buffer
+ */
+void sph_sha512_close(void *cc, void *dst);
+
+/**
+ * Add a few additional bits (0 to 7) to the current computation, then
+ * terminate it and output the result in the provided buffer, which must
+ * be wide enough to accommodate the result (64 bytes). If bit number i
+ * in <code>ub</code> has value 2^i, then the extra bits are those
+ * numbered 7 down to 8-n (this is the big-endian convention at the byte
+ * level). The context is automatically reinitialized.
+ *
+ * @param cc the SHA-512 context
+ * @param ub the extra bits
+ * @param n the number of extra bits (0 to 7)
+ * @param dst the destination buffer
+ */
+void sph_sha512_addbits_and_close(void *cc, unsigned ub, unsigned n, void *dst);
+
+#ifdef DOXYGEN_IGNORE
+/**
+ * Apply the SHA-512 compression function. This function is identical to
+ * <code>sph_sha384_comp()</code>.
+ *
+ * @param msg the message block (16 values)
+ * @param val the function 512-bit input and output
+ */
+void sph_sha512_comp(const sph_u64 msg[16], sph_u64 val[8]);
+#endif
+
+#ifndef DOXYGEN_IGNORE
+#define sph_sha512_comp sph_sha384_comp
+#endif
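+
+/*
+ * A usage sketch (illustrative names): SHA-512 over a message whose bit
+ * length is not a multiple of 8. To append the three extra bits 1, 1, 0
+ * after the byte-aligned data, pass ub = 0xC0 (1*128 + 1*64 + 0*32) and
+ * n = 3, following the bit convention documented above.
+ *
+ *   sph_sha512_context cc;
+ *   unsigned char digest[64];
+ *
+ *   sph_sha512_init(&cc);
+ *   sph_sha512(&cc, data, data_len);
+ *   sph_sha512_addbits_and_close(&cc, 0xC0, 3, digest);
+ */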
+
+#endif
+
+#endif
diff --git a/libqpdf/sph/sph_types.h b/libqpdf/sph/sph_types.h
new file mode 100644
index 00000000..7295b0b3
--- /dev/null
+++ b/libqpdf/sph/sph_types.h
@@ -0,0 +1,1976 @@
+/* $Id: sph_types.h 260 2011-07-21 01:02:38Z tp $ */
+/**
+ * Basic type definitions.
+ *
+ * This header file defines the generic integer types that will be used
+ * for the implementation of hash functions; it also contains helper
+ * functions which encode and decode multi-byte integer values, using
+ * either little-endian or big-endian conventions.
+ *
+ * This file contains a compile-time test on the size of a byte
+ * (the <code>unsigned char</code> C type). If bytes are not octets,
+ * i.e. if they do not have a size of exactly 8 bits, then compilation
+ * is aborted. Architectures where bytes are not octets are relatively
+ * rare, even in the embedded devices market. We forbid non-octet bytes
+ * because there is no clear convention on how octet streams are encoded
+ * on such systems.
+ *
+ * ==========================(LICENSE BEGIN)============================
+ *
+ * Copyright (c) 2007-2010 Projet RNRT SAPHIR
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining
+ * a copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sublicense, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be
+ * included in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+ * IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
+ * CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * ===========================(LICENSE END)=============================
+ *
+ * @file sph_types.h
+ * @author Thomas Pornin <thomas.pornin@cryptolog.com>
+ */
+
+#ifndef SPH_TYPES_H__
+#define SPH_TYPES_H__
+
+#include <limits.h>
+
+/*
+ * All our I/O functions are defined over octet streams. We do not know
+ * how to handle input data if bytes are not octets.
+ */
+#if CHAR_BIT != 8
+#error This code requires 8-bit bytes
+#endif
+
+/* ============= BEGIN documentation block for Doxygen ============ */
+
+#ifdef DOXYGEN_IGNORE
+
+/** @mainpage sphlib C code documentation
+ *
+ * @section overview Overview
+ *
+ * <code>sphlib</code> is a library which contains implementations of
+ * various cryptographic hash functions. These pages have been generated
+ * with <a href="http://www.doxygen.org/index.html">doxygen</a> and
+ * document the API for the C implementations.
+ *
+ * The API is described in appropriate header files, which are available
+ * in the "Files" section. Each hash function family has its own header,
+ * whose name begins with <code>"sph_"</code> and contains the family
+ * name. For instance, the API for the RIPEMD hash functions is available
+ * in the header file <code>sph_ripemd.h</code>.
+ *
+ * @section principles API structure and conventions
+ *
+ * @subsection io Input/output conventions
+ *
+ * In all generality, hash functions operate over strings of bits.
+ * Individual bits are rarely encountered in C programming or actual
+ * communication protocols; most protocols converge on the ubiquitous
+ * "octet" which is a group of eight bits. Data is thus expressed as a
+ * stream of octets. The C programming language contains the notion of a
+ * "byte", which is a data unit managed under the type <code>"unsigned
+ * char"</code>. The C standard prescribes that a byte should hold at
+ * least eight bits, but possibly more. Most modern architectures, even
+ * in the embedded world, feature eight-bit bytes, i.e. map bytes to
+ * octets.
+ *
+ * Nevertheless, for some of the implemented hash functions, an extra
+ * API has been added, which allows the input of arbitrary sequences of
+ * bits: when the computation is about to be closed, 1 to 7 extra bits
+ * can be added. The functions for which this API is implemented include
+ * the SHA-2 functions and all SHA-3 candidates.
+ *
+ * <code>sphlib</code> defines hash functions which may hash octet streams,
+ * i.e. streams of bits where the number of bits is a multiple of eight.
+ * The data input functions in the <code>sphlib</code> API expect data
+ * as anonymous pointers (<code>"const void *"</code>) with a length
+ * (of type <code>"size_t"</code>) which gives the input data chunk length
+ * in bytes. A byte is assumed to be an octet; the <code>sph_types.h</code>
+ * header contains a compile-time test which prevents compilation on
+ * architectures where this property is not met.
+ *
+ * The hash function output is also converted into bytes. All currently
+ * implemented hash functions have an output width which is a multiple of
+ * eight, and this is likely to remain true for new designs.
+ *
+ * Most hash functions internally convert input data into 32-bit or 64-bit
+ * words, using either little-endian or big-endian conversion. The hash
+ * output also often consists of such words, which are encoded into output
+ * bytes with a similar endianness convention. Some hash functions have
+ * been only loosely specified on that subject; when necessary,
+ * <code>sphlib</code> has been tested against published "reference"
+ * implementations in order to use the same conventions.
+ *
+ * @subsection shortname Function short name
+ *
+ * Each implemented hash function has a "short name" which is used
+ * internally to derive the identifiers for the functions and context
+ * structures which the function uses. For instance, MD5 has the short
+ * name <code>"md5"</code>. Short names are listed in the next section,
+ * for the implemented hash functions. In subsequent sections, the
+ * short name will be assumed to be <code>"XXX"</code>: replace with the
+ * actual hash function name to get the C identifier.
+ *
+ * Note: some functions within the same family share the same core
+ * elements, such as update function or context structure. Correspondingly,
+ * some of the defined types or functions may actually be macros which
+ * transparently evaluate to another type or function name.
+ *
+ * @subsection context Context structure
+ *
+ * Each implemented hash function has its own context structure, available
+ * under the type name <code>"sph_XXX_context"</code> for the hash function
+ * with short name <code>"XXX"</code>. This structure holds all needed
+ * state for a running hash computation.
+ *
+ * The contents of these structures are meant to be opaque, and private
+ * to the implementation. However, these contents are specified in the
+ * header files so that application code which uses <code>sphlib</code>
+ * may access the size of those structures.
+ *
+ * The caller is responsible for allocating the context structure,
+ * whether by dynamic allocation (<code>malloc()</code> or equivalent),
+ * static allocation (a global permanent variable), as an automatic
+ * variable ("on the stack"), or by any other mean which ensures proper
+ * structure alignment. <code>sphlib</code> code performs no dynamic
+ * allocation by itself.
+ *
+ * The context must be initialized before use, using the
+ * <code>sph_XXX_init()</code> function. This function sets the context
+ * state to proper initial values for hashing.
+ *
+ * Since all state data is contained within the context structure,
+ * <code>sphlib</code> is thread-safe and reentrant: several hash
+ * computations may be performed in parallel, provided that they do not
+ * operate on the same context. Moreover, a running computation can be
+ * cloned by copying the context (with a simple <code>memcpy()</code>):
+ * the context and its clone are then independent and may be updated
+ * with new data and/or closed without interfering with each other.
+ * Similarly, a context structure can be moved in memory at will:
+ * context structures contain no pointer, in particular no pointer to
+ * themselves.
+ *
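+ * For instance (an illustrative sketch; the names are arbitrary), a
+ * computation over a common prefix can be forked by cloning the context:
+ *
+ *   sph_md5_context a, b;
+ *
+ *   sph_md5_init(&a);
+ *   sph_md5(&a, prefix, prefix_len);
+ *   memcpy(&b, &a, sizeof a);
+ *   sph_md5(&a, suffix1, len1);
+ *   sph_md5(&b, suffix2, len2);
+ *
+ * Both contexts may then be closed independently.
+ *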
+ * @subsection dataio Data input
+ *
+ * Hashed data is input with the <code>sph_XXX()</code> function, which
+ * takes as parameters a pointer to the context, a pointer to the data
+ * to hash, and the number of data bytes to hash. The context is updated
+ * with the new data.
+ *
+ * Data can be input in one or several calls, with arbitrary input lengths.
+ * However, it is best, performance-wise, to input data by relatively big
+ * chunks (say a few kilobytes), because this allows <code>sphlib</code> to
+ * optimize things and avoid internal copying.
+ *
+ * When all data has been input, the context can be closed with
+ * <code>sph_XXX_close()</code>. The hash output is computed and written
+ * into the provided buffer. The caller must take care to provide a
+ * buffer of appropriate length; e.g., when using SHA-1, the output is
+ * a 20-byte value, therefore the output buffer must be at least 20
+ * bytes long.
+ *
+ * For some hash functions, the <code>sph_XXX_addbits_and_close()</code>
+ * function can be used instead of <code>sph_XXX_close()</code>. This
+ * function can take a few extra <strong>bits</strong> to be added at
+ * the end of the input message. This allows hashing messages with a
+ * bit length which is not a multiple of 8. The extra bits are provided
+ * as an unsigned integer value, and a bit count. The bit count must be
+ * between 0 and 7, inclusive. The extra bits are provided as bits 7 to
+ * 0 (bits of numerical value 128, 64, 32... down to 1), in that order.
+ * For instance, to add three bits of value 1, 1 and 0, the unsigned
+ * integer will have value 192 (1*128 + 1*64 + 0*32) and the bit count
+ * will be 3.
+ *
+ * The <code>SPH_SIZE_XXX</code> macro is defined for each hash function;
+ * it evaluates to the function output size, expressed in bits. For instance,
+ * <code>SPH_SIZE_sha1</code> evaluates to <code>160</code>.
+ *
+ * When closed, the context is automatically reinitialized and can be
+ * immediately used for another computation. It is not necessary to call
+ * <code>sph_XXX_init()</code> after a close. Note that
+ * <code>sph_XXX_init()</code> can still be called to "reset" a context,
+ * i.e. forget previously input data, and get back to the initial state.
+ *
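+ * A minimal sketch of the general pattern ("XXX" stands for a short name
+ * such as "sha256"; the other names are arbitrary):
+ *
+ *   sph_XXX_context cc;
+ *   unsigned char out[SPH_SIZE_XXX / 8];
+ *
+ *   sph_XXX_init(&cc);
+ *   sph_XXX(&cc, data, len);
+ *   sph_XXX_close(&cc, out);
+ *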
+ * @subsection alignment Data alignment
+ *
+ * "Alignment" is a property of data, which is said to be "properly
+ * aligned" when its emplacement in memory is such that the data can
+ * be optimally read by full words. This depends on the type of access;
+ * basically, some hash functions will read data by 32-bit or 64-bit
+ * words. <code>sphlib</code> does not mandate such alignment for input
+ * data, but using aligned data can substantially improve performance.
+ *
+ * As a rule, it is best to input data by chunks whose length (in bytes)
+ * is a multiple of eight, and which begins at "generally aligned"
+ * addresses, such as the base address returned by a call to
+ * <code>malloc()</code>.
+ *
+ * @section functions Implemented functions
+ *
+ * We give here the list of implemented functions. They are grouped by
+ * family; to each family corresponds a specific header file. Each
+ * individual function has its associated "short name". Please refer to
+ * the documentation for that header file to get details on the hash
+ * function denomination and provenance.
+ *
+ * Note: the functions marked with a '(64)' in the list below are
+ * available only if the C compiler provides an integer type of length
+ * 64 bits or more. Such a type is mandatory in the latest C standard
+ * (ISO 9899:1999, aka "C99") and is present in several older compilers
+ * as well, so chances are that such a type is available.
+ *
+ * - HAVAL family: file <code>sph_haval.h</code>
+ * - HAVAL-128/3 (128-bit, 3 passes): short name: <code>haval128_3</code>
+ * - HAVAL-128/4 (128-bit, 4 passes): short name: <code>haval128_4</code>
+ * - HAVAL-128/5 (128-bit, 5 passes): short name: <code>haval128_5</code>
+ * - HAVAL-160/3 (160-bit, 3 passes): short name: <code>haval160_3</code>
+ * - HAVAL-160/4 (160-bit, 4 passes): short name: <code>haval160_4</code>
+ * - HAVAL-160/5 (160-bit, 5 passes): short name: <code>haval160_5</code>
+ * - HAVAL-192/3 (192-bit, 3 passes): short name: <code>haval192_3</code>
+ * - HAVAL-192/4 (192-bit, 4 passes): short name: <code>haval192_4</code>
+ * - HAVAL-192/5 (192-bit, 5 passes): short name: <code>haval192_5</code>
+ * - HAVAL-224/3 (224-bit, 3 passes): short name: <code>haval224_3</code>
+ * - HAVAL-224/4 (224-bit, 4 passes): short name: <code>haval224_4</code>
+ * - HAVAL-224/5 (224-bit, 5 passes): short name: <code>haval224_5</code>
+ * - HAVAL-256/3 (256-bit, 3 passes): short name: <code>haval256_3</code>
+ * - HAVAL-256/4 (256-bit, 4 passes): short name: <code>haval256_4</code>
+ * - HAVAL-256/5 (256-bit, 5 passes): short name: <code>haval256_5</code>
+ * - MD2: file <code>sph_md2.h</code>, short name: <code>md2</code>
+ * - MD4: file <code>sph_md4.h</code>, short name: <code>md4</code>
+ * - MD5: file <code>sph_md5.h</code>, short name: <code>md5</code>
+ * - PANAMA: file <code>sph_panama.h</code>, short name: <code>panama</code>
+ * - RadioGatun family: file <code>sph_radiogatun.h</code>
+ * - RadioGatun[32]: short name: <code>radiogatun32</code>
+ * - RadioGatun[64]: short name: <code>radiogatun64</code> (64)
+ * - RIPEMD family: file <code>sph_ripemd.h</code>
+ * - RIPEMD: short name: <code>ripemd</code>
+ * - RIPEMD-128: short name: <code>ripemd128</code>
+ * - RIPEMD-160: short name: <code>ripemd160</code>
+ * - SHA-0: file <code>sph_sha0.h</code>, short name: <code>sha0</code>
+ * - SHA-1: file <code>sph_sha1.h</code>, short name: <code>sha1</code>
+ * - SHA-2 family: file <code>sph_sha2.h</code>
+ * - SHA-224: short name: <code>sha224</code>
+ * - SHA-256: short name: <code>sha256</code>
+ * - SHA-384: short name: <code>sha384</code> (64)
+ * - SHA-512: short name: <code>sha512</code> (64)
+ * - Tiger family: file <code>sph_tiger.h</code>
+ * - Tiger: short name: <code>tiger</code> (64)
+ * - Tiger2: short name: <code>tiger2</code> (64)
+ * - WHIRLPOOL family: file <code>sph_whirlpool.h</code>
+ * - WHIRLPOOL-0: short name: <code>whirlpool0</code> (64)
+ * - WHIRLPOOL-1: short name: <code>whirlpool1</code> (64)
+ * - WHIRLPOOL: short name: <code>whirlpool</code> (64)
+ *
+ * The fourteen second-round SHA-3 candidates are also implemented;
+ * when applicable, the implementations follow the "final" specifications
+ * as published for the third round of the SHA-3 competition (BLAKE,
+ * Groestl, JH, Keccak and Skein have been tweaked for third round).
+ *
+ * - BLAKE family: file <code>sph_blake.h</code>
+ * - BLAKE-224: short name: <code>blake224</code>
+ * - BLAKE-256: short name: <code>blake256</code>
+ * - BLAKE-384: short name: <code>blake384</code>
+ * - BLAKE-512: short name: <code>blake512</code>
+ * - BMW (Blue Midnight Wish) family: file <code>sph_bmw.h</code>
+ * - BMW-224: short name: <code>bmw224</code>
+ * - BMW-256: short name: <code>bmw256</code>
+ * - BMW-384: short name: <code>bmw384</code> (64)
+ * - BMW-512: short name: <code>bmw512</code> (64)
+ * - CubeHash family: file <code>sph_cubehash.h</code> (specified as
+ * CubeHash16/32 in the CubeHash specification)
+ * - CubeHash-224: short name: <code>cubehash224</code>
+ * - CubeHash-256: short name: <code>cubehash256</code>
+ * - CubeHash-384: short name: <code>cubehash384</code>
+ * - CubeHash-512: short name: <code>cubehash512</code>
+ * - ECHO family: file <code>sph_echo.h</code>
+ * - ECHO-224: short name: <code>echo224</code>
+ * - ECHO-256: short name: <code>echo256</code>
+ * - ECHO-384: short name: <code>echo384</code>
+ * - ECHO-512: short name: <code>echo512</code>
+ * - Fugue family: file <code>sph_fugue.h</code>
+ * - Fugue-224: short name: <code>fugue224</code>
+ * - Fugue-256: short name: <code>fugue256</code>
+ * - Fugue-384: short name: <code>fugue384</code>
+ * - Fugue-512: short name: <code>fugue512</code>
+ * - Groestl family: file <code>sph_groestl.h</code>
+ * - Groestl-224: short name: <code>groestl224</code>
+ * - Groestl-256: short name: <code>groestl256</code>
+ * - Groestl-384: short name: <code>groestl384</code>
+ * - Groestl-512: short name: <code>groestl512</code>
+ * - Hamsi family: file <code>sph_hamsi.h</code>
+ * - Hamsi-224: short name: <code>hamsi224</code>
+ * - Hamsi-256: short name: <code>hamsi256</code>
+ * - Hamsi-384: short name: <code>hamsi384</code>
+ * - Hamsi-512: short name: <code>hamsi512</code>
+ * - JH family: file <code>sph_jh.h</code>
+ * - JH-224: short name: <code>jh224</code>
+ * - JH-256: short name: <code>jh256</code>
+ * - JH-384: short name: <code>jh384</code>
+ * - JH-512: short name: <code>jh512</code>
+ * - Keccak family: file <code>sph_keccak.h</code>
+ * - Keccak-224: short name: <code>keccak224</code>
+ * - Keccak-256: short name: <code>keccak256</code>
+ * - Keccak-384: short name: <code>keccak384</code>
+ * - Keccak-512: short name: <code>keccak512</code>
+ * - Luffa family: file <code>sph_luffa.h</code>
+ * - Luffa-224: short name: <code>luffa224</code>
+ * - Luffa-256: short name: <code>luffa256</code>
+ * - Luffa-384: short name: <code>luffa384</code>
+ * - Luffa-512: short name: <code>luffa512</code>
+ * - Shabal family: file <code>sph_shabal.h</code>
+ * - Shabal-192: short name: <code>shabal192</code>
+ * - Shabal-224: short name: <code>shabal224</code>
+ * - Shabal-256: short name: <code>shabal256</code>
+ * - Shabal-384: short name: <code>shabal384</code>
+ * - Shabal-512: short name: <code>shabal512</code>
+ * - SHAvite-3 family: file <code>sph_shavite.h</code>
+ *   - SHAvite-224 (nominally "SHAvite-3 with 224-bit output"):
+ *     short name: <code>shavite224</code>
+ *   - SHAvite-256 (nominally "SHAvite-3 with 256-bit output"):
+ *     short name: <code>shavite256</code>
+ *   - SHAvite-384 (nominally "SHAvite-3 with 384-bit output"):
+ *     short name: <code>shavite384</code>
+ *   - SHAvite-512 (nominally "SHAvite-3 with 512-bit output"):
+ *     short name: <code>shavite512</code>
+ * - SIMD family: file <code>sph_simd.h</code>
+ * - SIMD-224: short name: <code>simd224</code>
+ * - SIMD-256: short name: <code>simd256</code>
+ * - SIMD-384: short name: <code>simd384</code>
+ * - SIMD-512: short name: <code>simd512</code>
+ * - Skein family: file <code>sph_skein.h</code>
+ * - Skein-224 (nominally specified as Skein-512-224): short name:
+ * <code>skein224</code> (64)
+ * - Skein-256 (nominally specified as Skein-512-256): short name:
+ * <code>skein256</code> (64)
+ * - Skein-384 (nominally specified as Skein-512-384): short name:
+ * <code>skein384</code> (64)
+ * - Skein-512 (nominally specified as Skein-512-512): short name:
+ * <code>skein512</code> (64)
+ *
+ * For the second-round SHA-3 candidates, the functions are as specified
+ * for round 2, i.e. with the "tweaks" that some candidates added
+ * between round 1 and round 2. Also, some of the submitted packages for
+ * round 2 contained errors, in the specification, reference code, or
+ * both. <code>sphlib</code> implements the corrected versions.
+ */
+
+/** @hideinitializer
+ * Unsigned integer type whose length is at least 32 bits; on most
+ * architectures, it will have a width of exactly 32 bits. Unsigned C
+ * types implement arithmetic modulo a power of 2; use the
+ * <code>SPH_T32()</code> macro to ensure that the value is truncated
+ * to exactly 32 bits. Unless otherwise specified, all macros and
+ * functions which accept <code>sph_u32</code> values assume that these
+ * values fit in 32 bits, i.e. do not exceed 2^32-1, even on architectures
+ * where <code>sph_u32</code> is larger than that.
+ */
+typedef __arch_dependant__ sph_u32;
+
+/** @hideinitializer
+ * Signed integer type corresponding to <code>sph_u32</code>; it has
+ * width 32 bits or more.
+ */
+typedef __arch_dependant__ sph_s32;
+
+/** @hideinitializer
+ * Unsigned integer type whose length is at least 64 bits; on most
+ * architectures which feature such a type, it will have a width of
+ * exactly 64 bits. C99-compliant platforms will have this type; it
+ * is also defined when the GNU compiler (gcc) is used, and on
+ * platforms where <code>unsigned long</code> is large enough. If this
+ * type is not available, then some hash functions which depend on
+ * a 64-bit type will not be available (most notably SHA-384, SHA-512,
+ * Tiger and WHIRLPOOL).
+ */
+typedef __arch_dependant__ sph_u64;
+
+/** @hideinitializer
+ * Signed integer type corresponding to <code>sph_u64</code>; it has
+ * width 64 bits or more.
+ */
+typedef __arch_dependant__ sph_s64;
+
+/**
+ * This macro expands the token <code>x</code> into a suitable
+ * constant expression of type <code>sph_u32</code>. Depending on
+ * how this type is defined, a suffix such as <code>UL</code> may
+ * be appended to the argument.
+ *
+ * @param x the token to expand into a suitable constant expression
+ */
+#define SPH_C32(x)
+
+/**
+ * Truncate a 32-bit value to exactly 32 bits. On most systems, this is
+ * a no-op, recognized as such by the compiler.
+ *
+ * @param x the value to truncate (of type <code>sph_u32</code>)
+ */
+#define SPH_T32(x)
+
+/**
+ * Rotate a 32-bit value by a number of bits to the left. The rotate
+ * count must reside between 1 and 31. This macro assumes that its
+ * first argument fits in 32 bits (no extra bit allowed on machines where
+ * <code>sph_u32</code> is wider); both arguments may be evaluated
+ * several times.
+ *
+ * @param x the value to rotate (of type <code>sph_u32</code>)
+ * @param n the rotation count (between 1 and 31, inclusive)
+ */
+#define SPH_ROTL32(x, n)
+
+/**
+ * Rotate a 32-bit value by a number of bits to the right. The rotate
+ * count must reside between 1 and 31. This macro assumes that its
+ * first argument fits in 32 bits (no extra bit allowed on machines where
+ * <code>sph_u32</code> is wider); both arguments may be evaluated
+ * several times.
+ *
+ * @param x the value to rotate (of type <code>sph_u32</code>)
+ * @param n the rotation count (between 1 and 31, inclusive)
+ */
+#define SPH_ROTR32(x, n)
+
+/**
+ * This macro is defined on systems for which a 64-bit type has been
+ * detected, and is used for <code>sph_u64</code>.
+ */
+#define SPH_64
+
+/**
+ * This macro is defined on systems for which the "native" integer size is
+ * 64 bits (64-bit values fit in one register).
+ */
+#define SPH_64_TRUE
+
+/**
+ * This macro expands the token <code>x</code> into a suitable
+ * constant expression of type <code>sph_u64</code>. Depending on
+ * how this type is defined, a suffix such as <code>ULL</code> may
+ * be appended to the argument. This macro is defined only if a
+ * 64-bit type was detected and used for <code>sph_u64</code>.
+ *
+ * @param x the token to expand into a suitable constant expression
+ */
+#define SPH_C64(x)
+
+/**
+ * Truncate a 64-bit value to exactly 64 bits. On most systems, this is
+ * a no-op, recognized as such by the compiler. This macro is defined only
+ * if a 64-bit type was detected and used for <code>sph_u64</code>.
+ *
+ * @param x the value to truncate (of type <code>sph_u64</code>)
+ */
+#define SPH_T64(x)
+
+/**
+ * Rotate a 64-bit value by a number of bits to the left. The rotate
+ * count must reside between 1 and 63. This macro assumes that its
+ * first argument fits in 64 bits (no extra bit allowed on machines where
+ * <code>sph_u64</code> is wider); both arguments may be evaluated
+ * several times. This macro is defined only if a 64-bit type was detected
+ * and used for <code>sph_u64</code>.
+ *
+ * @param x the value to rotate (of type <code>sph_u64</code>)
+ * @param n the rotation count (between 1 and 63, inclusive)
+ */
+#define SPH_ROTL64(x, n)
+
+/**
+ * Rotate a 64-bit value by a number of bits to the right. The rotate
+ * count must reside between 1 and 63. This macro assumes that its
+ * first argument fits in 64 bits (no extra bit allowed on machines where
+ * <code>sph_u64</code> is wider); both arguments may be evaluated
+ * several times. This macro is defined only if a 64-bit type was detected
+ * and used for <code>sph_u64</code>.
+ *
+ * @param x the value to rotate (of type <code>sph_u64</code>)
+ * @param n the rotation count (between 1 and 63, inclusive)
+ */
+#define SPH_ROTR64(x, n)
+
+/**
+ * This macro evaluates to <code>inline</code> or an equivalent construction,
+ * if available on the compilation platform, or to nothing otherwise. This
+ * is used to declare inline functions, for which the compiler should
+ * endeavour to include the code directly in the caller. Inline functions
+ * are typically defined in header files as replacement for macros.
+ */
+#define SPH_INLINE
+
+/**
+ * This macro is defined if the platform has been detected as using
+ * little-endian convention. This implies that the <code>sph_u32</code>
+ * type (and the <code>sph_u64</code> type also, if it is defined) has
+ * an exact width (i.e. exactly 32-bit, respectively 64-bit).
+ */
+#define SPH_LITTLE_ENDIAN
+
+/**
+ * This macro is defined if the platform has been detected as using
+ * big-endian convention. This implies that the <code>sph_u32</code>
+ * type (and the <code>sph_u64</code> type also, if it is defined) has
+ * an exact width (i.e. exactly 32-bit, respectively 64-bit).
+ */
+#define SPH_BIG_ENDIAN
+
+/**
+ * This macro is defined if 32-bit words (and 64-bit words, if defined)
+ * can be read from and written to memory efficiently in little-endian
+ * convention. This is the case for little-endian platforms, and also
+ * for the big-endian platforms which have special little-endian access
+ * opcodes (e.g. UltraSPARC).
+ */
+#define SPH_LITTLE_FAST
+
+/**
+ * This macro is defined if 32-bit words (and 64-bit words, if defined)
+ * can be read from and written to memory efficiently in big-endian
+ * convention. This is the case for big-endian platforms, and also
+ * for the little-endian platforms which have special big-endian access
+ * opcodes.
+ */
+#define SPH_BIG_FAST
+
+/**
+ * On some platforms, this macro is defined to an unsigned integer type
+ * into which pointer values may be cast. The resulting value can then
+ * be tested for being a multiple of 2, 4 or 8, indicating an aligned
+ * pointer for, respectively, 16-bit, 32-bit or 64-bit memory accesses.
+ */
+#define SPH_UPTR
+
+/**
+ * When defined, this macro indicates that unaligned memory accesses
+ * are possible with only a minor penalty, and thus should be preferred
+ * over strategies which first copy data to an aligned buffer.
+ */
+#define SPH_UNALIGNED
+
+/**
+ * Byte-swap a 32-bit word (i.e. <code>0x12345678</code> becomes
+ * <code>0x78563412</code>). This is an inline function which resorts
+ * to inline assembly on some platforms, for better performance.
+ *
+ * @param x the 32-bit value to byte-swap
+ * @return the byte-swapped value
+ */
+static inline sph_u32 sph_bswap32(sph_u32 x);
+
+/**
+ * Byte-swap a 64-bit word. This is an inline function which resorts
+ * to inline assembly on some platforms, for better performance. This
+ * function is defined only if a suitable 64-bit type was found for
+ * <code>sph_u64</code>.
+ *
+ * @param x the 64-bit value to byte-swap
+ * @return the byte-swapped value
+ */
+static inline sph_u64 sph_bswap64(sph_u64 x);
+
+/**
+ * Decode a 16-bit unsigned value from memory, in little-endian convention
+ * (least significant byte comes first).
+ *
+ * @param src the source address
+ * @return the decoded value
+ */
+static inline unsigned sph_dec16le(const void *src);
+
+/**
+ * Encode a 16-bit unsigned value into memory, in little-endian convention
+ * (least significant byte comes first).
+ *
+ * @param dst the destination buffer
+ * @param val the value to encode
+ */
+static inline void sph_enc16le(void *dst, unsigned val);
+
+/**
+ * Decode a 16-bit unsigned value from memory, in big-endian convention
+ * (most significant byte comes first).
+ *
+ * @param src the source address
+ * @return the decoded value
+ */
+static inline unsigned sph_dec16be(const void *src);
+
+/**
+ * Encode a 16-bit unsigned value into memory, in big-endian convention
+ * (most significant byte comes first).
+ *
+ * @param dst the destination buffer
+ * @param val the value to encode
+ */
+static inline void sph_enc16be(void *dst, unsigned val);
+
+/**
+ * Decode a 32-bit unsigned value from memory, in little-endian convention
+ * (least significant byte comes first).
+ *
+ * @param src the source address
+ * @return the decoded value
+ */
+static inline sph_u32 sph_dec32le(const void *src);
+
+/**
+ * Decode a 32-bit unsigned value from memory, in little-endian convention
+ * (least significant byte comes first). This function assumes that the
+ * source address is suitably aligned for a direct access, if the platform
+ * supports such things; it can thus be marginally faster than the generic
+ * <code>sph_dec32le()</code> function.
+ *
+ * @param src the source address
+ * @return the decoded value
+ */
+static inline sph_u32 sph_dec32le_aligned(const void *src);
+
+/**
+ * Encode a 32-bit unsigned value into memory, in little-endian convention
+ * (least significant byte comes first).
+ *
+ * @param dst the destination buffer
+ * @param val the value to encode
+ */
+static inline void sph_enc32le(void *dst, sph_u32 val);
+
+/**
+ * Encode a 32-bit unsigned value into memory, in little-endian convention
+ * (least significant byte comes first). This function assumes that the
+ * destination address is suitably aligned for a direct access, if the
+ * platform supports such things; it can thus be marginally faster than
+ * the generic <code>sph_enc32le()</code> function.
+ *
+ * @param dst the destination buffer
+ * @param val the value to encode
+ */
+static inline void sph_enc32le_aligned(void *dst, sph_u32 val);
+
+/**
+ * Decode a 32-bit unsigned value from memory, in big-endian convention
+ * (most significant byte comes first).
+ *
+ * @param src the source address
+ * @return the decoded value
+ */
+static inline sph_u32 sph_dec32be(const void *src);
+
+/**
+ * Decode a 32-bit unsigned value from memory, in big-endian convention
+ * (most significant byte comes first). This function assumes that the
+ * source address is suitably aligned for a direct access, if the platform
+ * supports such things; it can thus be marginally faster than the generic
+ * <code>sph_dec32be()</code> function.
+ *
+ * @param src the source address
+ * @return the decoded value
+ */
+static inline sph_u32 sph_dec32be_aligned(const void *src);
+
+/**
+ * Encode a 32-bit unsigned value into memory, in big-endian convention
+ * (most significant byte comes first).
+ *
+ * @param dst the destination buffer
+ * @param val the value to encode
+ */
+static inline void sph_enc32be(void *dst, sph_u32 val);
+
+/**
+ * Encode a 32-bit unsigned value into memory, in big-endian convention
+ * (most significant byte comes first). This function assumes that the
+ * destination address is suitably aligned for a direct access, if the
+ * platform supports such things; it can thus be marginally faster than
+ * the generic <code>sph_enc32be()</code> function.
+ *
+ * @param dst the destination buffer
+ * @param val the value to encode
+ */
+static inline void sph_enc32be_aligned(void *dst, sph_u32 val);
+
+/**
+ * Decode a 64-bit unsigned value from memory, in little-endian convention
+ * (least significant byte comes first). This function is defined only
+ * if a suitable 64-bit type was detected and used for <code>sph_u64</code>.
+ *
+ * @param src the source address
+ * @return the decoded value
+ */
+static inline sph_u64 sph_dec64le(const void *src);
+
+/**
+ * Decode a 64-bit unsigned value from memory, in little-endian convention
+ * (least significant byte comes first). This function assumes that the
+ * source address is suitably aligned for a direct access, if the platform
+ * supports such things; it can thus be marginally faster than the generic
+ * <code>sph_dec64le()</code> function. This function is defined only
+ * if a suitable 64-bit type was detected and used for <code>sph_u64</code>.
+ *
+ * @param src the source address
+ * @return the decoded value
+ */
+static inline sph_u64 sph_dec64le_aligned(const void *src);
+
+/**
+ * Encode a 64-bit unsigned value into memory, in little-endian convention
+ * (least significant byte comes first). This function is defined only
+ * if a suitable 64-bit type was detected and used for <code>sph_u64</code>.
+ *
+ * @param dst the destination buffer
+ * @param val the value to encode
+ */
+static inline void sph_enc64le(void *dst, sph_u64 val);
+
+/**
+ * Encode a 64-bit unsigned value into memory, in little-endian convention
+ * (least significant byte comes first). This function assumes that the
+ * destination address is suitably aligned for a direct access, if the
+ * platform supports such things; it can thus be marginally faster than
+ * the generic <code>sph_enc64le()</code> function. This function is defined
+ * only if a suitable 64-bit type was detected and used for
+ * <code>sph_u64</code>.
+ *
+ * @param dst the destination buffer
+ * @param val the value to encode
+ */
+static inline void sph_enc64le_aligned(void *dst, sph_u64 val);
+
+/**
+ * Decode a 64-bit unsigned value from memory, in big-endian convention
+ * (most significant byte comes first). This function is defined only
+ * if a suitable 64-bit type was detected and used for <code>sph_u64</code>.
+ *
+ * @param src the source address
+ * @return the decoded value
+ */
+static inline sph_u64 sph_dec64be(const void *src);
+
+/**
+ * Decode a 64-bit unsigned value from memory, in big-endian convention
+ * (most significant byte comes first). This function assumes that the
+ * source address is suitably aligned for a direct access, if the platform
+ * supports such things; it can thus be marginally faster than the generic
+ * <code>sph_dec64be()</code> function. This function is defined only
+ * if a suitable 64-bit type was detected and used for <code>sph_u64</code>.
+ *
+ * @param src the source address
+ * @return the decoded value
+ */
+static inline sph_u64 sph_dec64be_aligned(const void *src);
+
+/**
+ * Encode a 64-bit unsigned value into memory, in big-endian convention
+ * (most significant byte comes first). This function is defined only
+ * if a suitable 64-bit type was detected and used for <code>sph_u64</code>.
+ *
+ * @param dst the destination buffer
+ * @param val the value to encode
+ */
+static inline void sph_enc64be(void *dst, sph_u64 val);
+
+/**
+ * Encode a 64-bit unsigned value into memory, in big-endian convention
+ * (most significant byte comes first). This function assumes that the
+ * destination address is suitably aligned for a direct access, if the
+ * platform supports such things; it can thus be marginally faster than
+ * the generic <code>sph_enc64be()</code> function. This function is defined
+ * only if a suitable 64-bit type was detected and used for
+ * <code>sph_u64</code>.
+ *
+ * @param dst the destination buffer
+ * @param val the value to encode
+ */
+static inline void sph_enc64be_aligned(void *dst, sph_u64 val);
+
+#endif
+
+/* ============== END documentation block for Doxygen ============= */
+
+#ifndef DOXYGEN_IGNORE
+
+/*
+ * We want to define the types "sph_u32" and "sph_u64" which hold
+ * unsigned values of at least, respectively, 32 and 64 bits. These
+ * tests should select appropriate types for most platforms. The
+ * macro "SPH_64" is defined if the 64-bit is supported.
+ */
+
+#undef SPH_64
+#undef SPH_64_TRUE
+
+#if defined __STDC__ && __STDC_VERSION__ >= 199901L
+
+/*
+ * On C99 implementations, we can use <stdint.h> to get an exact 64-bit
+ * type, if any, or otherwise use a wider type (which must exist, for
+ * C99 conformance).
+ */
+
+#include <stdint.h>
+
+#ifdef UINT32_MAX
+typedef uint32_t sph_u32;
+typedef int32_t sph_s32;
+#else
+typedef uint_fast32_t sph_u32;
+typedef int_fast32_t sph_s32;
+#endif
+#if !SPH_NO_64
+#ifdef UINT64_MAX
+typedef uint64_t sph_u64;
+typedef int64_t sph_s64;
+#else
+typedef uint_fast64_t sph_u64;
+typedef int_fast64_t sph_s64;
+#endif
+#endif
+
+#define SPH_C32(x) ((sph_u32)(x))
+#if !SPH_NO_64
+#define SPH_C64(x) ((sph_u64)(x))
+#define SPH_64 1
+#endif
+
+#else
+
+/*
+ * On non-C99 systems, we use "unsigned int" if it is wide enough,
+ * "unsigned long" otherwise. This supports all "reasonable" architectures.
+ * We have to be cautious: pre-C99 preprocessors handle constants
+ * differently in '#if' expressions. Hence the shifts to test UINT_MAX.
+ */
+
+#if ((UINT_MAX >> 11) >> 11) >= 0x3FF
+
+typedef unsigned int sph_u32;
+typedef int sph_s32;
+
+#define SPH_C32(x) ((sph_u32)(x ## U))
+
+#else
+
+typedef unsigned long sph_u32;
+typedef long sph_s32;
+
+#define SPH_C32(x) ((sph_u32)(x ## UL))
+
+#endif
+
+#if !SPH_NO_64
+
+/*
+ * We want a 64-bit type. We use "unsigned long" if it is wide enough (as
+ * is common on 64-bit architectures such as AMD64, Alpha or Sparcv9),
+ * "unsigned long long" otherwise, if available. We use ULLONG_MAX to
+ * test whether "unsigned long long" is available; we also know that
+ * gcc features this type, even if the libc headers do not know it.
+ */
+
+#if ((ULONG_MAX >> 31) >> 31) >= 3
+
+typedef unsigned long sph_u64;
+typedef long sph_s64;
+
+#define SPH_C64(x) ((sph_u64)(x ## UL))
+
+#define SPH_64 1
+
+#elif ((ULLONG_MAX >> 31) >> 31) >= 3 || defined __GNUC__
+
+typedef unsigned long long sph_u64;
+typedef long long sph_s64;
+
+#define SPH_C64(x) ((sph_u64)(x ## ULL))
+
+#define SPH_64 1
+
+#else
+
+/*
+ * No 64-bit type...
+ */
+
+#endif
+
+#endif
+
+#endif
+
+/*
+ * If the "unsigned long" type has length 64 bits or more, then this is
+ * a "true" 64-bit architectures. This is also true with Visual C on
+ * amd64, even though the "long" type is limited to 32 bits.
+ */
+#if SPH_64 && (((ULONG_MAX >> 31) >> 31) >= 3 || defined _M_X64)
+#define SPH_64_TRUE 1
+#endif
+
+/*
+ * Implementation note: some processors have specific opcodes to perform
+ * a rotation. Recent versions of gcc recognize the rotation expressions
+ * below and use the relevant opcodes, when appropriate.
+ */
+
+#define SPH_T32(x) ((x) & SPH_C32(0xFFFFFFFF))
+#define SPH_ROTL32(x, n) SPH_T32(((x) << (n)) | ((x) >> (32 - (n))))
+#define SPH_ROTR32(x, n) SPH_ROTL32(x, (32 - (n)))
+
+#if SPH_64
+
+#define SPH_T64(x) ((x) & SPH_C64(0xFFFFFFFFFFFFFFFF))
+#define SPH_ROTL64(x, n) SPH_T64(((x) << (n)) | ((x) >> (64 - (n))))
+#define SPH_ROTR64(x, n) SPH_ROTL64(x, (64 - (n)))
+
+#endif
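+
+/*
+ * For example, SPH_ROTL32(SPH_C32(0x80000001), 1) evaluates to
+ * SPH_C32(0x00000003): the top bit shifted out on the left re-enters on
+ * the right, and SPH_T32() truncates the result to 32 bits.
+ */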
+
+#ifndef DOXYGEN_IGNORE
+/*
+ * Define SPH_INLINE to be an "inline" qualifier, if available. We define
+ * some small macro-like functions which benefit greatly from being inlined.
+ */
+#if (defined __STDC__ && __STDC_VERSION__ >= 199901L) || defined __GNUC__
+#define SPH_INLINE inline
+#elif defined _MSC_VER
+#define SPH_INLINE __inline
+#else
+#define SPH_INLINE
+#endif
+#endif
+
+/*
+ * We define some macros which qualify the architecture. These macros
+ * may be explicitly set externally (e.g. as compiler parameters). The
+ * code below sets those macros if they are not already defined.
+ *
+ * Most macros are boolean, thus evaluate to either zero or non-zero.
+ * The SPH_UPTR macro is special, in that it evaluates to a C type,
+ * or is not defined.
+ *
+ * SPH_UPTR if defined: unsigned type to cast pointers into
+ *
+ * SPH_UNALIGNED non-zero if unaligned accesses are efficient
+ * SPH_LITTLE_ENDIAN non-zero if architecture is known to be little-endian
+ * SPH_BIG_ENDIAN non-zero if architecture is known to be big-endian
+ * SPH_LITTLE_FAST non-zero if little-endian decoding is fast
+ * SPH_BIG_FAST non-zero if big-endian decoding is fast
+ *
+ * If SPH_UPTR is defined, then encoding and decoding of 32-bit and 64-bit
+ * values will try to be "smart". Either SPH_LITTLE_ENDIAN or SPH_BIG_ENDIAN
+ * _must_ be non-zero in those situations. The 32-bit and 64-bit types
+ * _must_ also have an exact width.
+ *
+ * SPH_SPARCV9_GCC_32 UltraSPARC-compatible with gcc, 32-bit mode
+ * SPH_SPARCV9_GCC_64 UltraSPARC-compatible with gcc, 64-bit mode
+ * SPH_SPARCV9_GCC UltraSPARC-compatible with gcc
+ * SPH_I386_GCC x86-compatible (32-bit) with gcc
+ * SPH_I386_MSVC x86-compatible (32-bit) with Microsoft Visual C
+ * SPH_AMD64_GCC x86-compatible (64-bit) with gcc
+ * SPH_AMD64_MSVC x86-compatible (64-bit) with Microsoft Visual C
+ * SPH_PPC32_GCC PowerPC, 32-bit, with gcc
+ * SPH_PPC64_GCC PowerPC, 64-bit, with gcc
+ *
+ * TODO: enhance automatic detection, for more architectures and compilers.
+ * Endianness is the most important. SPH_UNALIGNED and SPH_UPTR help with
+ * some very fast functions (e.g. MD4) when using unaligned input data.
+ * The CPU-specific-with-GCC macros are useful only for inline assembly,
+ * normally confined to this header file.
+ */
+
+/*
+ * 32-bit x86, aka "i386 compatible".
+ */
+#if defined __i386__ || defined _M_IX86
+
+#define SPH_DETECT_UNALIGNED 1
+#define SPH_DETECT_LITTLE_ENDIAN 1
+#define SPH_DETECT_UPTR sph_u32
+#ifdef __GNUC__
+#define SPH_DETECT_I386_GCC 1
+#endif
+#ifdef _MSC_VER
+#define SPH_DETECT_I386_MSVC 1
+#endif
+
+/*
+ * 64-bit x86, hereafter known as "amd64".
+ */
+#elif defined __x86_64 || defined _M_X64
+
+#define SPH_DETECT_UNALIGNED 1
+#define SPH_DETECT_LITTLE_ENDIAN 1
+#define SPH_DETECT_UPTR sph_u64
+#ifdef __GNUC__
+#define SPH_DETECT_AMD64_GCC 1
+#endif
+#ifdef _MSC_VER
+#define SPH_DETECT_AMD64_MSVC 1
+#endif
+
+/*
+ * 64-bit Sparc architecture (implies v9).
+ */
+#elif ((defined __sparc__ || defined __sparc) && defined __arch64__) \
+ || defined __sparcv9
+
+#define SPH_DETECT_BIG_ENDIAN 1
+#define SPH_DETECT_UPTR sph_u64
+#ifdef __GNUC__
+#define SPH_DETECT_SPARCV9_GCC_64 1
+#define SPH_DETECT_LITTLE_FAST 1
+#endif
+
+/*
+ * 32-bit Sparc.
+ */
+#elif (defined __sparc__ || defined __sparc) \
+ && !(defined __sparcv9 || defined __arch64__)
+
+#define SPH_DETECT_BIG_ENDIAN 1
+#define SPH_DETECT_UPTR sph_u32
+#if defined __GNUC__ && defined __sparc_v9__
+#define SPH_DETECT_SPARCV9_GCC_32 1
+#define SPH_DETECT_LITTLE_FAST 1
+#endif
+
+/*
+ * ARM, little-endian.
+ */
+#elif defined __arm__ && __ARMEL__
+
+#define SPH_DETECT_LITTLE_ENDIAN 1
+
+/*
+ * MIPS, little-endian.
+ */
+#elif MIPSEL || _MIPSEL || __MIPSEL || __MIPSEL__
+
+#define SPH_DETECT_LITTLE_ENDIAN 1
+
+/*
+ * MIPS, big-endian.
+ */
+#elif MIPSEB || _MIPSEB || __MIPSEB || __MIPSEB__
+
+#define SPH_DETECT_BIG_ENDIAN 1
+
+/*
+ * PowerPC.
+ */
+#elif defined __powerpc__ || defined __POWERPC__ || defined __ppc__ \
+ || defined _ARCH_PPC
+
+/*
+ * Note: we do not declare cross-endian access to be "fast": even if
+ * using inline assembly, implementations should still assume that
+ * keeping the decoded word in a temporary is faster than decoding
+ * it again.
+ */
+#if defined __GNUC__
+#if SPH_64_TRUE
+#define SPH_DETECT_PPC64_GCC 1
+#else
+#define SPH_DETECT_PPC32_GCC 1
+#endif
+#endif
+
+#if defined __BIG_ENDIAN__ || defined _BIG_ENDIAN
+#define SPH_DETECT_BIG_ENDIAN 1
+#elif defined __LITTLE_ENDIAN__ || defined _LITTLE_ENDIAN
+#define SPH_DETECT_LITTLE_ENDIAN 1
+#endif
+
+/*
+ * Itanium, 64-bit.
+ */
+#elif defined __ia64 || defined __ia64__ \
+ || defined __itanium__ || defined _M_IA64
+
+#if defined __BIG_ENDIAN__ || defined _BIG_ENDIAN
+#define SPH_DETECT_BIG_ENDIAN 1
+#else
+#define SPH_DETECT_LITTLE_ENDIAN 1
+#endif
+#if defined __LP64__ || defined _LP64
+#define SPH_DETECT_UPTR sph_u64
+#else
+#define SPH_DETECT_UPTR sph_u32
+#endif
+
+#endif
+
+#if defined SPH_DETECT_SPARCV9_GCC_32 || defined SPH_DETECT_SPARCV9_GCC_64
+#define SPH_DETECT_SPARCV9_GCC 1
+#endif
+
+#if defined SPH_DETECT_UNALIGNED && !defined SPH_UNALIGNED
+#define SPH_UNALIGNED SPH_DETECT_UNALIGNED
+#endif
+#if defined SPH_DETECT_UPTR && !defined SPH_UPTR
+#define SPH_UPTR SPH_DETECT_UPTR
+#endif
+#if defined SPH_DETECT_LITTLE_ENDIAN && !defined SPH_LITTLE_ENDIAN
+#define SPH_LITTLE_ENDIAN SPH_DETECT_LITTLE_ENDIAN
+#endif
+#if defined SPH_DETECT_BIG_ENDIAN && !defined SPH_BIG_ENDIAN
+#define SPH_BIG_ENDIAN SPH_DETECT_BIG_ENDIAN
+#endif
+#if defined SPH_DETECT_LITTLE_FAST && !defined SPH_LITTLE_FAST
+#define SPH_LITTLE_FAST SPH_DETECT_LITTLE_FAST
+#endif
+#if defined SPH_DETECT_BIG_FAST && !defined SPH_BIG_FAST
+#define SPH_BIG_FAST SPH_DETECT_BIG_FAST
+#endif
+#if defined SPH_DETECT_SPARCV9_GCC_32 && !defined SPH_SPARCV9_GCC_32
+#define SPH_SPARCV9_GCC_32 SPH_DETECT_SPARCV9_GCC_32
+#endif
+#if defined SPH_DETECT_SPARCV9_GCC_64 && !defined SPH_SPARCV9_GCC_64
+#define SPH_SPARCV9_GCC_64 SPH_DETECT_SPARCV9_GCC_64
+#endif
+#if defined SPH_DETECT_SPARCV9_GCC && !defined SPH_SPARCV9_GCC
+#define SPH_SPARCV9_GCC SPH_DETECT_SPARCV9_GCC
+#endif
+#if defined SPH_DETECT_I386_GCC && !defined SPH_I386_GCC
+#define SPH_I386_GCC SPH_DETECT_I386_GCC
+#endif
+#if defined SPH_DETECT_I386_MSVC && !defined SPH_I386_MSVC
+#define SPH_I386_MSVC SPH_DETECT_I386_MSVC
+#endif
+#if defined SPH_DETECT_AMD64_GCC && !defined SPH_AMD64_GCC
+#define SPH_AMD64_GCC SPH_DETECT_AMD64_GCC
+#endif
+#if defined SPH_DETECT_AMD64_MSVC && !defined SPH_AMD64_MSVC
+#define SPH_AMD64_MSVC SPH_DETECT_AMD64_MSVC
+#endif
+#if defined SPH_DETECT_PPC32_GCC && !defined SPH_PPC32_GCC
+#define SPH_PPC32_GCC SPH_DETECT_PPC32_GCC
+#endif
+#if defined SPH_DETECT_PPC64_GCC && !defined SPH_PPC64_GCC
+#define SPH_PPC64_GCC SPH_DETECT_PPC64_GCC
+#endif
+
+#if SPH_LITTLE_ENDIAN && !defined SPH_LITTLE_FAST
+#define SPH_LITTLE_FAST 1
+#endif
+#if SPH_BIG_ENDIAN && !defined SPH_BIG_FAST
+#define SPH_BIG_FAST 1
+#endif
+
+#if defined SPH_UPTR && !(SPH_LITTLE_ENDIAN || SPH_BIG_ENDIAN)
+#error SPH_UPTR defined, but endianness is not known.
+#endif
+
+#if SPH_I386_GCC && !SPH_NO_ASM
+
+/*
+ * On x86 32-bit, with gcc, we use the bswapl opcode to byte-swap 32-bit
+ * values.
+ */
+
+static SPH_INLINE sph_u32
+sph_bswap32(sph_u32 x)
+{
+ __asm__ __volatile__ ("bswapl %0" : "=r" (x) : "0" (x));
+ return x;
+}
+
+#if SPH_64
+
+static SPH_INLINE sph_u64
+sph_bswap64(sph_u64 x)
+{
+ return ((sph_u64)sph_bswap32((sph_u32)x) << 32)
+ | (sph_u64)sph_bswap32((sph_u32)(x >> 32));
+}
+
+#endif
+
+#elif SPH_AMD64_GCC && !SPH_NO_ASM
+
+/*
+ * On x86 64-bit, with gcc, we use the bswapl and bswapq opcodes to
+ * byte-swap 32-bit and 64-bit values, respectively.
+ */
+
+static SPH_INLINE sph_u32
+sph_bswap32(sph_u32 x)
+{
+ __asm__ __volatile__ ("bswapl %0" : "=r" (x) : "0" (x));
+ return x;
+}
+
+#if SPH_64
+
+static SPH_INLINE sph_u64
+sph_bswap64(sph_u64 x)
+{
+ __asm__ __volatile__ ("bswapq %0" : "=r" (x) : "0" (x));
+ return x;
+}
+
+#endif
+
+/*
+ * Disabled code. Apparently, Microsoft Visual C 2005 is smart enough
+ * to generate proper opcodes for endianness swapping with the pure C
+ * implementation below.
+ *
+
+#elif SPH_I386_MSVC && !SPH_NO_ASM
+
+static __inline sph_u32 __declspec(naked) __fastcall
+sph_bswap32(sph_u32 x)
+{
+ __asm {
+ bswap ecx
+ mov eax,ecx
+ ret
+ }
+}
+
+#if SPH_64
+
+static SPH_INLINE sph_u64
+sph_bswap64(sph_u64 x)
+{
+ return ((sph_u64)sph_bswap32((sph_u32)x) << 32)
+ | (sph_u64)sph_bswap32((sph_u32)(x >> 32));
+}
+
+#endif
+
+ *
+ * [end of disabled code]
+ */
+
+#else
+
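+/*
+ * Generic byte-swap in portable C: swap the two 16-bit halves, then the
+ * bytes within each half. Most current compilers recognize this pattern
+ * and emit a native byte-swap opcode when the target has one.
+ */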
+static SPH_INLINE sph_u32
+sph_bswap32(sph_u32 x)
+{
+ x = SPH_T32((x << 16) | (x >> 16));
+ x = ((x & SPH_C32(0xFF00FF00)) >> 8)
+ | ((x & SPH_C32(0x00FF00FF)) << 8);
+ return x;
+}
+
+#if SPH_64
+
+/**
+ * Byte-swap a 64-bit value.
+ *
+ * @param x the input value
+ * @return the byte-swapped value
+ */
+static SPH_INLINE sph_u64
+sph_bswap64(sph_u64 x)
+{
+ x = SPH_T64((x << 32) | (x >> 32));
+ x = ((x & SPH_C64(0xFFFF0000FFFF0000)) >> 16)
+ | ((x & SPH_C64(0x0000FFFF0000FFFF)) << 16);
+ x = ((x & SPH_C64(0xFF00FF00FF00FF00)) >> 8)
+ | ((x & SPH_C64(0x00FF00FF00FF00FF)) << 8);
+ return x;
+}
+
+#endif
+
+#endif
+
+#if SPH_SPARCV9_GCC && !SPH_NO_ASM
+
+/*
+ * On UltraSPARC systems, native ordering is big-endian, but it is
+ * possible to perform little-endian read accesses by specifying the
+ * address space 0x88 (ASI_PRIMARY_LITTLE). Basically, either we use
+ * the opcode "lda [%reg]0x88,%dst", where %reg is the register which
+ * contains the source address and %dst is the destination register,
+ * or we use "lda [%reg+imm]%asi,%dst", which uses the %asi register
+ * to get the address space name. The latter format is better since it
+ * combines an addition and the actual access in a single opcode; but
+ * it requires the setting (and subsequent resetting) of %asi, which is
+ * slow. Some operations (e.g. the MD5 compression function) combine many
+ * successive little-endian read accesses, which may share the same
+ * %asi setting. The macros below contain the appropriate inline
+ * assembly.
+ */
+
+#define SPH_SPARCV9_SET_ASI \
+ sph_u32 sph_sparcv9_asi; \
+ __asm__ __volatile__ ( \
+ "rd %%asi,%0\n\twr %%g0,0x88,%%asi" : "=r" (sph_sparcv9_asi));
+
+#define SPH_SPARCV9_RESET_ASI \
+ __asm__ __volatile__ ("wr %%g0,%0,%%asi" : : "r" (sph_sparcv9_asi));
+
+#define SPH_SPARCV9_DEC32LE(base, idx) ({ \
+ sph_u32 sph_sparcv9_tmp; \
+ __asm__ __volatile__ ("lda [%1+" #idx "*4]%%asi,%0" \
+ : "=r" (sph_sparcv9_tmp) : "r" (base)); \
+ sph_sparcv9_tmp; \
+ })
+
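+/*
+ * A usage sketch (illustrative names): several little-endian reads
+ * sharing a single %asi setting.
+ *
+ *   SPH_SPARCV9_SET_ASI;
+ *   w0 = SPH_SPARCV9_DEC32LE(p, 0);
+ *   w1 = SPH_SPARCV9_DEC32LE(p, 1);
+ *   SPH_SPARCV9_RESET_ASI;
+ */
+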
+#endif
+
+static SPH_INLINE void
+sph_enc16be(void *dst, unsigned val)
+{
+ ((unsigned char *)dst)[0] = (val >> 8);
+ ((unsigned char *)dst)[1] = val;
+}
+
+static SPH_INLINE unsigned
+sph_dec16be(const void *src)
+{
+ return ((unsigned)(((const unsigned char *)src)[0]) << 8)
+ | (unsigned)(((const unsigned char *)src)[1]);
+}
+
+static SPH_INLINE void
+sph_enc16le(void *dst, unsigned val)
+{
+ ((unsigned char *)dst)[0] = val;
+ ((unsigned char *)dst)[1] = val >> 8;
+}
+
+static SPH_INLINE unsigned
+sph_dec16le(const void *src)
+{
+ return (unsigned)(((const unsigned char *)src)[0])
+ | ((unsigned)(((const unsigned char *)src)[1]) << 8);
+}
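+
+/*
+ * For example (with an arbitrary buffer name), sph_enc16be(buf, 0x1234)
+ * sets buf[0] = 0x12 and buf[1] = 0x34; sph_dec16be(buf) then returns
+ * 0x1234, while sph_dec16le(buf) returns 0x3412.
+ */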
+
+/**
+ * Encode a 32-bit value into the provided buffer (big endian convention).
+ *
+ * @param dst the destination buffer
+ * @param val the 32-bit value to encode
+ */
+static SPH_INLINE void
+sph_enc32be(void *dst, sph_u32 val)
+{
+#if defined SPH_UPTR
+#if SPH_UNALIGNED
+#if SPH_LITTLE_ENDIAN
+ val = sph_bswap32(val);
+#endif
+ *(sph_u32 *)dst = val;
+#else
+ if (((SPH_UPTR)dst & 3) == 0) {
+#if SPH_LITTLE_ENDIAN
+ val = sph_bswap32(val);
+#endif
+ *(sph_u32 *)dst = val;
+ } else {
+ ((unsigned char *)dst)[0] = (val >> 24);
+ ((unsigned char *)dst)[1] = (val >> 16);
+ ((unsigned char *)dst)[2] = (val >> 8);
+ ((unsigned char *)dst)[3] = val;
+ }
+#endif
+#else
+ ((unsigned char *)dst)[0] = (val >> 24);
+ ((unsigned char *)dst)[1] = (val >> 16);
+ ((unsigned char *)dst)[2] = (val >> 8);
+ ((unsigned char *)dst)[3] = val;
+#endif
+}
+
+/**
+ * Encode a 32-bit value into the provided buffer (big endian convention).
+ * The destination buffer must be properly aligned.
+ *
+ * @param dst the destination buffer (32-bit aligned)
+ * @param val the value to encode
+ */
+static SPH_INLINE void
+sph_enc32be_aligned(void *dst, sph_u32 val)
+{
+#if SPH_LITTLE_ENDIAN
+ *(sph_u32 *)dst = sph_bswap32(val);
+#elif SPH_BIG_ENDIAN
+ *(sph_u32 *)dst = val;
+#else
+ ((unsigned char *)dst)[0] = (val >> 24);
+ ((unsigned char *)dst)[1] = (val >> 16);
+ ((unsigned char *)dst)[2] = (val >> 8);
+ ((unsigned char *)dst)[3] = val;
+#endif
+}
+
+/**
+ * Decode a 32-bit value from the provided buffer (big endian convention).
+ *
+ * @param src the source buffer
+ * @return the decoded value
+ */
+static SPH_INLINE sph_u32
+sph_dec32be(const void *src)
+{
+#if defined SPH_UPTR
+#if SPH_UNALIGNED
+#if SPH_LITTLE_ENDIAN
+ return sph_bswap32(*(const sph_u32 *)src);
+#else
+ return *(const sph_u32 *)src;
+#endif
+#else
+ if (((SPH_UPTR)src & 3) == 0) {
+#if SPH_LITTLE_ENDIAN
+ return sph_bswap32(*(const sph_u32 *)src);
+#else
+ return *(const sph_u32 *)src;
+#endif
+ } else {
+ return ((sph_u32)(((const unsigned char *)src)[0]) << 24)
+ | ((sph_u32)(((const unsigned char *)src)[1]) << 16)
+ | ((sph_u32)(((const unsigned char *)src)[2]) << 8)
+ | (sph_u32)(((const unsigned char *)src)[3]);
+ }
+#endif
+#else
+ return ((sph_u32)(((const unsigned char *)src)[0]) << 24)
+ | ((sph_u32)(((const unsigned char *)src)[1]) << 16)
+ | ((sph_u32)(((const unsigned char *)src)[2]) << 8)
+ | (sph_u32)(((const unsigned char *)src)[3]);
+#endif
+}
+
+/**
+ * Decode a 32-bit value from the provided buffer (big endian convention).
+ * The source buffer must be properly aligned.
+ *
+ * @param src the source buffer (32-bit aligned)
+ * @return the decoded value
+ */
+static SPH_INLINE sph_u32
+sph_dec32be_aligned(const void *src)
+{
+#if SPH_LITTLE_ENDIAN
+ return sph_bswap32(*(const sph_u32 *)src);
+#elif SPH_BIG_ENDIAN
+ return *(const sph_u32 *)src;
+#else
+ return ((sph_u32)(((const unsigned char *)src)[0]) << 24)
+ | ((sph_u32)(((const unsigned char *)src)[1]) << 16)
+ | ((sph_u32)(((const unsigned char *)src)[2]) << 8)
+ | (sph_u32)(((const unsigned char *)src)[3]);
+#endif
+}
+
+/**
+ * Encode a 32-bit value into the provided buffer (little endian convention).
+ *
+ * @param dst the destination buffer
+ * @param val the 32-bit value to encode
+ */
+static SPH_INLINE void
+sph_enc32le(void *dst, sph_u32 val)
+{
+#if defined SPH_UPTR
+#if SPH_UNALIGNED
+#if SPH_BIG_ENDIAN
+ val = sph_bswap32(val);
+#endif
+ *(sph_u32 *)dst = val;
+#else
+ if (((SPH_UPTR)dst & 3) == 0) {
+#if SPH_BIG_ENDIAN
+ val = sph_bswap32(val);
+#endif
+ *(sph_u32 *)dst = val;
+ } else {
+ ((unsigned char *)dst)[0] = val;
+ ((unsigned char *)dst)[1] = (val >> 8);
+ ((unsigned char *)dst)[2] = (val >> 16);
+ ((unsigned char *)dst)[3] = (val >> 24);
+ }
+#endif
+#else
+ ((unsigned char *)dst)[0] = val;
+ ((unsigned char *)dst)[1] = (val >> 8);
+ ((unsigned char *)dst)[2] = (val >> 16);
+ ((unsigned char *)dst)[3] = (val >> 24);
+#endif
+}
+
+/**
+ * Encode a 32-bit value into the provided buffer (little endian convention).
+ * The destination buffer must be properly aligned.
+ *
+ * @param dst the destination buffer (32-bit aligned)
+ * @param val the value to encode
+ */
+static SPH_INLINE void
+sph_enc32le_aligned(void *dst, sph_u32 val)
+{
+#if SPH_LITTLE_ENDIAN
+ *(sph_u32 *)dst = val;
+#elif SPH_BIG_ENDIAN
+ *(sph_u32 *)dst = sph_bswap32(val);
+#else
+ ((unsigned char *)dst)[0] = val;
+ ((unsigned char *)dst)[1] = (val >> 8);
+ ((unsigned char *)dst)[2] = (val >> 16);
+ ((unsigned char *)dst)[3] = (val >> 24);
+#endif
+}
+
+/**
+ * Decode a 32-bit value from the provided buffer (little endian convention).
+ *
+ * @param src the source buffer
+ * @return the decoded value
+ */
+static SPH_INLINE sph_u32
+sph_dec32le(const void *src)
+{
+#if defined SPH_UPTR
+#if SPH_UNALIGNED
+#if SPH_BIG_ENDIAN
+ return sph_bswap32(*(const sph_u32 *)src);
+#else
+ return *(const sph_u32 *)src;
+#endif
+#else
+ if (((SPH_UPTR)src & 3) == 0) {
+#if SPH_BIG_ENDIAN
+#if SPH_SPARCV9_GCC && !SPH_NO_ASM
+ sph_u32 tmp;
+
+ /*
+ * "__volatile__" is needed here because without it,
+ * gcc-3.4.3 miscompiles the code and performs the
+ * access before the test on the address, thus triggering
+ * a bus error...
+ */
+ __asm__ __volatile__ (
+ "lda [%1]0x88,%0" : "=r" (tmp) : "r" (src));
+ return tmp;
+/*
+ * On PowerPC, this turns out not to be worth the effort: the inline
+ * assembly makes the GCC optimizer uncomfortable, which tends to nullify
+ * the decoding gains.
+ *
+ * For most hash functions, using this inline assembly trick changes
+ * hashing speed by less than 5% and often _reduces_ it. The biggest
+ * gains are for MD4 (+11%) and CubeHash (+30%). For all others, it is
+ * less than 10%. The speed gain on CubeHash is probably due to the
+ * chronic shortage of registers that CubeHash endures; for the other
+ * functions, the generic code appears to be efficient enough already.
+ *
+#elif (SPH_PPC32_GCC || SPH_PPC64_GCC) && !SPH_NO_ASM
+ sph_u32 tmp;
+
+ __asm__ __volatile__ (
+ "lwbrx %0,0,%1" : "=r" (tmp) : "r" (src));
+ return tmp;
+ */
+#else
+ return sph_bswap32(*(const sph_u32 *)src);
+#endif
+#else
+ return *(const sph_u32 *)src;
+#endif
+ } else {
+ return (sph_u32)(((const unsigned char *)src)[0])
+ | ((sph_u32)(((const unsigned char *)src)[1]) << 8)
+ | ((sph_u32)(((const unsigned char *)src)[2]) << 16)
+ | ((sph_u32)(((const unsigned char *)src)[3]) << 24);
+ }
+#endif
+#else
+ return (sph_u32)(((const unsigned char *)src)[0])
+ | ((sph_u32)(((const unsigned char *)src)[1]) << 8)
+ | ((sph_u32)(((const unsigned char *)src)[2]) << 16)
+ | ((sph_u32)(((const unsigned char *)src)[3]) << 24);
+#endif
+}
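+
+/*
+ * Illustrative note (editor's addition, not upstream): on big endian
+ * targets the aligned branch above byte-swaps the whole word (or uses
+ * the SPARC lda trick), while the unaligned branch assembles bytes
+ * directly; the decoded value is identical either way.
+ *
+ *     unsigned char le[4] = { 0x04, 0x03, 0x02, 0x01 };
+ *     sph_u32 v = sph_dec32le(le);        // v == SPH_C32(0x01020304)
+ */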
+
+/**
+ * Decode a 32-bit value from the provided buffer (little endian convention).
+ * The source buffer must be properly aligned.
+ *
+ * @param src the source buffer (32-bit aligned)
+ * @return the decoded value
+ */
+static SPH_INLINE sph_u32
+sph_dec32le_aligned(const void *src)
+{
+#if SPH_LITTLE_ENDIAN
+ return *(const sph_u32 *)src;
+#elif SPH_BIG_ENDIAN
+#if SPH_SPARCV9_GCC && !SPH_NO_ASM
+ sph_u32 tmp;
+
+ __asm__ __volatile__ ("lda [%1]0x88,%0" : "=r" (tmp) : "r" (src));
+ return tmp;
+/*
+ * Not worth it generally.
+ *
+#elif (SPH_PPC32_GCC || SPH_PPC64_GCC) && !SPH_NO_ASM
+ sph_u32 tmp;
+
+ __asm__ __volatile__ ("lwbrx %0,0,%1" : "=r" (tmp) : "r" (src));
+ return tmp;
+ */
+#else
+ return sph_bswap32(*(const sph_u32 *)src);
+#endif
+#else
+ return (sph_u32)(((const unsigned char *)src)[0])
+ | ((sph_u32)(((const unsigned char *)src)[1]) << 8)
+ | ((sph_u32)(((const unsigned char *)src)[2]) << 16)
+ | ((sph_u32)(((const unsigned char *)src)[3]) << 24);
+#endif
+}
+
+#if SPH_64
+
+/**
+ * Encode a 64-bit value into the provided buffer (big endian convention).
+ *
+ * @param dst the destination buffer
+ * @param val the 64-bit value to encode
+ */
+static SPH_INLINE void
+sph_enc64be(void *dst, sph_u64 val)
+{
+#if defined SPH_UPTR
+#if SPH_UNALIGNED
+#if SPH_LITTLE_ENDIAN
+ val = sph_bswap64(val);
+#endif
+ *(sph_u64 *)dst = val;
+#else
+ if (((SPH_UPTR)dst & 7) == 0) {
+#if SPH_LITTLE_ENDIAN
+ val = sph_bswap64(val);
+#endif
+ *(sph_u64 *)dst = val;
+ } else {
+ ((unsigned char *)dst)[0] = (val >> 56);
+ ((unsigned char *)dst)[1] = (val >> 48);
+ ((unsigned char *)dst)[2] = (val >> 40);
+ ((unsigned char *)dst)[3] = (val >> 32);
+ ((unsigned char *)dst)[4] = (val >> 24);
+ ((unsigned char *)dst)[5] = (val >> 16);
+ ((unsigned char *)dst)[6] = (val >> 8);
+ ((unsigned char *)dst)[7] = val;
+ }
+#endif
+#else
+ ((unsigned char *)dst)[0] = (val >> 56);
+ ((unsigned char *)dst)[1] = (val >> 48);
+ ((unsigned char *)dst)[2] = (val >> 40);
+ ((unsigned char *)dst)[3] = (val >> 32);
+ ((unsigned char *)dst)[4] = (val >> 24);
+ ((unsigned char *)dst)[5] = (val >> 16);
+ ((unsigned char *)dst)[6] = (val >> 8);
+ ((unsigned char *)dst)[7] = val;
+#endif
+}
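+
+/*
+ * Illustrative usage (editor's addition, not upstream): SHA-384/512
+ * style padding stores the message bit length as a big endian 64-bit
+ * value; SPH_C64() is the 64-bit constant macro from this header.
+ *
+ *     unsigned char tail[8];
+ *     sph_enc64be(tail, SPH_C64(0x123456789ABCDEF0));
+ *     // tail[0] == 0x12 and tail[7] == 0xF0 on every platform
+ */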
+
+/**
+ * Encode a 64-bit value into the provided buffer (big endian convention).
+ * The destination buffer must be properly aligned.
+ *
+ * @param dst the destination buffer (64-bit aligned)
+ * @param val the value to encode
+ */
+static SPH_INLINE void
+sph_enc64be_aligned(void *dst, sph_u64 val)
+{
+#if SPH_LITTLE_ENDIAN
+ *(sph_u64 *)dst = sph_bswap64(val);
+#elif SPH_BIG_ENDIAN
+ *(sph_u64 *)dst = val;
+#else
+ ((unsigned char *)dst)[0] = (val >> 56);
+ ((unsigned char *)dst)[1] = (val >> 48);
+ ((unsigned char *)dst)[2] = (val >> 40);
+ ((unsigned char *)dst)[3] = (val >> 32);
+ ((unsigned char *)dst)[4] = (val >> 24);
+ ((unsigned char *)dst)[5] = (val >> 16);
+ ((unsigned char *)dst)[6] = (val >> 8);
+ ((unsigned char *)dst)[7] = val;
+#endif
+}
+
+/**
+ * Decode a 64-bit value from the provided buffer (big endian convention).
+ *
+ * @param src the source buffer
+ * @return the decoded value
+ */
+static SPH_INLINE sph_u64
+sph_dec64be(const void *src)
+{
+#if defined SPH_UPTR
+#if SPH_UNALIGNED
+#if SPH_LITTLE_ENDIAN
+ return sph_bswap64(*(const sph_u64 *)src);
+#else
+ return *(const sph_u64 *)src;
+#endif
+#else
+ if (((SPH_UPTR)src & 7) == 0) {
+#if SPH_LITTLE_ENDIAN
+ return sph_bswap64(*(const sph_u64 *)src);
+#else
+ return *(const sph_u64 *)src;
+#endif
+ } else {
+ return ((sph_u64)(((const unsigned char *)src)[0]) << 56)
+ | ((sph_u64)(((const unsigned char *)src)[1]) << 48)
+ | ((sph_u64)(((const unsigned char *)src)[2]) << 40)
+ | ((sph_u64)(((const unsigned char *)src)[3]) << 32)
+ | ((sph_u64)(((const unsigned char *)src)[4]) << 24)
+ | ((sph_u64)(((const unsigned char *)src)[5]) << 16)
+ | ((sph_u64)(((const unsigned char *)src)[6]) << 8)
+ | (sph_u64)(((const unsigned char *)src)[7]);
+ }
+#endif
+#else
+ return ((sph_u64)(((const unsigned char *)src)[0]) << 56)
+ | ((sph_u64)(((const unsigned char *)src)[1]) << 48)
+ | ((sph_u64)(((const unsigned char *)src)[2]) << 40)
+ | ((sph_u64)(((const unsigned char *)src)[3]) << 32)
+ | ((sph_u64)(((const unsigned char *)src)[4]) << 24)
+ | ((sph_u64)(((const unsigned char *)src)[5]) << 16)
+ | ((sph_u64)(((const unsigned char *)src)[6]) << 8)
+ | (sph_u64)(((const unsigned char *)src)[7]);
+#endif
+}
+
+/**
+ * Decode a 64-bit value from the provided buffer (big endian convention).
+ * The source buffer must be properly aligned.
+ *
+ * @param src the source buffer (64-bit aligned)
+ * @return the decoded value
+ */
+static SPH_INLINE sph_u64
+sph_dec64be_aligned(const void *src)
+{
+#if SPH_LITTLE_ENDIAN
+ return sph_bswap64(*(const sph_u64 *)src);
+#elif SPH_BIG_ENDIAN
+ return *(const sph_u64 *)src;
+#else
+ return ((sph_u64)(((const unsigned char *)src)[0]) << 56)
+ | ((sph_u64)(((const unsigned char *)src)[1]) << 48)
+ | ((sph_u64)(((const unsigned char *)src)[2]) << 40)
+ | ((sph_u64)(((const unsigned char *)src)[3]) << 32)
+ | ((sph_u64)(((const unsigned char *)src)[4]) << 24)
+ | ((sph_u64)(((const unsigned char *)src)[5]) << 16)
+ | ((sph_u64)(((const unsigned char *)src)[6]) << 8)
+ | (sph_u64)(((const unsigned char *)src)[7]);
+#endif
+}
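+
+/*
+ * Illustrative caution (editor's addition, not upstream): the
+ * _aligned variants skip the runtime address test, so passing a
+ * misaligned pointer is undefined on strict-alignment targets. A
+ * hypothetical, naturally aligned state array is safe:
+ *
+ *     sph_u64 state[8];                   // e.g. filled by a hash core
+ *     sph_u64 h0 = sph_dec64be_aligned(&state[0]);
+ */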
+
+/**
+ * Encode a 64-bit value into the provided buffer (little endian convention).
+ *
+ * @param dst the destination buffer
+ * @param val the 64-bit value to encode
+ */
+static SPH_INLINE void
+sph_enc64le(void *dst, sph_u64 val)
+{
+#if defined SPH_UPTR
+#if SPH_UNALIGNED
+#if SPH_BIG_ENDIAN
+ val = sph_bswap64(val);
+#endif
+ *(sph_u64 *)dst = val;
+#else
+ if (((SPH_UPTR)dst & 7) == 0) {
+#if SPH_BIG_ENDIAN
+ val = sph_bswap64(val);
+#endif
+ *(sph_u64 *)dst = val;
+ } else {
+ ((unsigned char *)dst)[0] = val;
+ ((unsigned char *)dst)[1] = (val >> 8);
+ ((unsigned char *)dst)[2] = (val >> 16);
+ ((unsigned char *)dst)[3] = (val >> 24);
+ ((unsigned char *)dst)[4] = (val >> 32);
+ ((unsigned char *)dst)[5] = (val >> 40);
+ ((unsigned char *)dst)[6] = (val >> 48);
+ ((unsigned char *)dst)[7] = (val >> 56);
+ }
+#endif
+#else
+ ((unsigned char *)dst)[0] = val;
+ ((unsigned char *)dst)[1] = (val >> 8);
+ ((unsigned char *)dst)[2] = (val >> 16);
+ ((unsigned char *)dst)[3] = (val >> 24);
+ ((unsigned char *)dst)[4] = (val >> 32);
+ ((unsigned char *)dst)[5] = (val >> 40);
+ ((unsigned char *)dst)[6] = (val >> 48);
+ ((unsigned char *)dst)[7] = (val >> 56);
+#endif
+}
+
+/**
+ * Encode a 64-bit value into the provided buffer (little endian convention).
+ * The destination buffer must be properly aligned.
+ *
+ * @param dst the destination buffer (64-bit aligned)
+ * @param val the value to encode
+ */
+static SPH_INLINE void
+sph_enc64le_aligned(void *dst, sph_u64 val)
+{
+#if SPH_LITTLE_ENDIAN
+ *(sph_u64 *)dst = val;
+#elif SPH_BIG_ENDIAN
+ *(sph_u64 *)dst = sph_bswap64(val);
+#else
+ ((unsigned char *)dst)[0] = val;
+ ((unsigned char *)dst)[1] = (val >> 8);
+ ((unsigned char *)dst)[2] = (val >> 16);
+ ((unsigned char *)dst)[3] = (val >> 24);
+ ((unsigned char *)dst)[4] = (val >> 32);
+ ((unsigned char *)dst)[5] = (val >> 40);
+ ((unsigned char *)dst)[6] = (val >> 48);
+ ((unsigned char *)dst)[7] = (val >> 56);
+#endif
+}
+
+/**
+ * Decode a 64-bit value from the provided buffer (little endian convention).
+ *
+ * @param src the source buffer
+ * @return the decoded value
+ */
+static SPH_INLINE sph_u64
+sph_dec64le(const void *src)
+{
+#if defined SPH_UPTR
+#if SPH_UNALIGNED
+#if SPH_BIG_ENDIAN
+ return sph_bswap64(*(const sph_u64 *)src);
+#else
+ return *(const sph_u64 *)src;
+#endif
+#else
+ if (((SPH_UPTR)src & 7) == 0) {
+#if SPH_BIG_ENDIAN
+#if SPH_SPARCV9_GCC_64 && !SPH_NO_ASM
+ sph_u64 tmp;
+
+ __asm__ __volatile__ (
+ "ldxa [%1]0x88,%0" : "=r" (tmp) : "r" (src));
+ return tmp;
+/*
+ * Not worth it generally.
+ *
+#elif SPH_PPC32_GCC && !SPH_NO_ASM
+ return (sph_u64)sph_dec32le_aligned(src)
+ | ((sph_u64)sph_dec32le_aligned(
+ (const char *)src + 4) << 32);
+#elif SPH_PPC64_GCC && !SPH_NO_ASM
+ sph_u64 tmp;
+
+ __asm__ __volatile__ (
+ "ldbrx %0,0,%1" : "=r" (tmp) : "r" (src));
+ return tmp;
+ */
+#else
+ return sph_bswap64(*(const sph_u64 *)src);
+#endif
+#else
+ return *(const sph_u64 *)src;
+#endif
+ } else {
+ return (sph_u64)(((const unsigned char *)src)[0])
+ | ((sph_u64)(((const unsigned char *)src)[1]) << 8)
+ | ((sph_u64)(((const unsigned char *)src)[2]) << 16)
+ | ((sph_u64)(((const unsigned char *)src)[3]) << 24)
+ | ((sph_u64)(((const unsigned char *)src)[4]) << 32)
+ | ((sph_u64)(((const unsigned char *)src)[5]) << 40)
+ | ((sph_u64)(((const unsigned char *)src)[6]) << 48)
+ | ((sph_u64)(((const unsigned char *)src)[7]) << 56);
+ }
+#endif
+#else
+ return (sph_u64)(((const unsigned char *)src)[0])
+ | ((sph_u64)(((const unsigned char *)src)[1]) << 8)
+ | ((sph_u64)(((const unsigned char *)src)[2]) << 16)
+ | ((sph_u64)(((const unsigned char *)src)[3]) << 24)
+ | ((sph_u64)(((const unsigned char *)src)[4]) << 32)
+ | ((sph_u64)(((const unsigned char *)src)[5]) << 40)
+ | ((sph_u64)(((const unsigned char *)src)[6]) << 48)
+ | ((sph_u64)(((const unsigned char *)src)[7]) << 56);
+#endif
+}
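+
+/*
+ * Illustrative round trip (editor's addition, not upstream): encode
+ * followed by decode restores the original value regardless of host
+ * endianness or pointer alignment.
+ *
+ *     unsigned char buf[8];
+ *     sph_enc64le(buf, SPH_C64(0x0102030405060708));
+ *     sph_u64 v = sph_dec64le(buf);
+ *     // v == SPH_C64(0x0102030405060708)
+ */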
+
+/**
+ * Decode a 64-bit value from the provided buffer (little endian convention).
+ * The source buffer must be properly aligned.
+ *
+ * @param src the source buffer (64-bit aligned)
+ * @return the decoded value
+ */
+static SPH_INLINE sph_u64
+sph_dec64le_aligned(const void *src)
+{
+#if SPH_LITTLE_ENDIAN
+ return *(const sph_u64 *)src;
+#elif SPH_BIG_ENDIAN
+#if SPH_SPARCV9_GCC_64 && !SPH_NO_ASM
+ sph_u64 tmp;
+
+ __asm__ __volatile__ ("ldxa [%1]0x88,%0" : "=r" (tmp) : "r" (src));
+ return tmp;
+/*
+ * Not worth it generally.
+ *
+#elif SPH_PPC32_GCC && !SPH_NO_ASM
+ return (sph_u64)sph_dec32le_aligned(src)
+ | ((sph_u64)sph_dec32le_aligned((const char *)src + 4) << 32);
+#elif SPH_PPC64_GCC && !SPH_NO_ASM
+ sph_u64 tmp;
+
+ __asm__ __volatile__ ("ldbrx %0,0,%1" : "=r" (tmp) : "r" (src));
+ return tmp;
+ */
+#else
+ return sph_bswap64(*(const sph_u64 *)src);
+#endif
+#else
+ return (sph_u64)(((const unsigned char *)src)[0])
+ | ((sph_u64)(((const unsigned char *)src)[1]) << 8)
+ | ((sph_u64)(((const unsigned char *)src)[2]) << 16)
+ | ((sph_u64)(((const unsigned char *)src)[3]) << 24)
+ | ((sph_u64)(((const unsigned char *)src)[4]) << 32)
+ | ((sph_u64)(((const unsigned char *)src)[5]) << 40)
+ | ((sph_u64)(((const unsigned char *)src)[6]) << 48)
+ | ((sph_u64)(((const unsigned char *)src)[7]) << 56);
+#endif
+}
+
+#endif
+
+#endif /* Doxygen excluded block */
+
+#endif