author     Jay Berkenbilt <ejb@ql.org>    2022-02-08 15:18:08 +0100
committer  Jay Berkenbilt <ejb@ql.org>    2022-02-08 17:51:15 +0100
commit     cb769c62e55599e9f980001830bc61d9fcaa64a9 (patch)
tree       0bf980c385a61cbc8720cf990762ffc1200f9d6a /libqpdf/sph
parent     716381f65a2b2dc72f8da2426ba71aeab02c507f (diff)
download   qpdf-cb769c62e55599e9f980001830bc61d9fcaa64a9.tar.zst
WHITESPACE ONLY -- expand tabs in source code
This commit expands all tabs using an 8-character tab width. You should ignore this commit when using git blame, or use git blame -w. In the early days, I used tabs for indentation where possible, since emacs did this automatically. In recent years, I have switched to using only spaces, which means the qpdf source code has been a mixture of spaces and tabs. I had avoided cleaning this up because I didn't want gratuitous whitespace changes to cloud the output of git blame, but I changed my mind after discussing it with users who view the qpdf source code in editors/IDEs whose default tab width differs, and in light of the fact that I am planning to start applying automatic code formatting soon.
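As a usage sketch of the blame advice above (the --ignore-rev and blame.ignoreRevsFile options assume Git 2.23 or newer; the .git-blame-ignore-revs file name is just a conventional choice):

    # ignore whitespace changes when blaming a file touched by this commit
    git blame -w libqpdf/sph/md_helper.c

    # or skip this specific commit explicitly (Git 2.23+)
    git blame --ignore-rev cb769c62e55599e9f980001830bc61d9fcaa64a9 libqpdf/sph/md_helper.c

    # or record it once so every future blame skips it
    echo cb769c62e55599e9f980001830bc61d9fcaa64a9 >> .git-blame-ignore-revs
    git config blame.ignoreRevsFile .git-blame-ignore-revs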
Diffstat (limited to 'libqpdf/sph')
-rw-r--r--  libqpdf/sph/md_helper.c  262
-rw-r--r--  libqpdf/sph/sph_sha2.h    14
-rw-r--r--  libqpdf/sph/sph_types.h  612
3 files changed, 444 insertions(+), 444 deletions(-)
diff --git a/libqpdf/sph/md_helper.c b/libqpdf/sph/md_helper.c
index 829391a7..5c114515 100644
--- a/libqpdf/sph/md_helper.c
+++ b/libqpdf/sph/md_helper.c
@@ -128,103 +128,103 @@ void
SPH_XCAT(sph_, HASH)(void *cc, const void *data, size_t len)
#endif
{
- SPH_XCAT(sph_, SPH_XCAT(HASH, _context)) *sc;
- unsigned current;
+ SPH_XCAT(sph_, SPH_XCAT(HASH, _context)) *sc;
+ unsigned current;
- sc = cc;
+ sc = cc;
#if SPH_64
- current = (unsigned)sc->count & (SPH_BLEN - 1U);
+ current = (unsigned)sc->count & (SPH_BLEN - 1U);
#else
- current = (unsigned)sc->count_low & (SPH_BLEN - 1U);
+ current = (unsigned)sc->count_low & (SPH_BLEN - 1U);
#endif
- while (len > 0) {
- unsigned clen;
+ while (len > 0) {
+ unsigned clen;
#if !SPH_64
- sph_u32 clow, clow2;
+ sph_u32 clow, clow2;
#endif
- clen = SPH_BLEN - current;
- if (clen > len)
- clen = (unsigned)len;
- memcpy(sc->buf + current, data, clen);
- data = (const unsigned char *)data + clen;
- current += clen;
- len -= clen;
- if (current == SPH_BLEN) {
- RFUN(sc->buf, SPH_VAL);
- current = 0;
- }
+ clen = SPH_BLEN - current;
+ if (clen > len)
+ clen = (unsigned)len;
+ memcpy(sc->buf + current, data, clen);
+ data = (const unsigned char *)data + clen;
+ current += clen;
+ len -= clen;
+ if (current == SPH_BLEN) {
+ RFUN(sc->buf, SPH_VAL);
+ current = 0;
+ }
#if SPH_64
- sc->count += clen;
+ sc->count += clen;
#else
- clow = sc->count_low;
- clow2 = SPH_T32(clow + clen);
- sc->count_low = clow2;
- if (clow2 < clow)
- sc->count_high ++;
+ clow = sc->count_low;
+ clow2 = SPH_T32(clow + clen);
+ sc->count_low = clow2;
+ if (clow2 < clow)
+ sc->count_high ++;
#endif
- }
+ }
}
#ifdef SPH_UPTR
void
SPH_XCAT(sph_, HASH)(void *cc, const void *data, size_t len)
{
- SPH_XCAT(sph_, SPH_XCAT(HASH, _context)) *sc;
- unsigned current;
- size_t orig_len;
+ SPH_XCAT(sph_, SPH_XCAT(HASH, _context)) *sc;
+ unsigned current;
+ size_t orig_len;
#if !SPH_64
- sph_u32 clow, clow2;
+ sph_u32 clow, clow2;
#endif
- if (len < (2 * SPH_BLEN)) {
- SPH_XCAT(HASH, _short)(cc, data, len);
- return;
- }
- sc = cc;
+ if (len < (2 * SPH_BLEN)) {
+ SPH_XCAT(HASH, _short)(cc, data, len);
+ return;
+ }
+ sc = cc;
#if SPH_64
- current = (unsigned)sc->count & (SPH_BLEN - 1U);
+ current = (unsigned)sc->count & (SPH_BLEN - 1U);
#else
- current = (unsigned)sc->count_low & (SPH_BLEN - 1U);
+ current = (unsigned)sc->count_low & (SPH_BLEN - 1U);
#endif
- if (current > 0) {
- unsigned t;
+ if (current > 0) {
+ unsigned t;
- t = SPH_BLEN - current;
- SPH_XCAT(HASH, _short)(cc, data, t);
- data = (const unsigned char *)data + t;
- len -= t;
- }
+ t = SPH_BLEN - current;
+ SPH_XCAT(HASH, _short)(cc, data, t);
+ data = (const unsigned char *)data + t;
+ len -= t;
+ }
#if !SPH_UNALIGNED
- if (((SPH_UPTR)data & (SPH_WLEN - 1U)) != 0) {
- SPH_XCAT(HASH, _short)(cc, data, len);
- return;
- }
+ if (((SPH_UPTR)data & (SPH_WLEN - 1U)) != 0) {
+ SPH_XCAT(HASH, _short)(cc, data, len);
+ return;
+ }
#endif
- orig_len = len;
- while (len >= SPH_BLEN) {
- RFUN(data, SPH_VAL);
- len -= SPH_BLEN;
- data = (const unsigned char *)data + SPH_BLEN;
- }
- if (len > 0)
- memcpy(sc->buf, data, len);
+ orig_len = len;
+ while (len >= SPH_BLEN) {
+ RFUN(data, SPH_VAL);
+ len -= SPH_BLEN;
+ data = (const unsigned char *)data + SPH_BLEN;
+ }
+ if (len > 0)
+ memcpy(sc->buf, data, len);
#if SPH_64
- sc->count += (sph_u64)orig_len;
+ sc->count += (sph_u64)orig_len;
#else
- clow = sc->count_low;
- clow2 = SPH_T32(clow + orig_len);
- sc->count_low = clow2;
- if (clow2 < clow)
- sc->count_high ++;
- /*
- * This code handles the improbable situation where "size_t" is
- * greater than 32 bits, and yet we do not have a 64-bit type.
- */
- orig_len >>= 12;
- orig_len >>= 10;
- orig_len >>= 10;
- sc->count_high += orig_len;
+ clow = sc->count_low;
+ clow2 = SPH_T32(clow + orig_len);
+ sc->count_low = clow2;
+ if (clow2 < clow)
+ sc->count_high ++;
+ /*
+ * This code handles the improbable situation where "size_t" is
+ * greater than 32 bits, and yet we do not have a 64-bit type.
+ */
+ orig_len >>= 12;
+ orig_len >>= 10;
+ orig_len >>= 10;
+ sc->count_high += orig_len;
#endif
}
#endif
@@ -237,110 +237,110 @@ SPH_XCAT(sph_, HASH)(void *cc, const void *data, size_t len)
*/
static void
SPH_XCAT(HASH, _addbits_and_close)(void *cc,
- unsigned ub, unsigned n, void *dst, unsigned rnum)
+ unsigned ub, unsigned n, void *dst, unsigned rnum)
{
- SPH_XCAT(sph_, SPH_XCAT(HASH, _context)) *sc;
- unsigned current, u;
+ SPH_XCAT(sph_, SPH_XCAT(HASH, _context)) *sc;
+ unsigned current, u;
#if !SPH_64
- sph_u32 low, high;
+ sph_u32 low, high;
#endif
- sc = cc;
+ sc = cc;
#if SPH_64
- current = (unsigned)sc->count & (SPH_BLEN - 1U);
+ current = (unsigned)sc->count & (SPH_BLEN - 1U);
#else
- current = (unsigned)sc->count_low & (SPH_BLEN - 1U);
+ current = (unsigned)sc->count_low & (SPH_BLEN - 1U);
#endif
#ifdef PW01
- sc->buf[current ++] = (0x100 | (ub & 0xFF)) >> (8 - n);
+ sc->buf[current ++] = (0x100 | (ub & 0xFF)) >> (8 - n);
#else
- {
- unsigned z;
+ {
+ unsigned z;
- z = 0x80U >> n;
- sc->buf[current ++] = ((ub & -z) | z) & 0xFF;
- }
+ z = 0x80U >> n;
+ sc->buf[current ++] = ((ub & -z) | z) & 0xFF;
+ }
#endif
- if (current > SPH_MAXPAD) {
- memset(sc->buf + current, 0, SPH_BLEN - current);
- RFUN(sc->buf, SPH_VAL);
- memset(sc->buf, 0, SPH_MAXPAD);
- } else {
- memset(sc->buf + current, 0, SPH_MAXPAD - current);
- }
+ if (current > SPH_MAXPAD) {
+ memset(sc->buf + current, 0, SPH_BLEN - current);
+ RFUN(sc->buf, SPH_VAL);
+ memset(sc->buf, 0, SPH_MAXPAD);
+ } else {
+ memset(sc->buf + current, 0, SPH_MAXPAD - current);
+ }
#if defined BE64
#if defined PLW1
- sph_enc64be_aligned(sc->buf + SPH_MAXPAD,
- SPH_T64(sc->count << 3) + (sph_u64)n);
+ sph_enc64be_aligned(sc->buf + SPH_MAXPAD,
+ SPH_T64(sc->count << 3) + (sph_u64)n);
#elif defined PLW4
- memset(sc->buf + SPH_MAXPAD, 0, 2 * SPH_WLEN);
- sph_enc64be_aligned(sc->buf + SPH_MAXPAD + 2 * SPH_WLEN,
- sc->count >> 61);
- sph_enc64be_aligned(sc->buf + SPH_MAXPAD + 3 * SPH_WLEN,
- SPH_T64(sc->count << 3) + (sph_u64)n);
+ memset(sc->buf + SPH_MAXPAD, 0, 2 * SPH_WLEN);
+ sph_enc64be_aligned(sc->buf + SPH_MAXPAD + 2 * SPH_WLEN,
+ sc->count >> 61);
+ sph_enc64be_aligned(sc->buf + SPH_MAXPAD + 3 * SPH_WLEN,
+ SPH_T64(sc->count << 3) + (sph_u64)n);
#else
- sph_enc64be_aligned(sc->buf + SPH_MAXPAD, sc->count >> 61);
- sph_enc64be_aligned(sc->buf + SPH_MAXPAD + SPH_WLEN,
- SPH_T64(sc->count << 3) + (sph_u64)n);
+ sph_enc64be_aligned(sc->buf + SPH_MAXPAD, sc->count >> 61);
+ sph_enc64be_aligned(sc->buf + SPH_MAXPAD + SPH_WLEN,
+ SPH_T64(sc->count << 3) + (sph_u64)n);
#endif
#elif defined LE64
#if defined PLW1
- sph_enc64le_aligned(sc->buf + SPH_MAXPAD,
- SPH_T64(sc->count << 3) + (sph_u64)n);
+ sph_enc64le_aligned(sc->buf + SPH_MAXPAD,
+ SPH_T64(sc->count << 3) + (sph_u64)n);
#elif defined PLW1
- sph_enc64le_aligned(sc->buf + SPH_MAXPAD,
- SPH_T64(sc->count << 3) + (sph_u64)n);
- sph_enc64le_aligned(sc->buf + SPH_MAXPAD + SPH_WLEN, sc->count >> 61);
- memset(sc->buf + SPH_MAXPAD + 2 * SPH_WLEN, 0, 2 * SPH_WLEN);
+ sph_enc64le_aligned(sc->buf + SPH_MAXPAD,
+ SPH_T64(sc->count << 3) + (sph_u64)n);
+ sph_enc64le_aligned(sc->buf + SPH_MAXPAD + SPH_WLEN, sc->count >> 61);
+ memset(sc->buf + SPH_MAXPAD + 2 * SPH_WLEN, 0, 2 * SPH_WLEN);
#else
- sph_enc64le_aligned(sc->buf + SPH_MAXPAD,
- SPH_T64(sc->count << 3) + (sph_u64)n);
- sph_enc64le_aligned(sc->buf + SPH_MAXPAD + SPH_WLEN, sc->count >> 61);
+ sph_enc64le_aligned(sc->buf + SPH_MAXPAD,
+ SPH_T64(sc->count << 3) + (sph_u64)n);
+ sph_enc64le_aligned(sc->buf + SPH_MAXPAD + SPH_WLEN, sc->count >> 61);
#endif
#else
#if SPH_64
#ifdef BE32
- sph_enc64be_aligned(sc->buf + SPH_MAXPAD,
- SPH_T64(sc->count << 3) + (sph_u64)n);
+ sph_enc64be_aligned(sc->buf + SPH_MAXPAD,
+ SPH_T64(sc->count << 3) + (sph_u64)n);
#else
- sph_enc64le_aligned(sc->buf + SPH_MAXPAD,
- SPH_T64(sc->count << 3) + (sph_u64)n);
+ sph_enc64le_aligned(sc->buf + SPH_MAXPAD,
+ SPH_T64(sc->count << 3) + (sph_u64)n);
#endif
#else
- low = sc->count_low;
- high = SPH_T32((sc->count_high << 3) | (low >> 29));
- low = SPH_T32(low << 3) + (sph_u32)n;
+ low = sc->count_low;
+ high = SPH_T32((sc->count_high << 3) | (low >> 29));
+ low = SPH_T32(low << 3) + (sph_u32)n;
#ifdef BE32
- sph_enc32be(sc->buf + SPH_MAXPAD, high);
- sph_enc32be(sc->buf + SPH_MAXPAD + SPH_WLEN, low);
+ sph_enc32be(sc->buf + SPH_MAXPAD, high);
+ sph_enc32be(sc->buf + SPH_MAXPAD + SPH_WLEN, low);
#else
- sph_enc32le(sc->buf + SPH_MAXPAD, low);
- sph_enc32le(sc->buf + SPH_MAXPAD + SPH_WLEN, high);
+ sph_enc32le(sc->buf + SPH_MAXPAD, low);
+ sph_enc32le(sc->buf + SPH_MAXPAD + SPH_WLEN, high);
#endif
#endif
#endif
- RFUN(sc->buf, SPH_VAL);
+ RFUN(sc->buf, SPH_VAL);
#ifdef SPH_NO_OUTPUT
- (void)dst;
- (void)rnum;
- (void)u;
+ (void)dst;
+ (void)rnum;
+ (void)u;
#else
- for (u = 0; u < rnum; u ++) {
+ for (u = 0; u < rnum; u ++) {
#if defined BE64
- sph_enc64be((unsigned char *)dst + 8 * u, sc->val[u]);
+ sph_enc64be((unsigned char *)dst + 8 * u, sc->val[u]);
#elif defined LE64
- sph_enc64le((unsigned char *)dst + 8 * u, sc->val[u]);
+ sph_enc64le((unsigned char *)dst + 8 * u, sc->val[u]);
#elif defined BE32
- sph_enc32be((unsigned char *)dst + 4 * u, sc->val[u]);
+ sph_enc32be((unsigned char *)dst + 4 * u, sc->val[u]);
#else
- sph_enc32le((unsigned char *)dst + 4 * u, sc->val[u]);
+ sph_enc32le((unsigned char *)dst + 4 * u, sc->val[u]);
#endif
- }
+ }
#endif
}
static void
SPH_XCAT(HASH, _close)(void *cc, void *dst, unsigned rnum)
{
- SPH_XCAT(HASH, _addbits_and_close)(cc, 0, 0, dst, rnum);
+ SPH_XCAT(HASH, _addbits_and_close)(cc, 0, 0, dst, rnum);
}
diff --git a/libqpdf/sph/sph_sha2.h b/libqpdf/sph/sph_sha2.h
index dde62088..dbab2947 100644
--- a/libqpdf/sph/sph_sha2.h
+++ b/libqpdf/sph/sph_sha2.h
@@ -69,12 +69,12 @@ extern "C" {
*/
typedef struct {
#ifndef DOXYGEN_IGNORE
- unsigned char buf[64]; /* first field, for alignment */
- sph_u32 val[8];
+ unsigned char buf[64]; /* first field, for alignment */
+ sph_u32 val[8];
#if SPH_64
- sph_u64 count;
+ sph_u64 count;
#else
- sph_u32 count_high, count_low;
+ sph_u32 count_high, count_low;
#endif
#endif
} sph_sha224_context;
@@ -233,9 +233,9 @@ void sph_sha256_comp(const sph_u32 msg[16], sph_u32 val[8]);
*/
typedef struct {
#ifndef DOXYGEN_IGNORE
- unsigned char buf[128]; /* first field, for alignment */
- sph_u64 val[8];
- sph_u64 count;
+ unsigned char buf[128]; /* first field, for alignment */
+ sph_u64 val[8];
+ sph_u64 count;
#endif
} sph_sha384_context;
diff --git a/libqpdf/sph/sph_types.h b/libqpdf/sph/sph_types.h
index ce2a1db1..25beac3e 100644
--- a/libqpdf/sph/sph_types.h
+++ b/libqpdf/sph/sph_types.h
@@ -1033,7 +1033,7 @@ typedef long long sph_s64;
* 64-bit Sparc architecture (implies v9).
*/
#elif ((defined __sparc__ || defined __sparc) && defined __arch64__) \
- || defined __sparcv9
+ || defined __sparcv9
#define SPH_DETECT_BIG_ENDIAN 1
#define SPH_DETECT_UPTR sph_u64
@@ -1046,7 +1046,7 @@ typedef long long sph_s64;
* 32-bit Sparc.
*/
#elif (defined __sparc__ || defined __sparc) \
- && !(defined __sparcv9 || defined __arch64__)
+ && !(defined __sparcv9 || defined __arch64__)
#define SPH_DETECT_BIG_ENDIAN 1
#define SPH_DETECT_UPTR sph_u32
@@ -1080,7 +1080,7 @@ typedef long long sph_s64;
* PowerPC.
*/
#elif defined __powerpc__ || defined __POWERPC__ || defined __ppc__ \
- || defined _ARCH_PPC
+ || defined _ARCH_PPC
/*
* Note: we do not declare cross-endian access to be "fast": even if
@@ -1106,7 +1106,7 @@ typedef long long sph_s64;
* Itanium, 64-bit.
*/
#elif defined __ia64 || defined __ia64__ \
- || defined __itanium__ || defined _M_IA64
+ || defined __itanium__ || defined _M_IA64
#if defined __BIG_ENDIAN__ || defined _BIG_ENDIAN
#define SPH_DETECT_BIG_ENDIAN 1
@@ -1192,8 +1192,8 @@ typedef long long sph_s64;
static SPH_INLINE sph_u32
sph_bswap32(sph_u32 x)
{
- __asm__ __volatile__ ("bswapl %0" : "=r" (x) : "0" (x));
- return x;
+ __asm__ __volatile__ ("bswapl %0" : "=r" (x) : "0" (x));
+ return x;
}
#if SPH_64
@@ -1201,8 +1201,8 @@ sph_bswap32(sph_u32 x)
static SPH_INLINE sph_u64
sph_bswap64(sph_u64 x)
{
- return ((sph_u64)sph_bswap32((sph_u32)x) << 32)
- | (sph_u64)sph_bswap32((sph_u32)(x >> 32));
+ return ((sph_u64)sph_bswap32((sph_u32)x) << 32)
+ | (sph_u64)sph_bswap32((sph_u32)(x >> 32));
}
#endif
@@ -1217,8 +1217,8 @@ sph_bswap64(sph_u64 x)
static SPH_INLINE sph_u32
sph_bswap32(sph_u32 x)
{
- __asm__ __volatile__ ("bswapl %0" : "=r" (x) : "0" (x));
- return x;
+ __asm__ __volatile__ ("bswapl %0" : "=r" (x) : "0" (x));
+ return x;
}
#if SPH_64
@@ -1226,8 +1226,8 @@ sph_bswap32(sph_u32 x)
static SPH_INLINE sph_u64
sph_bswap64(sph_u64 x)
{
- __asm__ __volatile__ ("bswapq %0" : "=r" (x) : "0" (x));
- return x;
+ __asm__ __volatile__ ("bswapq %0" : "=r" (x) : "0" (x));
+ return x;
}
#endif
@@ -1243,11 +1243,11 @@ sph_bswap64(sph_u64 x)
static __inline sph_u32 __declspec(naked) __fastcall
sph_bswap32(sph_u32 x)
{
- __asm {
- bswap ecx
- mov eax,ecx
- ret
- }
+ __asm {
+ bswap ecx
+ mov eax,ecx
+ ret
+ }
}
#if SPH_64
@@ -1255,8 +1255,8 @@ sph_bswap32(sph_u32 x)
static SPH_INLINE sph_u64
sph_bswap64(sph_u64 x)
{
- return ((sph_u64)sph_bswap32((sph_u32)x) << 32)
- | (sph_u64)sph_bswap32((sph_u32)(x >> 32));
+ return ((sph_u64)sph_bswap32((sph_u32)x) << 32)
+ | (sph_u64)sph_bswap32((sph_u32)(x >> 32));
}
#endif
@@ -1270,10 +1270,10 @@ sph_bswap64(sph_u64 x)
static SPH_INLINE sph_u32
sph_bswap32(sph_u32 x)
{
- x = SPH_T32((x << 16) | (x >> 16));
- x = ((x & SPH_C32(0xFF00FF00)) >> 8)
- | ((x & SPH_C32(0x00FF00FF)) << 8);
- return x;
+ x = SPH_T32((x << 16) | (x >> 16));
+ x = ((x & SPH_C32(0xFF00FF00)) >> 8)
+ | ((x & SPH_C32(0x00FF00FF)) << 8);
+ return x;
}
#if SPH_64
@@ -1287,12 +1287,12 @@ sph_bswap32(sph_u32 x)
static SPH_INLINE sph_u64
sph_bswap64(sph_u64 x)
{
- x = SPH_T64((x << 32) | (x >> 32));
- x = ((x & SPH_C64(0xFFFF0000FFFF0000)) >> 16)
- | ((x & SPH_C64(0x0000FFFF0000FFFF)) << 16);
- x = ((x & SPH_C64(0xFF00FF00FF00FF00)) >> 8)
- | ((x & SPH_C64(0x00FF00FF00FF00FF)) << 8);
- return x;
+ x = SPH_T64((x << 32) | (x >> 32));
+ x = ((x & SPH_C64(0xFFFF0000FFFF0000)) >> 16)
+ | ((x & SPH_C64(0x0000FFFF0000FFFF)) << 16);
+ x = ((x & SPH_C64(0xFF00FF00FF00FF00)) >> 8)
+ | ((x & SPH_C64(0x00FF00FF00FF00FF)) << 8);
+ return x;
}
#endif
@@ -1318,48 +1318,48 @@ sph_bswap64(sph_u64 x)
*/
#define SPH_SPARCV9_SET_ASI \
- sph_u32 sph_sparcv9_asi; \
- __asm__ __volatile__ ( \
- "rd %%asi,%0\n\twr %%g0,0x88,%%asi" : "=r" (sph_sparcv9_asi));
+ sph_u32 sph_sparcv9_asi; \
+ __asm__ __volatile__ ( \
+ "rd %%asi,%0\n\twr %%g0,0x88,%%asi" : "=r" (sph_sparcv9_asi));
#define SPH_SPARCV9_RESET_ASI \
- __asm__ __volatile__ ("wr %%g0,%0,%%asi" : : "r" (sph_sparcv9_asi));
+ __asm__ __volatile__ ("wr %%g0,%0,%%asi" : : "r" (sph_sparcv9_asi));
#define SPH_SPARCV9_DEC32LE(base, idx) ({ \
- sph_u32 sph_sparcv9_tmp; \
- __asm__ __volatile__ ("lda [%1+" #idx "*4]%%asi,%0" \
- : "=r" (sph_sparcv9_tmp) : "r" (base)); \
- sph_sparcv9_tmp; \
- })
+ sph_u32 sph_sparcv9_tmp; \
+ __asm__ __volatile__ ("lda [%1+" #idx "*4]%%asi,%0" \
+ : "=r" (sph_sparcv9_tmp) : "r" (base)); \
+ sph_sparcv9_tmp; \
+ })
#endif
static SPH_INLINE void
sph_enc16be(void *dst, unsigned val)
{
- ((unsigned char *)dst)[0] = (unsigned char)(val >> 8);
- ((unsigned char *)dst)[1] = (unsigned char)val;
+ ((unsigned char *)dst)[0] = (unsigned char)(val >> 8);
+ ((unsigned char *)dst)[1] = (unsigned char)val;
}
static SPH_INLINE unsigned
sph_dec16be(const void *src)
{
- return ((unsigned)(((const unsigned char *)src)[0]) << 8)
- | (unsigned)(((const unsigned char *)src)[1]);
+ return ((unsigned)(((const unsigned char *)src)[0]) << 8)
+ | (unsigned)(((const unsigned char *)src)[1]);
}
static SPH_INLINE void
sph_enc16le(void *dst, unsigned val)
{
- ((unsigned char *)dst)[0] = (unsigned char)val;
- ((unsigned char *)dst)[1] = (unsigned char)(val >> 8);
+ ((unsigned char *)dst)[0] = (unsigned char)val;
+ ((unsigned char *)dst)[1] = (unsigned char)(val >> 8);
}
static SPH_INLINE unsigned
sph_dec16le(const void *src)
{
- return (unsigned)(((const unsigned char *)src)[0])
- | ((unsigned)(((const unsigned char *)src)[1]) << 8);
+ return (unsigned)(((const unsigned char *)src)[0])
+ | ((unsigned)(((const unsigned char *)src)[1]) << 8);
}
/**
@@ -1374,27 +1374,27 @@ sph_enc32be(void *dst, sph_u32 val)
#if defined SPH_UPTR
#if SPH_UNALIGNED
#if SPH_LITTLE_ENDIAN
- val = sph_bswap32(val);
+ val = sph_bswap32(val);
#endif
- *(sph_u32 *)dst = val;
+ *(sph_u32 *)dst = val;
#else
- if (((SPH_UPTR)dst & 3) == 0) {
+ if (((SPH_UPTR)dst & 3) == 0) {
#if SPH_LITTLE_ENDIAN
- val = sph_bswap32(val);
+ val = sph_bswap32(val);
#endif
- *(sph_u32 *)dst = val;
- } else {
- ((unsigned char *)dst)[0] = (unsigned char)(val >> 24);
- ((unsigned char *)dst)[1] = (unsigned char)(val >> 16);
- ((unsigned char *)dst)[2] = (unsigned char)(val >> 8);
- ((unsigned char *)dst)[3] = (unsigned char)(val);
- }
+ *(sph_u32 *)dst = val;
+ } else {
+ ((unsigned char *)dst)[0] = (unsigned char)(val >> 24);
+ ((unsigned char *)dst)[1] = (unsigned char)(val >> 16);
+ ((unsigned char *)dst)[2] = (unsigned char)(val >> 8);
+ ((unsigned char *)dst)[3] = (unsigned char)(val);
+ }
#endif
#else
- ((unsigned char *)dst)[0] = (unsigned char)(val >> 24);
- ((unsigned char *)dst)[1] = (unsigned char)(val >> 16);
- ((unsigned char *)dst)[2] = (unsigned char)(val >> 8);
- ((unsigned char *)dst)[3] = (unsigned char)(val);
+ ((unsigned char *)dst)[0] = (unsigned char)(val >> 24);
+ ((unsigned char *)dst)[1] = (unsigned char)(val >> 16);
+ ((unsigned char *)dst)[2] = (unsigned char)(val >> 8);
+ ((unsigned char *)dst)[3] = (unsigned char)(val);
#endif
}
@@ -1409,14 +1409,14 @@ static SPH_INLINE void
sph_enc32be_aligned(void *dst, sph_u32 val)
{
#if SPH_LITTLE_ENDIAN
- *(sph_u32 *)dst = sph_bswap32(val);
+ *(sph_u32 *)dst = sph_bswap32(val);
#elif SPH_BIG_ENDIAN
- *(sph_u32 *)dst = val;
+ *(sph_u32 *)dst = val;
#else
- ((unsigned char *)dst)[0] = (unsigned char)(val >> 24);
- ((unsigned char *)dst)[1] = (unsigned char)(val >> 16);
- ((unsigned char *)dst)[2] = (unsigned char)(val >> 8);
- ((unsigned char *)dst)[3] = (unsigned char)(val);
+ ((unsigned char *)dst)[0] = (unsigned char)(val >> 24);
+ ((unsigned char *)dst)[1] = (unsigned char)(val >> 16);
+ ((unsigned char *)dst)[2] = (unsigned char)(val >> 8);
+ ((unsigned char *)dst)[3] = (unsigned char)(val);
#endif
}
@@ -1432,29 +1432,29 @@ sph_dec32be(const void *src)
#if defined SPH_UPTR
#if SPH_UNALIGNED
#if SPH_LITTLE_ENDIAN
- return sph_bswap32(*(const sph_u32 *)src);
+ return sph_bswap32(*(const sph_u32 *)src);
#else
- return *(const sph_u32 *)src;
+ return *(const sph_u32 *)src;
#endif
#else
- if (((SPH_UPTR)src & 3) == 0) {
+ if (((SPH_UPTR)src & 3) == 0) {
#if SPH_LITTLE_ENDIAN
- return sph_bswap32(*(const sph_u32 *)src);
+ return sph_bswap32(*(const sph_u32 *)src);
#else
- return *(const sph_u32 *)src;
+ return *(const sph_u32 *)src;
#endif
- } else {
- return ((sph_u32)(((const unsigned char *)src)[0]) << 24)
- | ((sph_u32)(((const unsigned char *)src)[1]) << 16)
- | ((sph_u32)(((const unsigned char *)src)[2]) << 8)
- | (sph_u32)(((const unsigned char *)src)[3]);
- }
+ } else {
+ return ((sph_u32)(((const unsigned char *)src)[0]) << 24)
+ | ((sph_u32)(((const unsigned char *)src)[1]) << 16)
+ | ((sph_u32)(((const unsigned char *)src)[2]) << 8)
+ | (sph_u32)(((const unsigned char *)src)[3]);
+ }
#endif
#else
- return ((sph_u32)(((const unsigned char *)src)[0]) << 24)
- | ((sph_u32)(((const unsigned char *)src)[1]) << 16)
- | ((sph_u32)(((const unsigned char *)src)[2]) << 8)
- | (sph_u32)(((const unsigned char *)src)[3]);
+ return ((sph_u32)(((const unsigned char *)src)[0]) << 24)
+ | ((sph_u32)(((const unsigned char *)src)[1]) << 16)
+ | ((sph_u32)(((const unsigned char *)src)[2]) << 8)
+ | (sph_u32)(((const unsigned char *)src)[3]);
#endif
}
@@ -1469,14 +1469,14 @@ static SPH_INLINE sph_u32
sph_dec32be_aligned(const void *src)
{
#if SPH_LITTLE_ENDIAN
- return sph_bswap32(*(const sph_u32 *)src);
+ return sph_bswap32(*(const sph_u32 *)src);
#elif SPH_BIG_ENDIAN
- return *(const sph_u32 *)src;
+ return *(const sph_u32 *)src;
#else
- return ((sph_u32)(((const unsigned char *)src)[0]) << 24)
- | ((sph_u32)(((const unsigned char *)src)[1]) << 16)
- | ((sph_u32)(((const unsigned char *)src)[2]) << 8)
- | (sph_u32)(((const unsigned char *)src)[3]);
+ return ((sph_u32)(((const unsigned char *)src)[0]) << 24)
+ | ((sph_u32)(((const unsigned char *)src)[1]) << 16)
+ | ((sph_u32)(((const unsigned char *)src)[2]) << 8)
+ | (sph_u32)(((const unsigned char *)src)[3]);
#endif
}
@@ -1492,27 +1492,27 @@ sph_enc32le(void *dst, sph_u32 val)
#if defined SPH_UPTR
#if SPH_UNALIGNED
#if SPH_BIG_ENDIAN
- val = sph_bswap32(val);
+ val = sph_bswap32(val);
#endif
- *(sph_u32 *)dst = val;
+ *(sph_u32 *)dst = val;
#else
- if (((SPH_UPTR)dst & 3) == 0) {
+ if (((SPH_UPTR)dst & 3) == 0) {
#if SPH_BIG_ENDIAN
- val = sph_bswap32(val);
+ val = sph_bswap32(val);
#endif
- *(sph_u32 *)dst = val;
- } else {
- ((unsigned char *)dst)[0] = (unsigned char)(val);
- ((unsigned char *)dst)[1] = (unsigned char)(val >> 8);
- ((unsigned char *)dst)[2] = (unsigned char)(val >> 16);
- ((unsigned char *)dst)[3] = (unsigned char)(val >> 24);
- }
+ *(sph_u32 *)dst = val;
+ } else {
+ ((unsigned char *)dst)[0] = (unsigned char)(val);
+ ((unsigned char *)dst)[1] = (unsigned char)(val >> 8);
+ ((unsigned char *)dst)[2] = (unsigned char)(val >> 16);
+ ((unsigned char *)dst)[3] = (unsigned char)(val >> 24);
+ }
#endif
#else
- ((unsigned char *)dst)[0] = (unsigned char)(val);
- ((unsigned char *)dst)[1] = (unsigned char)(val >> 8);
- ((unsigned char *)dst)[2] = (unsigned char)(val >> 16);
- ((unsigned char *)dst)[3] = (unsigned char)(val >> 24);
+ ((unsigned char *)dst)[0] = (unsigned char)(val);
+ ((unsigned char *)dst)[1] = (unsigned char)(val >> 8);
+ ((unsigned char *)dst)[2] = (unsigned char)(val >> 16);
+ ((unsigned char *)dst)[3] = (unsigned char)(val >> 24);
#endif
}
@@ -1527,14 +1527,14 @@ static SPH_INLINE void
sph_enc32le_aligned(void *dst, sph_u32 val)
{
#if SPH_LITTLE_ENDIAN
- *(sph_u32 *)dst = val;
+ *(sph_u32 *)dst = val;
#elif SPH_BIG_ENDIAN
- *(sph_u32 *)dst = sph_bswap32(val);
+ *(sph_u32 *)dst = sph_bswap32(val);
#else
- ((unsigned char *)dst)[0] = (unsigned char)(val);
- ((unsigned char *)dst)[1] = (unsigned char)(val >> 8);
- ((unsigned char *)dst)[2] = (unsigned char)(val >> 16);
- ((unsigned char *)dst)[3] = (unsigned char)(val >> 24);
+ ((unsigned char *)dst)[0] = (unsigned char)(val);
+ ((unsigned char *)dst)[1] = (unsigned char)(val >> 8);
+ ((unsigned char *)dst)[2] = (unsigned char)(val >> 16);
+ ((unsigned char *)dst)[3] = (unsigned char)(val >> 24);
#endif
}
@@ -1550,25 +1550,25 @@ sph_dec32le(const void *src)
#if defined SPH_UPTR
#if SPH_UNALIGNED
#if SPH_BIG_ENDIAN
- return sph_bswap32(*(const sph_u32 *)src);
+ return sph_bswap32(*(const sph_u32 *)src);
#else
- return *(const sph_u32 *)src;
+ return *(const sph_u32 *)src;
#endif
#else
- if (((SPH_UPTR)src & 3) == 0) {
+ if (((SPH_UPTR)src & 3) == 0) {
#if SPH_BIG_ENDIAN
#if SPH_SPARCV9_GCC && !SPH_NO_ASM
- sph_u32 tmp;
-
- /*
- * "__volatile__" is needed here because without it,
- * gcc-3.4.3 miscompiles the code and performs the
- * access before the test on the address, thus triggering
- * a bus error...
- */
- __asm__ __volatile__ (
- "lda [%1]0x88,%0" : "=r" (tmp) : "r" (src));
- return tmp;
+ sph_u32 tmp;
+
+ /*
+ * "__volatile__" is needed here because without it,
+ * gcc-3.4.3 miscompiles the code and performs the
+ * access before the test on the address, thus triggering
+ * a bus error...
+ */
+ __asm__ __volatile__ (
+ "lda [%1]0x88,%0" : "=r" (tmp) : "r" (src));
+ return tmp;
/*
* On PowerPC, this turns out not to be worth the effort: the inline
* assembly makes GCC optimizer uncomfortable, which tends to nullify
@@ -1582,30 +1582,30 @@ sph_dec32le(const void *src)
* functions, the generic code appears to be efficient enough already.
*
#elif (SPH_PPC32_GCC || SPH_PPC64_GCC) && !SPH_NO_ASM
- sph_u32 tmp;
+ sph_u32 tmp;
- __asm__ __volatile__ (
- "lwbrx %0,0,%1" : "=r" (tmp) : "r" (src));
- return tmp;
+ __asm__ __volatile__ (
+ "lwbrx %0,0,%1" : "=r" (tmp) : "r" (src));
+ return tmp;
*/
#else
- return sph_bswap32(*(const sph_u32 *)src);
+ return sph_bswap32(*(const sph_u32 *)src);
#endif
#else
- return *(const sph_u32 *)src;
+ return *(const sph_u32 *)src;
#endif
- } else {
- return (sph_u32)(((const unsigned char *)src)[0])
- | ((sph_u32)(((const unsigned char *)src)[1]) << 8)
- | ((sph_u32)(((const unsigned char *)src)[2]) << 16)
- | ((sph_u32)(((const unsigned char *)src)[3]) << 24);
- }
+ } else {
+ return (sph_u32)(((const unsigned char *)src)[0])
+ | ((sph_u32)(((const unsigned char *)src)[1]) << 8)
+ | ((sph_u32)(((const unsigned char *)src)[2]) << 16)
+ | ((sph_u32)(((const unsigned char *)src)[3]) << 24);
+ }
#endif
#else
- return (sph_u32)(((const unsigned char *)src)[0])
- | ((sph_u32)(((const unsigned char *)src)[1]) << 8)
- | ((sph_u32)(((const unsigned char *)src)[2]) << 16)
- | ((sph_u32)(((const unsigned char *)src)[3]) << 24);
+ return (sph_u32)(((const unsigned char *)src)[0])
+ | ((sph_u32)(((const unsigned char *)src)[1]) << 8)
+ | ((sph_u32)(((const unsigned char *)src)[2]) << 16)
+ | ((sph_u32)(((const unsigned char *)src)[3]) << 24);
#endif
}
@@ -1620,30 +1620,30 @@ static SPH_INLINE sph_u32
sph_dec32le_aligned(const void *src)
{
#if SPH_LITTLE_ENDIAN
- return *(const sph_u32 *)src;
+ return *(const sph_u32 *)src;
#elif SPH_BIG_ENDIAN
#if SPH_SPARCV9_GCC && !SPH_NO_ASM
- sph_u32 tmp;
+ sph_u32 tmp;
- __asm__ __volatile__ ("lda [%1]0x88,%0" : "=r" (tmp) : "r" (src));
- return tmp;
+ __asm__ __volatile__ ("lda [%1]0x88,%0" : "=r" (tmp) : "r" (src));
+ return tmp;
/*
* Not worth it generally.
*
#elif (SPH_PPC32_GCC || SPH_PPC64_GCC) && !SPH_NO_ASM
- sph_u32 tmp;
+ sph_u32 tmp;
- __asm__ __volatile__ ("lwbrx %0,0,%1" : "=r" (tmp) : "r" (src));
- return tmp;
+ __asm__ __volatile__ ("lwbrx %0,0,%1" : "=r" (tmp) : "r" (src));
+ return tmp;
*/
#else
- return sph_bswap32(*(const sph_u32 *)src);
+ return sph_bswap32(*(const sph_u32 *)src);
#endif
#else
- return (sph_u32)(((const unsigned char *)src)[0])
- | ((sph_u32)(((const unsigned char *)src)[1]) << 8)
- | ((sph_u32)(((const unsigned char *)src)[2]) << 16)
- | ((sph_u32)(((const unsigned char *)src)[3]) << 24);
+ return (sph_u32)(((const unsigned char *)src)[0])
+ | ((sph_u32)(((const unsigned char *)src)[1]) << 8)
+ | ((sph_u32)(((const unsigned char *)src)[2]) << 16)
+ | ((sph_u32)(((const unsigned char *)src)[3]) << 24);
#endif
}
@@ -1661,35 +1661,35 @@ sph_enc64be(void *dst, sph_u64 val)
#if defined SPH_UPTR
#if SPH_UNALIGNED
#if SPH_LITTLE_ENDIAN
- val = sph_bswap64(val);
+ val = sph_bswap64(val);
#endif
- *(sph_u64 *)dst = val;
+ *(sph_u64 *)dst = val;
#else
- if (((SPH_UPTR)dst & 7) == 0) {
+ if (((SPH_UPTR)dst & 7) == 0) {
#if SPH_LITTLE_ENDIAN
- val = sph_bswap64(val);
-#endif
- *(sph_u64 *)dst = val;
- } else {
- ((unsigned char *)dst)[0] = (val >> 56);
- ((unsigned char *)dst)[1] = (val >> 48);
- ((unsigned char *)dst)[2] = (val >> 40);
- ((unsigned char *)dst)[3] = (val >> 32);
- ((unsigned char *)dst)[4] = (val >> 24);
- ((unsigned char *)dst)[5] = (val >> 16);
- ((unsigned char *)dst)[6] = (val >> 8);
- ((unsigned char *)dst)[7] = val;
- }
+ val = sph_bswap64(val);
+#endif
+ *(sph_u64 *)dst = val;
+ } else {
+ ((unsigned char *)dst)[0] = (val >> 56);
+ ((unsigned char *)dst)[1] = (val >> 48);
+ ((unsigned char *)dst)[2] = (val >> 40);
+ ((unsigned char *)dst)[3] = (val >> 32);
+ ((unsigned char *)dst)[4] = (val >> 24);
+ ((unsigned char *)dst)[5] = (val >> 16);
+ ((unsigned char *)dst)[6] = (val >> 8);
+ ((unsigned char *)dst)[7] = val;
+ }
#endif
#else
- ((unsigned char *)dst)[0] = (unsigned char)(val >> 56);
- ((unsigned char *)dst)[1] = (unsigned char)(val >> 48);
- ((unsigned char *)dst)[2] = (unsigned char)(val >> 40);
- ((unsigned char *)dst)[3] = (unsigned char)(val >> 32);
- ((unsigned char *)dst)[4] = (unsigned char)(val >> 24);
- ((unsigned char *)dst)[5] = (unsigned char)(val >> 16);
- ((unsigned char *)dst)[6] = (unsigned char)(val >> 8);
- ((unsigned char *)dst)[7] = (unsigned char)(val);
+ ((unsigned char *)dst)[0] = (unsigned char)(val >> 56);
+ ((unsigned char *)dst)[1] = (unsigned char)(val >> 48);
+ ((unsigned char *)dst)[2] = (unsigned char)(val >> 40);
+ ((unsigned char *)dst)[3] = (unsigned char)(val >> 32);
+ ((unsigned char *)dst)[4] = (unsigned char)(val >> 24);
+ ((unsigned char *)dst)[5] = (unsigned char)(val >> 16);
+ ((unsigned char *)dst)[6] = (unsigned char)(val >> 8);
+ ((unsigned char *)dst)[7] = (unsigned char)(val);
#endif
}
@@ -1704,18 +1704,18 @@ static SPH_INLINE void
sph_enc64be_aligned(void *dst, sph_u64 val)
{
#if SPH_LITTLE_ENDIAN
- *(sph_u64 *)dst = sph_bswap64(val);
+ *(sph_u64 *)dst = sph_bswap64(val);
#elif SPH_BIG_ENDIAN
- *(sph_u64 *)dst = val;
+ *(sph_u64 *)dst = val;
#else
- ((unsigned char *)dst)[0] = (unsigned char)(val >> 56);
- ((unsigned char *)dst)[1] = (unsigned char)(val >> 48);
- ((unsigned char *)dst)[2] = (unsigned char)(val >> 40);
- ((unsigned char *)dst)[3] = (unsigned char)(val >> 32);
- ((unsigned char *)dst)[4] = (unsigned char)(val >> 24);
- ((unsigned char *)dst)[5] = (unsigned char)(val >> 16);
- ((unsigned char *)dst)[6] = (unsigned char)(val >> 8);
- ((unsigned char *)dst)[7] = (unsigned char)(val);
+ ((unsigned char *)dst)[0] = (unsigned char)(val >> 56);
+ ((unsigned char *)dst)[1] = (unsigned char)(val >> 48);
+ ((unsigned char *)dst)[2] = (unsigned char)(val >> 40);
+ ((unsigned char *)dst)[3] = (unsigned char)(val >> 32);
+ ((unsigned char *)dst)[4] = (unsigned char)(val >> 24);
+ ((unsigned char *)dst)[5] = (unsigned char)(val >> 16);
+ ((unsigned char *)dst)[6] = (unsigned char)(val >> 8);
+ ((unsigned char *)dst)[7] = (unsigned char)(val);
#endif
}
@@ -1731,37 +1731,37 @@ sph_dec64be(const void *src)
#if defined SPH_UPTR
#if SPH_UNALIGNED
#if SPH_LITTLE_ENDIAN
- return sph_bswap64(*(const sph_u64 *)src);
+ return sph_bswap64(*(const sph_u64 *)src);
#else
- return *(const sph_u64 *)src;
+ return *(const sph_u64 *)src;
#endif
#else
- if (((SPH_UPTR)src & 7) == 0) {
+ if (((SPH_UPTR)src & 7) == 0) {
#if SPH_LITTLE_ENDIAN
- return sph_bswap64(*(const sph_u64 *)src);
+ return sph_bswap64(*(const sph_u64 *)src);
#else
- return *(const sph_u64 *)src;
-#endif
- } else {
- return ((sph_u64)(((const unsigned char *)src)[0]) << 56)
- | ((sph_u64)(((const unsigned char *)src)[1]) << 48)
- | ((sph_u64)(((const unsigned char *)src)[2]) << 40)
- | ((sph_u64)(((const unsigned char *)src)[3]) << 32)
- | ((sph_u64)(((const unsigned char *)src)[4]) << 24)
- | ((sph_u64)(((const unsigned char *)src)[5]) << 16)
- | ((sph_u64)(((const unsigned char *)src)[6]) << 8)
- | (sph_u64)(((const unsigned char *)src)[7]);
- }
+ return *(const sph_u64 *)src;
+#endif
+ } else {
+ return ((sph_u64)(((const unsigned char *)src)[0]) << 56)
+ | ((sph_u64)(((const unsigned char *)src)[1]) << 48)
+ | ((sph_u64)(((const unsigned char *)src)[2]) << 40)
+ | ((sph_u64)(((const unsigned char *)src)[3]) << 32)
+ | ((sph_u64)(((const unsigned char *)src)[4]) << 24)
+ | ((sph_u64)(((const unsigned char *)src)[5]) << 16)
+ | ((sph_u64)(((const unsigned char *)src)[6]) << 8)
+ | (sph_u64)(((const unsigned char *)src)[7]);
+ }
#endif
#else
- return ((sph_u64)(((const unsigned char *)src)[0]) << 56)
- | ((sph_u64)(((const unsigned char *)src)[1]) << 48)
- | ((sph_u64)(((const unsigned char *)src)[2]) << 40)
- | ((sph_u64)(((const unsigned char *)src)[3]) << 32)
- | ((sph_u64)(((const unsigned char *)src)[4]) << 24)
- | ((sph_u64)(((const unsigned char *)src)[5]) << 16)
- | ((sph_u64)(((const unsigned char *)src)[6]) << 8)
- | (sph_u64)(((const unsigned char *)src)[7]);
+ return ((sph_u64)(((const unsigned char *)src)[0]) << 56)
+ | ((sph_u64)(((const unsigned char *)src)[1]) << 48)
+ | ((sph_u64)(((const unsigned char *)src)[2]) << 40)
+ | ((sph_u64)(((const unsigned char *)src)[3]) << 32)
+ | ((sph_u64)(((const unsigned char *)src)[4]) << 24)
+ | ((sph_u64)(((const unsigned char *)src)[5]) << 16)
+ | ((sph_u64)(((const unsigned char *)src)[6]) << 8)
+ | (sph_u64)(((const unsigned char *)src)[7]);
#endif
}
@@ -1776,18 +1776,18 @@ static SPH_INLINE sph_u64
sph_dec64be_aligned(const void *src)
{
#if SPH_LITTLE_ENDIAN
- return sph_bswap64(*(const sph_u64 *)src);
+ return sph_bswap64(*(const sph_u64 *)src);
#elif SPH_BIG_ENDIAN
- return *(const sph_u64 *)src;
+ return *(const sph_u64 *)src;
#else
- return ((sph_u64)(((const unsigned char *)src)[0]) << 56)
- | ((sph_u64)(((const unsigned char *)src)[1]) << 48)
- | ((sph_u64)(((const unsigned char *)src)[2]) << 40)
- | ((sph_u64)(((const unsigned char *)src)[3]) << 32)
- | ((sph_u64)(((const unsigned char *)src)[4]) << 24)
- | ((sph_u64)(((const unsigned char *)src)[5]) << 16)
- | ((sph_u64)(((const unsigned char *)src)[6]) << 8)
- | (sph_u64)(((const unsigned char *)src)[7]);
+ return ((sph_u64)(((const unsigned char *)src)[0]) << 56)
+ | ((sph_u64)(((const unsigned char *)src)[1]) << 48)
+ | ((sph_u64)(((const unsigned char *)src)[2]) << 40)
+ | ((sph_u64)(((const unsigned char *)src)[3]) << 32)
+ | ((sph_u64)(((const unsigned char *)src)[4]) << 24)
+ | ((sph_u64)(((const unsigned char *)src)[5]) << 16)
+ | ((sph_u64)(((const unsigned char *)src)[6]) << 8)
+ | (sph_u64)(((const unsigned char *)src)[7]);
#endif
}
@@ -1803,35 +1803,35 @@ sph_enc64le(void *dst, sph_u64 val)
#if defined SPH_UPTR
#if SPH_UNALIGNED
#if SPH_BIG_ENDIAN
- val = sph_bswap64(val);
+ val = sph_bswap64(val);
#endif
- *(sph_u64 *)dst = val;
+ *(sph_u64 *)dst = val;
#else
- if (((SPH_UPTR)dst & 7) == 0) {
+ if (((SPH_UPTR)dst & 7) == 0) {
#if SPH_BIG_ENDIAN
- val = sph_bswap64(val);
-#endif
- *(sph_u64 *)dst = val;
- } else {
- ((unsigned char *)dst)[0] = val;
- ((unsigned char *)dst)[1] = (val >> 8);
- ((unsigned char *)dst)[2] = (val >> 16);
- ((unsigned char *)dst)[3] = (val >> 24);
- ((unsigned char *)dst)[4] = (val >> 32);
- ((unsigned char *)dst)[5] = (val >> 40);
- ((unsigned char *)dst)[6] = (val >> 48);
- ((unsigned char *)dst)[7] = (val >> 56);
- }
+ val = sph_bswap64(val);
+#endif
+ *(sph_u64 *)dst = val;
+ } else {
+ ((unsigned char *)dst)[0] = val;
+ ((unsigned char *)dst)[1] = (val >> 8);
+ ((unsigned char *)dst)[2] = (val >> 16);
+ ((unsigned char *)dst)[3] = (val >> 24);
+ ((unsigned char *)dst)[4] = (val >> 32);
+ ((unsigned char *)dst)[5] = (val >> 40);
+ ((unsigned char *)dst)[6] = (val >> 48);
+ ((unsigned char *)dst)[7] = (val >> 56);
+ }
#endif
#else
- ((unsigned char *)dst)[0] = (unsigned char)(val);
- ((unsigned char *)dst)[1] = (unsigned char)(val >> 8);
- ((unsigned char *)dst)[2] = (unsigned char)(val >> 16);
- ((unsigned char *)dst)[3] = (unsigned char)(val >> 24);
- ((unsigned char *)dst)[4] = (unsigned char)(val >> 32);
- ((unsigned char *)dst)[5] = (unsigned char)(val >> 40);
- ((unsigned char *)dst)[6] = (unsigned char)(val >> 48);
- ((unsigned char *)dst)[7] = (unsigned char)(val >> 56);
+ ((unsigned char *)dst)[0] = (unsigned char)(val);
+ ((unsigned char *)dst)[1] = (unsigned char)(val >> 8);
+ ((unsigned char *)dst)[2] = (unsigned char)(val >> 16);
+ ((unsigned char *)dst)[3] = (unsigned char)(val >> 24);
+ ((unsigned char *)dst)[4] = (unsigned char)(val >> 32);
+ ((unsigned char *)dst)[5] = (unsigned char)(val >> 40);
+ ((unsigned char *)dst)[6] = (unsigned char)(val >> 48);
+ ((unsigned char *)dst)[7] = (unsigned char)(val >> 56);
#endif
}
@@ -1846,18 +1846,18 @@ static SPH_INLINE void
sph_enc64le_aligned(void *dst, sph_u64 val)
{
#if SPH_LITTLE_ENDIAN
- *(sph_u64 *)dst = val;
+ *(sph_u64 *)dst = val;
#elif SPH_BIG_ENDIAN
- *(sph_u64 *)dst = sph_bswap64(val);
+ *(sph_u64 *)dst = sph_bswap64(val);
#else
- ((unsigned char *)dst)[0] = (unsigned char)(val);
- ((unsigned char *)dst)[1] = (unsigned char)(val >> 8);
- ((unsigned char *)dst)[2] = (unsigned char)(val >> 16);
- ((unsigned char *)dst)[3] = (unsigned char)(val >> 24);
- ((unsigned char *)dst)[4] = (unsigned char)(val >> 32);
- ((unsigned char *)dst)[5] = (unsigned char)(val >> 40);
- ((unsigned char *)dst)[6] = (unsigned char)(val >> 48);
- ((unsigned char *)dst)[7] = (unsigned char)(val >> 56);
+ ((unsigned char *)dst)[0] = (unsigned char)(val);
+ ((unsigned char *)dst)[1] = (unsigned char)(val >> 8);
+ ((unsigned char *)dst)[2] = (unsigned char)(val >> 16);
+ ((unsigned char *)dst)[3] = (unsigned char)(val >> 24);
+ ((unsigned char *)dst)[4] = (unsigned char)(val >> 32);
+ ((unsigned char *)dst)[5] = (unsigned char)(val >> 40);
+ ((unsigned char *)dst)[6] = (unsigned char)(val >> 48);
+ ((unsigned char *)dst)[7] = (unsigned char)(val >> 56);
#endif
}
@@ -1873,59 +1873,59 @@ sph_dec64le(const void *src)
#if defined SPH_UPTR
#if SPH_UNALIGNED
#if SPH_BIG_ENDIAN
- return sph_bswap64(*(const sph_u64 *)src);
+ return sph_bswap64(*(const sph_u64 *)src);
#else
- return *(const sph_u64 *)src;
+ return *(const sph_u64 *)src;
#endif
#else
- if (((SPH_UPTR)src & 7) == 0) {
+ if (((SPH_UPTR)src & 7) == 0) {
#if SPH_BIG_ENDIAN
#if SPH_SPARCV9_GCC_64 && !SPH_NO_ASM
- sph_u64 tmp;
+ sph_u64 tmp;
- __asm__ __volatile__ (
- "ldxa [%1]0x88,%0" : "=r" (tmp) : "r" (src));
- return tmp;
+ __asm__ __volatile__ (
+ "ldxa [%1]0x88,%0" : "=r" (tmp) : "r" (src));
+ return tmp;
/*
* Not worth it generally.
*
#elif SPH_PPC32_GCC && !SPH_NO_ASM
- return (sph_u64)sph_dec32le_aligned(src)
- | ((sph_u64)sph_dec32le_aligned(
- (const char *)src + 4) << 32);
+ return (sph_u64)sph_dec32le_aligned(src)
+ | ((sph_u64)sph_dec32le_aligned(
+ (const char *)src + 4) << 32);
#elif SPH_PPC64_GCC && !SPH_NO_ASM
- sph_u64 tmp;
+ sph_u64 tmp;
- __asm__ __volatile__ (
- "ldbrx %0,0,%1" : "=r" (tmp) : "r" (src));
- return tmp;
+ __asm__ __volatile__ (
+ "ldbrx %0,0,%1" : "=r" (tmp) : "r" (src));
+ return tmp;
*/
#else
- return sph_bswap64(*(const sph_u64 *)src);
+ return sph_bswap64(*(const sph_u64 *)src);
#endif
#else
- return *(const sph_u64 *)src;
-#endif
- } else {
- return (sph_u64)(((const unsigned char *)src)[0])
- | ((sph_u64)(((const unsigned char *)src)[1]) << 8)
- | ((sph_u64)(((const unsigned char *)src)[2]) << 16)
- | ((sph_u64)(((const unsigned char *)src)[3]) << 24)
- | ((sph_u64)(((const unsigned char *)src)[4]) << 32)
- | ((sph_u64)(((const unsigned char *)src)[5]) << 40)
- | ((sph_u64)(((const unsigned char *)src)[6]) << 48)
- | ((sph_u64)(((const unsigned char *)src)[7]) << 56);
- }
+ return *(const sph_u64 *)src;
+#endif
+ } else {
+ return (sph_u64)(((const unsigned char *)src)[0])
+ | ((sph_u64)(((const unsigned char *)src)[1]) << 8)
+ | ((sph_u64)(((const unsigned char *)src)[2]) << 16)
+ | ((sph_u64)(((const unsigned char *)src)[3]) << 24)
+ | ((sph_u64)(((const unsigned char *)src)[4]) << 32)
+ | ((sph_u64)(((const unsigned char *)src)[5]) << 40)
+ | ((sph_u64)(((const unsigned char *)src)[6]) << 48)
+ | ((sph_u64)(((const unsigned char *)src)[7]) << 56);
+ }
#endif
#else
- return (sph_u64)(((const unsigned char *)src)[0])
- | ((sph_u64)(((const unsigned char *)src)[1]) << 8)
- | ((sph_u64)(((const unsigned char *)src)[2]) << 16)
- | ((sph_u64)(((const unsigned char *)src)[3]) << 24)
- | ((sph_u64)(((const unsigned char *)src)[4]) << 32)
- | ((sph_u64)(((const unsigned char *)src)[5]) << 40)
- | ((sph_u64)(((const unsigned char *)src)[6]) << 48)
- | ((sph_u64)(((const unsigned char *)src)[7]) << 56);
+ return (sph_u64)(((const unsigned char *)src)[0])
+ | ((sph_u64)(((const unsigned char *)src)[1]) << 8)
+ | ((sph_u64)(((const unsigned char *)src)[2]) << 16)
+ | ((sph_u64)(((const unsigned char *)src)[3]) << 24)
+ | ((sph_u64)(((const unsigned char *)src)[4]) << 32)
+ | ((sph_u64)(((const unsigned char *)src)[5]) << 40)
+ | ((sph_u64)(((const unsigned char *)src)[6]) << 48)
+ | ((sph_u64)(((const unsigned char *)src)[7]) << 56);
#endif
}
@@ -1940,37 +1940,37 @@ static SPH_INLINE sph_u64
sph_dec64le_aligned(const void *src)
{
#if SPH_LITTLE_ENDIAN
- return *(const sph_u64 *)src;
+ return *(const sph_u64 *)src;
#elif SPH_BIG_ENDIAN
#if SPH_SPARCV9_GCC_64 && !SPH_NO_ASM
- sph_u64 tmp;
+ sph_u64 tmp;
- __asm__ __volatile__ ("ldxa [%1]0x88,%0" : "=r" (tmp) : "r" (src));
- return tmp;
+ __asm__ __volatile__ ("ldxa [%1]0x88,%0" : "=r" (tmp) : "r" (src));
+ return tmp;
/*
* Not worth it generally.
*
#elif SPH_PPC32_GCC && !SPH_NO_ASM
- return (sph_u64)sph_dec32le_aligned(src)
- | ((sph_u64)sph_dec32le_aligned((const char *)src + 4) << 32);
+ return (sph_u64)sph_dec32le_aligned(src)
+ | ((sph_u64)sph_dec32le_aligned((const char *)src + 4) << 32);
#elif SPH_PPC64_GCC && !SPH_NO_ASM
- sph_u64 tmp;
+ sph_u64 tmp;
- __asm__ __volatile__ ("ldbrx %0,0,%1" : "=r" (tmp) : "r" (src));
- return tmp;
+ __asm__ __volatile__ ("ldbrx %0,0,%1" : "=r" (tmp) : "r" (src));
+ return tmp;
*/
#else
- return sph_bswap64(*(const sph_u64 *)src);
+ return sph_bswap64(*(const sph_u64 *)src);
#endif
#else
- return (sph_u64)(((const unsigned char *)src)[0])
- | ((sph_u64)(((const unsigned char *)src)[1]) << 8)
- | ((sph_u64)(((const unsigned char *)src)[2]) << 16)
- | ((sph_u64)(((const unsigned char *)src)[3]) << 24)
- | ((sph_u64)(((const unsigned char *)src)[4]) << 32)
- | ((sph_u64)(((const unsigned char *)src)[5]) << 40)
- | ((sph_u64)(((const unsigned char *)src)[6]) << 48)
- | ((sph_u64)(((const unsigned char *)src)[7]) << 56);
+ return (sph_u64)(((const unsigned char *)src)[0])
+ | ((sph_u64)(((const unsigned char *)src)[1]) << 8)
+ | ((sph_u64)(((const unsigned char *)src)[2]) << 16)
+ | ((sph_u64)(((const unsigned char *)src)[3]) << 24)
+ | ((sph_u64)(((const unsigned char *)src)[4]) << 32)
+ | ((sph_u64)(((const unsigned char *)src)[5]) << 40)
+ | ((sph_u64)(((const unsigned char *)src)[6]) << 48)
+ | ((sph_u64)(((const unsigned char *)src)[7]) << 56);
#endif
}