aboutsummaryrefslogtreecommitdiffstats
path: root/libqpdf/sph/md_helper.c
diff options
context:
space:
mode:
authorJay Berkenbilt <ejb@ql.org>2022-02-08 15:18:08 +0100
committerJay Berkenbilt <ejb@ql.org>2022-02-08 17:51:15 +0100
commitcb769c62e55599e9f980001830bc61d9fcaa64a9 (patch)
tree0bf980c385a61cbc8720cf990762ffc1200f9d6a /libqpdf/sph/md_helper.c
parent716381f65a2b2dc72f8da2426ba71aeab02c507f (diff)
downloadqpdf-cb769c62e55599e9f980001830bc61d9fcaa64a9.tar.zst
WHITESPACE ONLY -- expand tabs in source code
This commit expands all tabs using an 8-character tab-width. You should ignore this commit when using git blame, or use git blame -w. In the early days, I used to use tabs where possible for indentation, since emacs did this automatically. In recent years, I have switched to only using spaces, which means qpdf source code has been a mixture of spaces and tabs. I have avoided cleaning this up because I did not want gratuitous whitespace changes to cloud the output of git blame, but I changed my mind after discussing with users who view qpdf source code in editors/IDEs that have other tab widths by default, and in light of the fact that I am planning to start applying automatic code formatting soon.
Diffstat (limited to 'libqpdf/sph/md_helper.c')
-rw-r--r--libqpdf/sph/md_helper.c262
1 files changed, 131 insertions, 131 deletions
diff --git a/libqpdf/sph/md_helper.c b/libqpdf/sph/md_helper.c
index 829391a7..5c114515 100644
--- a/libqpdf/sph/md_helper.c
+++ b/libqpdf/sph/md_helper.c
@@ -128,103 +128,103 @@ void
SPH_XCAT(sph_, HASH)(void *cc, const void *data, size_t len)
#endif
{
- SPH_XCAT(sph_, SPH_XCAT(HASH, _context)) *sc;
- unsigned current;
+ SPH_XCAT(sph_, SPH_XCAT(HASH, _context)) *sc;
+ unsigned current;
- sc = cc;
+ sc = cc;
#if SPH_64
- current = (unsigned)sc->count & (SPH_BLEN - 1U);
+ current = (unsigned)sc->count & (SPH_BLEN - 1U);
#else
- current = (unsigned)sc->count_low & (SPH_BLEN - 1U);
+ current = (unsigned)sc->count_low & (SPH_BLEN - 1U);
#endif
- while (len > 0) {
- unsigned clen;
+ while (len > 0) {
+ unsigned clen;
#if !SPH_64
- sph_u32 clow, clow2;
+ sph_u32 clow, clow2;
#endif
- clen = SPH_BLEN - current;
- if (clen > len)
- clen = (unsigned)len;
- memcpy(sc->buf + current, data, clen);
- data = (const unsigned char *)data + clen;
- current += clen;
- len -= clen;
- if (current == SPH_BLEN) {
- RFUN(sc->buf, SPH_VAL);
- current = 0;
- }
+ clen = SPH_BLEN - current;
+ if (clen > len)
+ clen = (unsigned)len;
+ memcpy(sc->buf + current, data, clen);
+ data = (const unsigned char *)data + clen;
+ current += clen;
+ len -= clen;
+ if (current == SPH_BLEN) {
+ RFUN(sc->buf, SPH_VAL);
+ current = 0;
+ }
#if SPH_64
- sc->count += clen;
+ sc->count += clen;
#else
- clow = sc->count_low;
- clow2 = SPH_T32(clow + clen);
- sc->count_low = clow2;
- if (clow2 < clow)
- sc->count_high ++;
+ clow = sc->count_low;
+ clow2 = SPH_T32(clow + clen);
+ sc->count_low = clow2;
+ if (clow2 < clow)
+ sc->count_high ++;
#endif
- }
+ }
}
#ifdef SPH_UPTR
void
SPH_XCAT(sph_, HASH)(void *cc, const void *data, size_t len)
{
- SPH_XCAT(sph_, SPH_XCAT(HASH, _context)) *sc;
- unsigned current;
- size_t orig_len;
+ SPH_XCAT(sph_, SPH_XCAT(HASH, _context)) *sc;
+ unsigned current;
+ size_t orig_len;
#if !SPH_64
- sph_u32 clow, clow2;
+ sph_u32 clow, clow2;
#endif
- if (len < (2 * SPH_BLEN)) {
- SPH_XCAT(HASH, _short)(cc, data, len);
- return;
- }
- sc = cc;
+ if (len < (2 * SPH_BLEN)) {
+ SPH_XCAT(HASH, _short)(cc, data, len);
+ return;
+ }
+ sc = cc;
#if SPH_64
- current = (unsigned)sc->count & (SPH_BLEN - 1U);
+ current = (unsigned)sc->count & (SPH_BLEN - 1U);
#else
- current = (unsigned)sc->count_low & (SPH_BLEN - 1U);
+ current = (unsigned)sc->count_low & (SPH_BLEN - 1U);
#endif
- if (current > 0) {
- unsigned t;
+ if (current > 0) {
+ unsigned t;
- t = SPH_BLEN - current;
- SPH_XCAT(HASH, _short)(cc, data, t);
- data = (const unsigned char *)data + t;
- len -= t;
- }
+ t = SPH_BLEN - current;
+ SPH_XCAT(HASH, _short)(cc, data, t);
+ data = (const unsigned char *)data + t;
+ len -= t;
+ }
#if !SPH_UNALIGNED
- if (((SPH_UPTR)data & (SPH_WLEN - 1U)) != 0) {
- SPH_XCAT(HASH, _short)(cc, data, len);
- return;
- }
+ if (((SPH_UPTR)data & (SPH_WLEN - 1U)) != 0) {
+ SPH_XCAT(HASH, _short)(cc, data, len);
+ return;
+ }
#endif
- orig_len = len;
- while (len >= SPH_BLEN) {
- RFUN(data, SPH_VAL);
- len -= SPH_BLEN;
- data = (const unsigned char *)data + SPH_BLEN;
- }
- if (len > 0)
- memcpy(sc->buf, data, len);
+ orig_len = len;
+ while (len >= SPH_BLEN) {
+ RFUN(data, SPH_VAL);
+ len -= SPH_BLEN;
+ data = (const unsigned char *)data + SPH_BLEN;
+ }
+ if (len > 0)
+ memcpy(sc->buf, data, len);
#if SPH_64
- sc->count += (sph_u64)orig_len;
+ sc->count += (sph_u64)orig_len;
#else
- clow = sc->count_low;
- clow2 = SPH_T32(clow + orig_len);
- sc->count_low = clow2;
- if (clow2 < clow)
- sc->count_high ++;
- /*
- * This code handles the improbable situation where "size_t" is
- * greater than 32 bits, and yet we do not have a 64-bit type.
- */
- orig_len >>= 12;
- orig_len >>= 10;
- orig_len >>= 10;
- sc->count_high += orig_len;
+ clow = sc->count_low;
+ clow2 = SPH_T32(clow + orig_len);
+ sc->count_low = clow2;
+ if (clow2 < clow)
+ sc->count_high ++;
+ /*
+ * This code handles the improbable situation where "size_t" is
+ * greater than 32 bits, and yet we do not have a 64-bit type.
+ */
+ orig_len >>= 12;
+ orig_len >>= 10;
+ orig_len >>= 10;
+ sc->count_high += orig_len;
#endif
}
#endif
@@ -237,110 +237,110 @@ SPH_XCAT(sph_, HASH)(void *cc, const void *data, size_t len)
*/
static void
SPH_XCAT(HASH, _addbits_and_close)(void *cc,
- unsigned ub, unsigned n, void *dst, unsigned rnum)
+ unsigned ub, unsigned n, void *dst, unsigned rnum)
{
- SPH_XCAT(sph_, SPH_XCAT(HASH, _context)) *sc;
- unsigned current, u;
+ SPH_XCAT(sph_, SPH_XCAT(HASH, _context)) *sc;
+ unsigned current, u;
#if !SPH_64
- sph_u32 low, high;
+ sph_u32 low, high;
#endif
- sc = cc;
+ sc = cc;
#if SPH_64
- current = (unsigned)sc->count & (SPH_BLEN - 1U);
+ current = (unsigned)sc->count & (SPH_BLEN - 1U);
#else
- current = (unsigned)sc->count_low & (SPH_BLEN - 1U);
+ current = (unsigned)sc->count_low & (SPH_BLEN - 1U);
#endif
#ifdef PW01
- sc->buf[current ++] = (0x100 | (ub & 0xFF)) >> (8 - n);
+ sc->buf[current ++] = (0x100 | (ub & 0xFF)) >> (8 - n);
#else
- {
- unsigned z;
+ {
+ unsigned z;
- z = 0x80U >> n;
- sc->buf[current ++] = ((ub & -z) | z) & 0xFF;
- }
+ z = 0x80U >> n;
+ sc->buf[current ++] = ((ub & -z) | z) & 0xFF;
+ }
#endif
- if (current > SPH_MAXPAD) {
- memset(sc->buf + current, 0, SPH_BLEN - current);
- RFUN(sc->buf, SPH_VAL);
- memset(sc->buf, 0, SPH_MAXPAD);
- } else {
- memset(sc->buf + current, 0, SPH_MAXPAD - current);
- }
+ if (current > SPH_MAXPAD) {
+ memset(sc->buf + current, 0, SPH_BLEN - current);
+ RFUN(sc->buf, SPH_VAL);
+ memset(sc->buf, 0, SPH_MAXPAD);
+ } else {
+ memset(sc->buf + current, 0, SPH_MAXPAD - current);
+ }
#if defined BE64
#if defined PLW1
- sph_enc64be_aligned(sc->buf + SPH_MAXPAD,
- SPH_T64(sc->count << 3) + (sph_u64)n);
+ sph_enc64be_aligned(sc->buf + SPH_MAXPAD,
+ SPH_T64(sc->count << 3) + (sph_u64)n);
#elif defined PLW4
- memset(sc->buf + SPH_MAXPAD, 0, 2 * SPH_WLEN);
- sph_enc64be_aligned(sc->buf + SPH_MAXPAD + 2 * SPH_WLEN,
- sc->count >> 61);
- sph_enc64be_aligned(sc->buf + SPH_MAXPAD + 3 * SPH_WLEN,
- SPH_T64(sc->count << 3) + (sph_u64)n);
+ memset(sc->buf + SPH_MAXPAD, 0, 2 * SPH_WLEN);
+ sph_enc64be_aligned(sc->buf + SPH_MAXPAD + 2 * SPH_WLEN,
+ sc->count >> 61);
+ sph_enc64be_aligned(sc->buf + SPH_MAXPAD + 3 * SPH_WLEN,
+ SPH_T64(sc->count << 3) + (sph_u64)n);
#else
- sph_enc64be_aligned(sc->buf + SPH_MAXPAD, sc->count >> 61);
- sph_enc64be_aligned(sc->buf + SPH_MAXPAD + SPH_WLEN,
- SPH_T64(sc->count << 3) + (sph_u64)n);
+ sph_enc64be_aligned(sc->buf + SPH_MAXPAD, sc->count >> 61);
+ sph_enc64be_aligned(sc->buf + SPH_MAXPAD + SPH_WLEN,
+ SPH_T64(sc->count << 3) + (sph_u64)n);
#endif
#elif defined LE64
#if defined PLW1
- sph_enc64le_aligned(sc->buf + SPH_MAXPAD,
- SPH_T64(sc->count << 3) + (sph_u64)n);
+ sph_enc64le_aligned(sc->buf + SPH_MAXPAD,
+ SPH_T64(sc->count << 3) + (sph_u64)n);
#elif defined PLW1
- sph_enc64le_aligned(sc->buf + SPH_MAXPAD,
- SPH_T64(sc->count << 3) + (sph_u64)n);
- sph_enc64le_aligned(sc->buf + SPH_MAXPAD + SPH_WLEN, sc->count >> 61);
- memset(sc->buf + SPH_MAXPAD + 2 * SPH_WLEN, 0, 2 * SPH_WLEN);
+ sph_enc64le_aligned(sc->buf + SPH_MAXPAD,
+ SPH_T64(sc->count << 3) + (sph_u64)n);
+ sph_enc64le_aligned(sc->buf + SPH_MAXPAD + SPH_WLEN, sc->count >> 61);
+ memset(sc->buf + SPH_MAXPAD + 2 * SPH_WLEN, 0, 2 * SPH_WLEN);
#else
- sph_enc64le_aligned(sc->buf + SPH_MAXPAD,
- SPH_T64(sc->count << 3) + (sph_u64)n);
- sph_enc64le_aligned(sc->buf + SPH_MAXPAD + SPH_WLEN, sc->count >> 61);
+ sph_enc64le_aligned(sc->buf + SPH_MAXPAD,
+ SPH_T64(sc->count << 3) + (sph_u64)n);
+ sph_enc64le_aligned(sc->buf + SPH_MAXPAD + SPH_WLEN, sc->count >> 61);
#endif
#else
#if SPH_64
#ifdef BE32
- sph_enc64be_aligned(sc->buf + SPH_MAXPAD,
- SPH_T64(sc->count << 3) + (sph_u64)n);
+ sph_enc64be_aligned(sc->buf + SPH_MAXPAD,
+ SPH_T64(sc->count << 3) + (sph_u64)n);
#else
- sph_enc64le_aligned(sc->buf + SPH_MAXPAD,
- SPH_T64(sc->count << 3) + (sph_u64)n);
+ sph_enc64le_aligned(sc->buf + SPH_MAXPAD,
+ SPH_T64(sc->count << 3) + (sph_u64)n);
#endif
#else
- low = sc->count_low;
- high = SPH_T32((sc->count_high << 3) | (low >> 29));
- low = SPH_T32(low << 3) + (sph_u32)n;
+ low = sc->count_low;
+ high = SPH_T32((sc->count_high << 3) | (low >> 29));
+ low = SPH_T32(low << 3) + (sph_u32)n;
#ifdef BE32
- sph_enc32be(sc->buf + SPH_MAXPAD, high);
- sph_enc32be(sc->buf + SPH_MAXPAD + SPH_WLEN, low);
+ sph_enc32be(sc->buf + SPH_MAXPAD, high);
+ sph_enc32be(sc->buf + SPH_MAXPAD + SPH_WLEN, low);
#else
- sph_enc32le(sc->buf + SPH_MAXPAD, low);
- sph_enc32le(sc->buf + SPH_MAXPAD + SPH_WLEN, high);
+ sph_enc32le(sc->buf + SPH_MAXPAD, low);
+ sph_enc32le(sc->buf + SPH_MAXPAD + SPH_WLEN, high);
#endif
#endif
#endif
- RFUN(sc->buf, SPH_VAL);
+ RFUN(sc->buf, SPH_VAL);
#ifdef SPH_NO_OUTPUT
- (void)dst;
- (void)rnum;
- (void)u;
+ (void)dst;
+ (void)rnum;
+ (void)u;
#else
- for (u = 0; u < rnum; u ++) {
+ for (u = 0; u < rnum; u ++) {
#if defined BE64
- sph_enc64be((unsigned char *)dst + 8 * u, sc->val[u]);
+ sph_enc64be((unsigned char *)dst + 8 * u, sc->val[u]);
#elif defined LE64
- sph_enc64le((unsigned char *)dst + 8 * u, sc->val[u]);
+ sph_enc64le((unsigned char *)dst + 8 * u, sc->val[u]);
#elif defined BE32
- sph_enc32be((unsigned char *)dst + 4 * u, sc->val[u]);
+ sph_enc32be((unsigned char *)dst + 4 * u, sc->val[u]);
#else
- sph_enc32le((unsigned char *)dst + 4 * u, sc->val[u]);
+ sph_enc32le((unsigned char *)dst + 4 * u, sc->val[u]);
#endif
- }
+ }
#endif
}
static void
SPH_XCAT(HASH, _close)(void *cc, void *dst, unsigned rnum)
{
- SPH_XCAT(HASH, _addbits_and_close)(cc, 0, 0, dst, rnum);
+ SPH_XCAT(HASH, _addbits_and_close)(cc, 0, 0, dst, rnum);
}