author     Lasse Collin <lasse.collin@tukaani.org>  2022-10-05 14:26:00 +0300
committer  Lasse Collin <lasse.collin@tukaani.org>  2022-10-05 14:26:00 +0300
commit     fae37ad2affd8fe8871f4ff93d5cab5ec14d5e58 (patch)
tree       d45621a5a9f0737dfab3051d2ed7b70bdd0b12a6 /src/common
parent     liblzma: Add API doc note about the .xz decoder LZMA_MEMLIMIT_ERROR bug. (diff)
download   xz-fae37ad2affd8fe8871f4ff93d5cab5ec14d5e58.tar.xz
tuklib_integer: Add 64-bit endianness-converting reads and writes.
Also update the comment in liblzma's memcmplen.h. Thanks to Michał Górny for the original patch for the reads.
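As a quick illustration of how the new unaligned readers can be called (a sketch only; the parse_header() helper and the 16-byte header layout are invented for this example, not part of the patch):

    #include <stdint.h>
    #include "tuklib_integer.h"

    // Hypothetical 16-byte header: a big-endian 64-bit magic value
    // followed by a little-endian 64-bit payload size.
    static void
    parse_header(const uint8_t *buf, uint64_t *magic, uint64_t *size)
    {
            // read64be()/read64le() accept unaligned pointers and
            // byte swap only when the host byte order differs.
            *magic = read64be(buf);
            *size = read64le(buf + 8);
    }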
Diffstat (limited to 'src/common')
-rw-r--r--  src/common/tuklib_integer.h  46
1 file changed, 44 insertions, 2 deletions
diff --git a/src/common/tuklib_integer.h b/src/common/tuklib_integer.h
index 6f44a7a0..b58ef68d 100644
--- a/src/common/tuklib_integer.h
+++ b/src/common/tuklib_integer.h
@@ -17,8 +17,8 @@
/// - Byte swapping: bswapXX(num)
/// - Byte order conversions to/from native (byteswaps if Y isn't
/// the native endianness): convXXYe(num)
-/// - Unaligned reads (16/32-bit only): readXXYe(ptr)
-/// - Unaligned writes (16/32-bit only): writeXXYe(ptr, num)
+/// - Unaligned reads: readXXYe(ptr)
+/// - Unaligned writes: writeXXYe(ptr, num)
/// - Aligned reads: aligned_readXXYe(ptr)
/// - Aligned writes: aligned_writeXXYe(ptr, num)
///
@@ -343,6 +343,46 @@ read32le(const uint8_t *buf)
}
+static inline uint64_t
+read64be(const uint8_t *buf)
+{
+#if defined(WORDS_BIGENDIAN) || defined(TUKLIB_FAST_UNALIGNED_ACCESS)
+ uint64_t num = read64ne(buf);
+ return conv64be(num);
+#else
+ uint64_t num = (uint64_t)buf[0] << 56;
+ num |= (uint64_t)buf[1] << 48;
+ num |= (uint64_t)buf[2] << 40;
+ num |= (uint64_t)buf[3] << 32;
+ num |= (uint64_t)buf[4] << 24;
+ num |= (uint64_t)buf[5] << 16;
+ num |= (uint64_t)buf[6] << 8;
+ num |= (uint64_t)buf[7];
+ return num;
+#endif
+}
+
+
+static inline uint64_t
+read64le(const uint8_t *buf)
+{
+#if !defined(WORDS_BIGENDIAN) || defined(TUKLIB_FAST_UNALIGNED_ACCESS)
+ uint64_t num = read64ne(buf);
+ return conv64le(num);
+#else
+ uint64_t num = (uint64_t)buf[0];
+ num |= (uint64_t)buf[1] << 8;
+ num |= (uint64_t)buf[2] << 16;
+ num |= (uint64_t)buf[3] << 24;
+ num |= (uint64_t)buf[4] << 32;
+ num |= (uint64_t)buf[5] << 40;
+ num |= (uint64_t)buf[6] << 48;
+ num |= (uint64_t)buf[7] << 56;
+ return num;
+#endif
+}
+
+
// NOTE: Possible byte swapping must be done in a macro to allow the compiler
// to optimize byte swapping of constants when using glibc's or *BSD's
// byte swapping macros. The actual write is done in an inline function
@@ -350,11 +390,13 @@ read32le(const uint8_t *buf)
#if defined(WORDS_BIGENDIAN) || defined(TUKLIB_FAST_UNALIGNED_ACCESS)
# define write16be(buf, num) write16ne(buf, conv16be(num))
# define write32be(buf, num) write32ne(buf, conv32be(num))
+# define write64be(buf, num) write64ne(buf, conv64be(num))
#endif
#if !defined(WORDS_BIGENDIAN) || defined(TUKLIB_FAST_UNALIGNED_ACCESS)
# define write16le(buf, num) write16ne(buf, conv16le(num))
# define write32le(buf, num) write32ne(buf, conv32le(num))
+# define write64le(buf, num) write64ne(buf, conv64le(num))
#endif
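For the write direction added by the second hunk, a minimal usage sketch (assuming a build where TUKLIB_FAST_UNALIGNED_ACCESS is defined, since this patch provides the 64-bit write macros only under the conditions shown above; store_both() is an invented name):

    #include <stdint.h>
    #include "tuklib_integer.h"

    // Store the same 64-bit value in big- and little-endian form into
    // a 16-byte buffer. The macros expand to a native write plus a
    // conv64be()/conv64le() call, so byte swapping of constants can
    // still be optimized at compile time.
    static void
    store_both(uint8_t *buf, uint64_t num)
    {
            write64be(buf, num);
            write64le(buf + 8, num);
    }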