author    Lasse Collin <lasse.collin@tukaani.org>  2022-10-05 14:26:00 +0300
committer Lasse Collin <lasse.collin@tukaani.org>  2022-10-05 14:26:00 +0300
commit    fae37ad2affd8fe8871f4ff93d5cab5ec14d5e58 (patch)
tree      d45621a5a9f0737dfab3051d2ed7b70bdd0b12a6 /src/liblzma/common/memcmplen.h
parent    liblzma: Add API doc note about the .xz decoder LZMA_MEMLIMIT_ERROR bug. (diff)
download  xz-fae37ad2affd8fe8871f4ff93d5cab5ec14d5e58.tar.xz
tuklib_integer: Add 64-bit endianness-converting reads and writes.

Also update the comment in liblzma's memcmplen.h. Thanks to Michał Górny for the original patch for the reads.
Diffstat (limited to '')
-rw-r--r--  src/liblzma/common/memcmplen.h  |  9
1 file changed, 3 insertions(+), 6 deletions(-)
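The commit subject refers to new 64-bit endianness-converting reads and writes in tuklib_integer. As a rough, portable sketch of what such helpers do (the sketch_ names are illustrative only, not the actual tuklib_integer API, which also selects byteswap- and unaligned-access-based fast paths at build time):

#include <stdint.h>

/* Read a 64-bit big endian integer from a byte buffer. */
static inline uint64_t
sketch_read64be(const uint8_t *buf)
{
	uint64_t num = 0;
	for (unsigned i = 0; i < 8; ++i)
		num = (num << 8) | buf[i];
	return num;
}

/* Write a 64-bit integer to a byte buffer in big endian order. */
static inline void
sketch_write64be(uint8_t *buf, uint64_t num)
{
	for (unsigned i = 0; i < 8; ++i)
		buf[i] = (uint8_t)(num >> (56 - 8 * i));
}

Little endian variants would simply index the bytes in the opposite order.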
diff --git a/src/liblzma/common/memcmplen.h b/src/liblzma/common/memcmplen.h
index dcfd8d6f..a80428b9 100644
--- a/src/liblzma/common/memcmplen.h
+++ b/src/liblzma/common/memcmplen.h
@@ -51,10 +51,6 @@ lzma_memcmplen(const uint8_t *buf1, const uint8_t *buf2,
|| (defined(__INTEL_COMPILER) && defined(__x86_64__)) \
|| (defined(__INTEL_COMPILER) && defined(_M_X64)) \
|| (defined(_MSC_VER) && defined(_M_X64)))
- // NOTE: This will use 64-bit unaligned access which
- // TUKLIB_FAST_UNALIGNED_ACCESS wasn't meant to permit, but
- // it's convenient here at least as long as it's x86-64 only.
- //
// I keep this x86-64 only for now since that's where I know this
// to be a good method. This may be fine on other 64-bit CPUs too.
// On big endian one should use xor instead of subtraction and switch
@@ -84,8 +80,9 @@ lzma_memcmplen(const uint8_t *buf1, const uint8_t *buf2,
|| (defined(__INTEL_COMPILER) && defined(__SSE2__)) \
|| (defined(_MSC_VER) && defined(_M_IX86_FP) \
&& _M_IX86_FP >= 2))
- // NOTE: Like above, this will use 128-bit unaligned access which
- // TUKLIB_FAST_UNALIGNED_ACCESS wasn't meant to permit.
+ // NOTE: This will use 128-bit unaligned access which
+ // TUKLIB_FAST_UNALIGNED_ACCESS wasn't meant to permit,
+ // but it's convenient here since this is x86-only.
//
// SSE2 version for 32-bit and 64-bit x86. On x86-64 the above
// version is sometimes significantly faster and sometimes
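For context on the comment being edited above: the x86-64 path in lzma_memcmplen compares eight bytes at a time using unaligned 64-bit reads. Below is a minimal sketch of that technique, not the real code, which uses tuklib_integer helpers and per-compiler bit-scan intrinsics, and which requires the buffers to be readable a little past the limit so that the 8-byte loads may overrun:

#include <stdint.h>
#include <string.h>

/* Unaligned native-endian 64-bit load via memcpy; compilers turn this
 * into a single load on x86-64. */
static inline uint64_t
sketch_read64ne(const uint8_t *buf)
{
	uint64_t num;
	memcpy(&num, buf, sizeof(num));
	return num;
}

/* Return the length of the common prefix of buf1 and buf2, starting
 * from an already-verified length "len", but at most "limit". */
static uint32_t
sketch_memcmplen(const uint8_t *buf1, const uint8_t *buf2,
		uint32_t len, uint32_t limit)
{
	while (len < limit) {
		/* On little endian, subtracting the words leaves every
		 * bit below the first differing byte as zero, so the
		 * count of trailing zero bits locates that byte. On big
		 * endian one would use xor and count leading zeros
		 * instead, as the comment in the diff notes. */
		const uint64_t x = sketch_read64ne(buf1 + len)
				- sketch_read64ne(buf2 + len);
		if (x != 0) {
			len += (uint32_t)__builtin_ctzll(x) / 8;
			return len < limit ? len : limit;
		}

		len += 8;
	}

	return limit;
}

The SSE2 branch touched by the second hunk applies the same idea with 16-byte loads and a byte-equality mask; GCC/Clang's __builtin_ctzll stands in here for the compiler-specific bit-scan intrinsics the real code chooses per compiler.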