author | Lasse Collin <lasse.collin@tukaani.org> | 2023-12-20 21:15:16 +0200
---|---|---
committer | Lasse Collin <lasse.collin@tukaani.org> | 2023-12-28 17:17:39 +0200
commit | cd64dd70d5665b6048829c45772d08606f44672e (patch) |
tree | 7e276ff67971dbb1c6c6282264dad694b17c5f69 /src/liblzma |
parent | liblzma: Check also for __clang__ in memcmplen.h. (diff) |
download | xz-cd64dd70d5665b6048829c45772d08606f44672e.tar.xz |
liblzma: Use 8-byte method in memcmplen.h on ARM64.
It requires fast unaligned access to 64-bit integers
and a fast instruction to count trailing zeros in
a 64-bit integer (__builtin_ctzll()). This perhaps
should be enabled on some other archs too.
Thanks to Chenxi Mao for the original patch:
https://github.com/tukaani-project/xz/pull/75 (the first commit)
According to the numbers there, this may improve encoding
speed by about 3-5 %.
This enables the 8-byte method with MSVC on ARM64 too,
which should work but wasn't tested.
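
Not part of the commit, but as a rough sketch of what the 8-byte method does: assuming a little-endian target, GCC/Clang builtins, and buffers with at least 8 readable bytes of slack past limit (what the code calls LZMA_MEMCMPLEN_EXTRA), it boils down to the loop below. The real code uses read64ne() and the TUKLIB_FAST_UNALIGNED_ACCESS checks; memcmplen_sketch() and the memcpy() loads are simplified stand-ins.

#include <stdint.h>
#include <string.h>

// Return the length of the common prefix of buf1 and buf2, never less
// than len and never more than limit, comparing 8 bytes per iteration.
static uint32_t
memcmplen_sketch(const uint8_t *buf1, const uint8_t *buf2,
		uint32_t len, uint32_t limit)
{
	while (len < limit) {
		uint64_t a, b;
		memcpy(&a, buf1 + len, 8);   // unaligned 64-bit loads
		memcpy(&b, buf2 + len, 8);

		const uint64_t x = a - b;
		if (x != 0) {
			// The bytes below the first differing byte are equal,
			// so they subtract to zero and generate no borrow. On
			// little endian the trailing zero bits therefore locate
			// the first differing byte.
			len += (uint32_t)__builtin_ctzll(x) / 8;
			return len < limit ? len : limit;
		}

		len += 8;
	}

	return limit;
}

For example, if the buffers first differ 6 bytes past len, the low 48 bits of x are zero, __builtin_ctzll(x) is between 48 and 55, and len advances by exactly 6 before returning.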
Diffstat (limited to 'src/liblzma')
-rw-r--r-- | src/liblzma/common/memcmplen.h | 18 |
1 file changed, 10 insertions(+), 8 deletions(-)
diff --git a/src/liblzma/common/memcmplen.h b/src/liblzma/common/memcmplen.h
index 81922ba2..c095c05e 100644
--- a/src/liblzma/common/memcmplen.h
+++ b/src/liblzma/common/memcmplen.h
@@ -24,7 +24,8 @@
 // can use the intrinsics without the header file.
 #if defined(TUKLIB_FAST_UNALIGNED_ACCESS) \
 		&& defined(_MSC_VER) \
-		&& defined(_M_X64) \
+		&& (defined(_M_X64) \
+			|| defined(_M_ARM64) || defined(_M_ARM64EC)) \
 		&& !defined(__INTEL_COMPILER)
 #	include <intrin.h>
 #endif
@@ -58,20 +59,21 @@ lzma_memcmplen(const uint8_t *buf1, const uint8_t *buf2,
 #if defined(TUKLIB_FAST_UNALIGNED_ACCESS) \
 		&& (((TUKLIB_GNUC_REQ(3, 4) || defined(__clang__)) \
-			&& defined(__x86_64__)) \
+			&& (defined(__x86_64__) \
+				|| defined(__aarch64__))) \
 		|| (defined(__INTEL_COMPILER) && defined(__x86_64__)) \
 		|| (defined(__INTEL_COMPILER) && defined(_M_X64)) \
-		|| (defined(_MSC_VER) && defined(_M_X64)))
-	// I keep this x86-64 only for now since that's where I know this
-	// to be a good method. This may be fine on other 64-bit CPUs too.
-	// On big endian one should use xor instead of subtraction and switch
-	// to __builtin_clzll().
+		|| (defined(_MSC_VER) && (defined(_M_X64) \
+			|| defined(_M_ARM64) || defined(_M_ARM64EC))))
+	// This is only for x86-64 and ARM64 for now. This might be fine on
+	// other 64-bit processors too. On big endian one should use xor
+	// instead of subtraction and switch to __builtin_clzll().
 #	define LZMA_MEMCMPLEN_EXTRA 8
 	while (len < limit) {
 		const uint64_t x = read64ne(buf1 + len)
 				- read64ne(buf2 + len);
 		if (x != 0) {
 			// MSVC or Intel C compiler on Windows
-#	if (defined(_MSC_VER) || defined(__INTEL_COMPILER)) && defined(_M_X64)
+#	if defined(_MSC_VER) || defined(__INTEL_COMPILER)
 			unsigned long tmp;
 			_BitScanForward64(&tmp, x);
 			len += (uint32_t)tmp >> 3;
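
The retained comment notes that big-endian targets would need xor instead of subtraction and __builtin_clzll() instead of __builtin_ctzll(). As a hypothetical sketch of that variant (not part of this patch; same slack and GCC/Clang assumptions as above, with memcmplen_be_sketch() and memcpy() again standing in for the real helpers):

#include <stdint.h>
#include <string.h>

static uint32_t
memcmplen_be_sketch(const uint8_t *buf1, const uint8_t *buf2,
		uint32_t len, uint32_t limit)
{
	while (len < limit) {
		uint64_t a, b;
		memcpy(&a, buf1 + len, 8);
		memcpy(&b, buf2 + len, 8);

		// xor never carries between bytes, so it is safe on any byte
		// order. On big endian the first differing byte in memory
		// lands in the most significant bits of x, so leading zeros
		// locate it.
		const uint64_t x = a ^ b;
		if (x != 0) {
			len += (uint32_t)__builtin_clzll(x) / 8;
			return len < limit ? len : limit;
		}

		len += 8;
	}

	return limit;
}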