From a71864f77dfb76b5d78a270641539947c312583a Mon Sep 17 00:00:00 2001
From: Lasse Collin
Date: Sat, 5 Jan 2008 19:57:00 +0200
Subject: Fix typo in comment (INT64_MAX -> UINT64_MAX).

---
 src/liblzma/api/lzma/vli.h | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

(limited to 'src/liblzma/api/lzma/vli.h')

diff --git a/src/liblzma/api/lzma/vli.h b/src/liblzma/api/lzma/vli.h
index 322014e1..bc0770ce 100644
--- a/src/liblzma/api/lzma/vli.h
+++ b/src/liblzma/api/lzma/vli.h
@@ -154,7 +154,7 @@ typedef uint64_t lzma_vli;
  * The encoding scheme encodes seven bits to every byte, using minimum
  * number of bytes required to represent the given value. In other words,
  * it puts 7-63 bits into 1-9 bytes. This implementation limits the number
- * of bits used to 63, thus num must be at maximum of INT64_MAX / 2. You
+ * of bits used to 63, thus num must be at maximum of UINT64_MAX / 2. You
  * may use LZMA_VLI_VALUE_MAX for clarity.
  *
  * \param vli       Integer to be encoded
--
cgit v1.2.3
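
The comment touched by this patch describes liblzma's variable-length integer
encoding: seven payload bits per output byte, using the minimum number of
bytes, so a value of up to 63 bits (at most UINT64_MAX / 2) fits in 1-9 bytes.
The following is a minimal standalone sketch of that scheme, not liblzma's
actual lzma_vli_encode() API; the helper name vli_encode_sketch() and the byte
order (low seven bits first, with the high bit of each byte as a continuation
flag, as in the .xz format) are assumptions made here for illustration.

/*
 * Illustrative sketch only; this is NOT liblzma's lzma_vli_encode().
 * It demonstrates the scheme described in the comment above: seven
 * payload bits per byte, continuation flag in the high bit of every
 * byte except the last, least significant group first, at most nine
 * bytes for a 63-bit value.
 */
#include <stdint.h>
#include <stdio.h>

/* Hypothetical helper, not part of the liblzma API. Returns the number
 * of bytes written to out, or 0 if num exceeds the 63-bit limit. */
static size_t
vli_encode_sketch(uint64_t num, uint8_t out[9])
{
	size_t i = 0;

	if (num > UINT64_MAX / 2)
		return 0;

	while (num >= 0x80) {
		/* Emit the low seven bits and set the continuation flag. */
		out[i++] = (uint8_t)(num & 0x7F) | 0x80;
		num >>= 7;
	}

	/* Final byte: high bit clear marks the end of the integer. */
	out[i++] = (uint8_t)num;
	return i;
}

int
main(void)
{
	uint8_t buf[9];
	const size_t len = vli_encode_sketch(1000, buf);

	/* 1000 encodes to the two bytes E8 07 under this convention. */
	for (size_t i = 0; i < len; ++i)
		printf("%02X ", buf[i]);

	printf("(%zu bytes)\n", len);
	return 0;
}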