///////////////////////////////////////////////////////////////////////////////
//
/// \file test_index_hash.c
/// \brief Tests src/liblzma/common/index_hash.c API functions
///
/// \note No test included for lzma_index_hash_end since it
/// would be trivial unless tested for memory leaks
/// with something like valgrind
//
// Author: Jia Tan
//
///////////////////////////////////////////////////////////////////////////////

#include "tests.h"

// Needed for UNPADDED_SIZE_MIN and UNPADDED_SIZE_MAX macro definitions
// and index_size and vli_ceil4 helper functions
#include "common/index.h"


static void
test_lzma_index_hash_init(void)
{
#ifndef HAVE_DECODERS
assert_skip("Decoder support disabled");
#else
// First test with NULL index_hash.
// This should create a fresh index_hash.
lzma_index_hash *index_hash = lzma_index_hash_init(NULL, NULL);
assert_true(index_hash != NULL);
// Next test with non-NULL index_hash.
lzma_index_hash *second_hash = lzma_index_hash_init(index_hash, NULL);
// It should not create a new index_hash pointer.
// Instead it must just re-init the first index_hash.
assert_true(index_hash == second_hash);
lzma_index_hash_end(index_hash, NULL);
#endif
}


static void
test_lzma_index_hash_append(void)
{
#ifndef HAVE_DECODERS
assert_skip("Decoder support disabled");
#else
// Test all invalid parameters
assert_lzma_ret(lzma_index_hash_append(NULL, 0, 0),
LZMA_PROG_ERROR);
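// (a NULL index_hash and an Unpadded Size of 0 are both invalid)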
// Test NULL index_hash
assert_lzma_ret(lzma_index_hash_append(NULL, UNPADDED_SIZE_MIN,
LZMA_VLI_MAX), LZMA_PROG_ERROR);

// Test with invalid Unpadded Size
lzma_index_hash *index_hash = lzma_index_hash_init(NULL, NULL);
assert_true(index_hash != NULL);
assert_lzma_ret(lzma_index_hash_append(index_hash,
UNPADDED_SIZE_MIN - 1, LZMA_VLI_MAX),
LZMA_PROG_ERROR);
// Test with invalid Uncompressed Size
assert_lzma_ret(lzma_index_hash_append(index_hash,
UNPADDED_SIZE_MIN, LZMA_VLI_MAX + 1),
LZMA_PROG_ERROR);

// First append a Record describing a small Block.
// This should succeed.
assert_lzma_ret(lzma_index_hash_append(index_hash,
UNPADDED_SIZE_MIN, 1), LZMA_OK);
// Append another small Record.
assert_lzma_ret(lzma_index_hash_append(index_hash,
UNPADDED_SIZE_MIN, 1), LZMA_OK);

// Append a Record that would cause the compressed size to grow
// too big
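// (the running total of the compressed size would exceed LZMA_VLI_MAX)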
assert_lzma_ret(lzma_index_hash_append(index_hash,
UNPADDED_SIZE_MAX, 1), LZMA_DATA_ERROR);
lzma_index_hash_end(index_hash, NULL);
#endif
}


#if defined(HAVE_ENCODERS) && defined(HAVE_DECODERS)
// Fill an index_hash with unpadded and uncompressed VLIs
// by calling lzma_index_hash_append
static void
fill_index_hash(lzma_index_hash *index_hash, const lzma_vli *unpadded_sizes,
const lzma_vli *uncomp_sizes, uint32_t block_count)
{
for (uint32_t i = 0; i < block_count; ++i)
assert_lzma_ret(lzma_index_hash_append(index_hash,
unpadded_sizes[i], uncomp_sizes[i]), LZMA_OK);
}


// Set the contents of buf to the expected Index based on the
// .xz specification. This needs the unpadded and uncompressed VLIs
// to correctly create the Index.
static void
generate_index(uint8_t *buf, const lzma_vli *unpadded_sizes,
const lzma_vli *uncomp_sizes, uint32_t block_count,
size_t index_max_size)
{
size_t in_pos = 0;
size_t out_pos = 0;
// First set Index Indicator
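// (a single 0x00 byte per the .xz file format specification)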
buf[out_pos++] = INDEX_INDICATOR;
// Next write out Number of Records
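// lzma_vli_encode() is used in multi-call mode here (a vli_pos pointer
// is given), so it returns LZMA_STREAM_END once the whole VLI is written.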
assert_lzma_ret(lzma_vli_encode(block_count, &in_pos, buf,
&out_pos, index_max_size), LZMA_STREAM_END);
// Next write out each Record.
// A Record consists of Unpadded Size and Uncompressed Size
// written next to each other as VLIs.
for (uint32_t i = 0; i < block_count; ++i) {
in_pos = 0;
assert_lzma_ret(lzma_vli_encode(unpadded_sizes[i], &in_pos,
buf, &out_pos, index_max_size), LZMA_STREAM_END);
in_pos = 0;
assert_lzma_ret(lzma_vli_encode(uncomp_sizes[i], &in_pos,
buf, &out_pos, index_max_size), LZMA_STREAM_END);
}
// Add Index Padding
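// (0-3 null bytes so that the size of the Index field is
// a multiple of four bytes)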
lzma_vli rounded_out_pos = vli_ceil4(out_pos);
memzero(buf + out_pos, rounded_out_pos - out_pos);
out_pos = rounded_out_pos;
// Add the CRC32
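// (calculated over everything in the Index field before it
// and stored little endian)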
write32le(buf + out_pos, lzma_crc32(buf, out_pos, 0));
out_pos += 4;
assert_uint_eq(out_pos, index_max_size);
}
#endif


static void
test_lzma_index_hash_decode(void)
{
#if !defined(HAVE_ENCODERS) || !defined(HAVE_DECODERS)
assert_skip("Encoder or decoder support disabled");
#else
lzma_index_hash *index_hash = lzma_index_hash_init(NULL, NULL);
assert_true(index_hash != NULL);
size_t in_pos = 0;
// Six valid values for the Unpadded Size fields in an Index
const lzma_vli unpadded_sizes[6] = {
UNPADDED_SIZE_MIN,
1000,
4000,
8000,
16000,
32000
};
// Six valid values for the Uncompressed Size fields in an Index
const lzma_vli uncomp_sizes[6] = {
1,
500,
8000,
20,
1,
500
};
// Add two Records to an index_hash
fill_index_hash(index_hash, unpadded_sizes, uncomp_sizes, 2);
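// lzma_index_hash_size() gives the encoded size of the Index field
// that matches the Records added so far, so the buffer can be
// sized exactly.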
const lzma_vli size_two_records = lzma_index_hash_size(index_hash);
assert_uint(size_two_records, >, 0);
uint8_t *index_two_records = tuktest_malloc(size_two_records);
generate_index(index_two_records, unpadded_sizes, uncomp_sizes, 2,
size_two_records);

// First test the basic buffer size error: in_pos starts past
// the end of the input buffer.
in_pos = size_two_records + 1;
assert_lzma_ret(lzma_index_hash_decode(index_hash,
index_two_records, &in_pos,
size_two_records), LZMA_BUF_ERROR);

// Next test for invalid Index Indicator
in_pos = 0;
index_two_records[0] ^= 1;
assert_lzma_ret(lzma_index_hash_decode(index_hash,
index_two_records, &in_pos,
size_two_records), LZMA_DATA_ERROR);
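// Undo the bit flip so the buffer is a valid Index again.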
index_two_records[0] ^= 1;

// Next decode the valid Index and verify that it matches
// the index_hash.
in_pos = 0;
assert_lzma_ret(lzma_index_hash_decode(index_hash,
index_two_records, &in_pos,
size_two_records), LZMA_STREAM_END);

// Next test an index_hash with three Records
index_hash = lzma_index_hash_init(index_hash, NULL);
fill_index_hash(index_hash, unpadded_sizes, uncomp_sizes, 3);
const lzma_vli size_three_records = lzma_index_hash_size(
index_hash);
assert_uint(size_three_records, >, 0);
uint8_t *index_three_records = tuktest_malloc(size_three_records);
generate_index(index_three_records, unpadded_sizes, uncomp_sizes,
3, size_three_records);
in_pos = 0;
assert_lzma_ret(lzma_index_hash_decode(index_hash,
index_three_records, &in_pos,
size_three_records), LZMA_STREAM_END);

// Next test an index_hash with five Records
index_hash = lzma_index_hash_init(index_hash, NULL);
fill_index_hash(index_hash, unpadded_sizes, uncomp_sizes, 5);
const lzma_vli size_five_records = lzma_index_hash_size(
index_hash);
assert_uint(size_five_records, >, 0);
uint8_t *index_five_records = tuktest_malloc(size_five_records);
generate_index(index_five_records, unpadded_sizes, uncomp_sizes, 5,
size_five_records);

// Instead of giving all of the input at once, feed the decoder
// one byte at a time.
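// This checks that the decoder can stop in the middle of a field
// and resume on the next call.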
in_pos = 0;
for (lzma_vli i = 0; i < size_five_records - 1; ++i) {
assert_lzma_ret(lzma_index_hash_decode(index_hash,
index_five_records, &in_pos, in_pos + 1),
LZMA_OK);
}
// Last byte should return LZMA_STREAM_END
assert_lzma_ret(lzma_index_hash_decode(index_hash,
index_five_records, &in_pos,
in_pos + 1), LZMA_STREAM_END);

// Next test an index_hash that was given an incorrect Unpadded
// Size. The decoder should detect this and report LZMA_DATA_ERROR.
index_hash = lzma_index_hash_init(index_hash, NULL);
fill_index_hash(index_hash, unpadded_sizes, uncomp_sizes, 5);
// The sixth Record will have an invalid Unpadded Size
assert_lzma_ret(lzma_index_hash_append(index_hash,
unpadded_sizes[5] + 1,
uncomp_sizes[5]), LZMA_OK);
const lzma_vli size_six_records = lzma_index_hash_size(
index_hash);
assert_uint(size_six_records, >, 0);
uint8_t *index_six_records = tuktest_malloc(size_six_records);
generate_index(index_six_records, unpadded_sizes, uncomp_sizes, 6,
size_six_records);
in_pos = 0;
assert_lzma_ret(lzma_index_hash_decode(index_hash,
index_six_records, &in_pos,
size_six_records), LZMA_DATA_ERROR);

// Next test a corrupt Index (invalid CRC32). The decoder should
// detect this and report LZMA_DATA_ERROR.
index_hash = lzma_index_hash_init(index_hash, NULL);
fill_index_hash(index_hash, unpadded_sizes, uncomp_sizes, 2);
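// Flip a bit in the last byte, which is part of the CRC32 field.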
index_two_records[size_two_records - 1] ^= 1;
in_pos = 0;
assert_lzma_ret(lzma_index_hash_decode(index_hash,
index_two_records, &in_pos,
size_two_records), LZMA_DATA_ERROR);

// Next test an Index whose Records do not match the index_hash.
index_hash = lzma_index_hash_init(index_hash, NULL);
fill_index_hash(index_hash, unpadded_sizes, uncomp_sizes, 2);
// Recalculate Index with invalid Unpadded Size
const lzma_vli unpadded_sizes_invalid[2] = {
unpadded_sizes[0],
unpadded_sizes[1] + 1
};
generate_index(index_two_records, unpadded_sizes_invalid,
uncomp_sizes, 2, size_two_records);
in_pos = 0;
assert_lzma_ret(lzma_index_hash_decode(index_hash,
index_two_records, &in_pos,
size_two_records), LZMA_DATA_ERROR);
lzma_index_hash_end(index_hash, NULL);
#endif
}


static void
test_lzma_index_hash_size(void)
{
#ifndef HAVE_DECODERS
assert_skip("Decoder support disabled");
#else
lzma_index_hash *index_hash = lzma_index_hash_init(NULL, NULL);
assert_true(index_hash != NULL);
// First test empty index_hash
// Expected size should be:
// Index Indicator - 1 byte
// Number of Records - 1 byte
// List of Records - 0 bytes
// Index Padding - 2 bytes
// CRC32 - 4 bytes
// Total - 8 bytes
assert_uint_eq(lzma_index_hash_size(index_hash), 8);

// Append a Record describing a small Block to the index_hash
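// Both fields fit into single-byte VLIs, so the List of Records
// grows by two bytes.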
assert_lzma_ret(lzma_index_hash_append(index_hash,
UNPADDED_SIZE_MIN, 1), LZMA_OK);
// Expected size should be:
// Index Indicator - 1 byte
// Number of Records - 1 byte
// List of Records - 2 bytes
// Index Padding - 0 bytes
// CRC32 - 4 bytes
// Total - 8 bytes
lzma_vli expected_size = 8;
assert_uint_eq(lzma_index_hash_size(index_hash), expected_size);

// Append an additional small Record
assert_lzma_ret(lzma_index_hash_append(index_hash,
UNPADDED_SIZE_MIN, 1), LZMA_OK);
// Expected size should be:
// Index Indicator - 1 byte
// Number of Records - 1 byte
// List of Records - 4 bytes
// Index Padding - 2 bytes
// CRC32 - 4 bytes
// Total - 12 bytes
expected_size = 12;
assert_uint_eq(lzma_index_hash_size(index_hash), expected_size);

// Append a larger Record to the index_hash (3 bytes for each VLI)
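// (0x10000 needs 17 significant bits and a VLI stores 7 bits per
// byte, so each field encodes into three bytes)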
const lzma_vli three_byte_vli = 0x10000;
assert_lzma_ret(lzma_index_hash_append(index_hash,
three_byte_vli, three_byte_vli), LZMA_OK);
// Expected size should be:
// Index Indicator - 1 byte
// Number of Records - 1 byte
// List of Records - 10 bytes
// Index Padding - 0 bytes
// CRC32 - 4 bytes
// Total - 16 bytes
expected_size = 16;
assert_uint_eq(lzma_index_hash_size(index_hash), expected_size);
lzma_index_hash_end(index_hash, NULL);
#endif
}


extern int
main(int argc, char **argv)
{
tuktest_start(argc, argv);
tuktest_run(test_lzma_index_hash_init);
tuktest_run(test_lzma_index_hash_append);
tuktest_run(test_lzma_index_hash_decode);
tuktest_run(test_lzma_index_hash_size);
return tuktest_end();
}