/*
 * LZMA2 decoder
 *
 * Authors: Lasse Collin <lasse.collin@tukaani.org>
 *          Igor Pavlov <http://7-zip.org/>
 *
 * This file has been put into the public domain.
 * You can do whatever you want with this file.
 */

#include "xz_private.h"
#include "xz_lzma2.h"

/*
 * Range decoder initialization eats the first five bytes of each LZMA chunk.
 */
#define RC_INIT_BYTES 5

/*
 * Minimum number of usable input buffer bytes needed to safely decode one
 * LZMA symbol. The worst case is that we decode 22 bits using probabilities
 * and 26 direct bits. This may consume at maximum 20 bytes of input.
 * However, lzma_main() does an extra normalization before returning, thus
 * we need to put 21 here.
 */
#define LZMA_IN_REQUIRED 21
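
/*
 * A rough breakdown of that worst case (derived from the decoder below):
 * the 22 probability-coded bits are 1 (is_match) + 1 (is_rep) + 10 for the
 * length (choice, choice2, and the 8-bit high tree) + 6 for the dist_slot
 * tree + 4 for dist_align, and the 26 direct bits come from the largest
 * distance slots, where limit - ALIGN_BITS = 30 - 4 = 26.
 */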

/*
 * Dictionary (history buffer)
 *
 * These are always true:
 *    start <= pos <= full <= end
 *    pos <= limit <= end
 *
 * In multi-call mode, also these are true:
 *    end == size
 *    size <= size_max
 *    allocated <= size
 *
 * Most of these variables are size_t to support single-call mode,
 * in which the dictionary variables address the actual output
 * buffer directly.
 */
struct dictionary {
	/* Beginning of the history buffer */
	uint8_t *buf;

	/* Old position in buf (before decoding more data) */
	size_t start;

	/* Position in buf */
	size_t pos;

	/*
	 * How full the dictionary is. This is used to detect corrupt input
	 * that would read beyond the beginning of the uncompressed stream.
	 */
	size_t full;

	/* Write limit; we don't write to buf[limit] or later bytes. */
	size_t limit;

	/*
	 * End of the dictionary buffer. In multi-call mode, this is
	 * the same as the dictionary size. In single-call mode, this
	 * indicates the size of the output buffer.
	 */
	size_t end;

	/*
	 * Size of the dictionary as specified in Block Header. This is used
	 * together with "full" to detect corrupt input that would make us
	 * read beyond the beginning of the uncompressed stream.
	 */
	uint32_t size;

	/*
	 * Maximum allowed dictionary size in multi-call mode.
	 * This is ignored in single-call mode.
	 */
	uint32_t size_max;

	/*
	 * Amount of memory currently allocated for the dictionary.
	 * This is used only with XZ_DYNALLOC. (With XZ_PREALLOC,
	 * size_max is always the same as the allocated size.)
	 */
	uint32_t allocated;

	/* Operation mode */
	enum xz_mode mode;
};

/* Range decoder */
struct rc_dec {
	uint32_t range;
	uint32_t code;

	/*
	 * Number of initializing bytes remaining to be read
	 * by rc_read_init().
	 */
	uint32_t init_bytes_left;

	/*
	 * Buffer from which we read our input. It can be either
	 * temp.buf or the caller-provided input buffer.
	 */
	const uint8_t *in;
	size_t in_pos;
	size_t in_limit;
};

/* Probabilities for a length decoder. */
struct lzma_len_dec {
	/* Probability of match length being at least 10 */
	uint16_t choice;

	/* Probability of match length being at least 18 */
	uint16_t choice2;

	/* Probabilities for match lengths 2-9 */
	uint16_t low[POS_STATES_MAX][LEN_LOW_SYMBOLS];

	/* Probabilities for match lengths 10-17 */
	uint16_t mid[POS_STATES_MAX][LEN_MID_SYMBOLS];

	/* Probabilities for match lengths 18-273 */
	uint16_t high[LEN_HIGH_SYMBOLS];
};

struct lzma_dec {
	/* Distances of latest four matches */
	uint32_t rep0;
	uint32_t rep1;
	uint32_t rep2;
	uint32_t rep3;

	/* Types of the most recently seen LZMA symbols */
	enum lzma_state state;

	/*
	 * Length of a match. This is updated so that dict_repeat can
	 * be called again to finish repeating the whole match.
	 */
	uint32_t len;

	/*
	 * LZMA properties or related bit masks (number of literal
	 * context bits, a mask derived from the number of literal
	 * position bits, and a mask derived from the number of
	 * position bits)
	 */
	uint32_t lc;
	uint32_t literal_pos_mask; /* (1 << lp) - 1 */
	uint32_t pos_mask;         /* (1 << pb) - 1 */

	/* If 1, it's a match. Otherwise it's a single 8-bit literal. */
	uint16_t is_match[STATES][POS_STATES_MAX];

	/* If 1, it's a repeated match. The distance is one of rep0 .. rep3. */
	uint16_t is_rep[STATES];

	/*
	 * If 0, distance of a repeated match is rep0.
	 * Otherwise check is_rep1.
	 */
	uint16_t is_rep0[STATES];

	/*
	 * If 0, distance of a repeated match is rep1.
	 * Otherwise check is_rep2.
	 */
	uint16_t is_rep1[STATES];

	/* If 0, distance of a repeated match is rep2. Otherwise it is rep3. */
	uint16_t is_rep2[STATES];

	/*
	 * If 0, the repeated match has length of one byte. Otherwise
	 * the length is decoded from rep_len_dec.
	 */
	uint16_t is_rep0_long[STATES][POS_STATES_MAX];

	/*
	 * Probability tree for the highest two bits of the match
	 * distance. There is a separate probability tree for match
	 * lengths of 2 (i.e. MATCH_LEN_MIN), 3, 4, and [5, 273].
	 */
	uint16_t dist_slot[DIST_STATES][DIST_SLOTS];

	/*
	 * Probability trees for additional bits for match distance
	 * when the distance is in the range [4, 127].
	 */
	uint16_t dist_special[FULL_DISTANCES - DIST_MODEL_END];

	/*
	 * Probability tree for the lowest four bits of a match
	 * distance that is equal to or greater than 128.
	 */
	uint16_t dist_align[ALIGN_SIZE];

	/* Length of a normal match */
	struct lzma_len_dec match_len_dec;

	/* Length of a repeated match */
	struct lzma_len_dec rep_len_dec;

	/* Probabilities of literals */
	uint16_t literal[LITERAL_CODERS_MAX][LITERAL_CODER_SIZE];
};

struct lzma2_dec {
	/* Position in xz_dec_lzma2_run(). */
	enum lzma2_seq {
		SEQ_CONTROL,
		SEQ_UNCOMPRESSED_1,
		SEQ_UNCOMPRESSED_2,
		SEQ_COMPRESSED_0,
		SEQ_COMPRESSED_1,
		SEQ_PROPERTIES,
		SEQ_LZMA_PREPARE,
		SEQ_LZMA_RUN,
		SEQ_COPY
	} sequence;

	/* Next position after decoding the compressed size of the chunk. */
	enum lzma2_seq next_sequence;

	/* Uncompressed size of LZMA chunk (2 MiB at maximum) */
	uint32_t uncompressed;

	/*
	 * Compressed size of LZMA chunk or compressed/uncompressed
	 * size of uncompressed chunk (64 KiB at maximum)
	 */
	uint32_t compressed;

	/*
	 * True if dictionary reset is needed. This is true before
	 * the first chunk (LZMA or uncompressed), because every
	 * LZMA2 stream must begin with a dictionary reset.
	 */
	bool need_dict_reset;

	/*
	 * True if new LZMA properties are needed. A dictionary reset
	 * sets this to true, so the first LZMA chunk after a
	 * dictionary reset must provide new properties.
	 */
	bool need_props;
};

struct xz_dec_lzma2 {
	/*
	 * The order below is important on x86 to reduce code size and
	 * it shouldn't hurt on other platforms. Everything up to and
	 * including lzma.pos_mask are in the first 128 bytes on x86-32,
	 * which allows using smaller instructions to access those
	 * variables. On x86-64, fewer variables fit into the first 128
	 * bytes, but this is still the best order without sacrificing
	 * the readability by splitting the structures.
	 */
	struct rc_dec rc;
	struct dictionary dict;
	struct lzma2_dec lzma2;
	struct lzma_dec lzma;

	/*
	 * Temporary buffer which holds a small number of input bytes
	 * between decoder calls. See lzma2_lzma() for details.
	 */
	struct {
		uint32_t size;
		uint8_t buf[3 * LZMA_IN_REQUIRED];
	} temp;
};

/**************
 * Dictionary *
 **************/

/*
 * Reset the dictionary state. When in single-call mode, set up the beginning
 * of the dictionary to point to the actual output buffer.
 */
static void XZ_FUNC dict_reset(struct dictionary *dict, struct xz_buf *b)
{
	if (DEC_IS_SINGLE(dict->mode)) {
		dict->buf = b->out + b->out_pos;
		dict->end = b->out_size - b->out_pos;
	}

	dict->start = 0;
	dict->pos = 0;
	dict->limit = 0;
	dict->full = 0;
}

/* Set dictionary write limit */
static void XZ_FUNC dict_limit(struct dictionary *dict, size_t out_max)
{
	if (dict->end - dict->pos <= out_max)
		dict->limit = dict->end;
	else
		dict->limit = dict->pos + out_max;
}

/* Return true if at least one byte can be written into the dictionary. */
static __always_inline bool XZ_FUNC dict_has_space(const struct dictionary *dict)
{
	return dict->pos < dict->limit;
}

/*
 * Get a byte from the dictionary at the given distance. The distance is
 * assumed to be valid, or as a special case, zero when the dictionary is
 * still empty. This special case is needed for single-call decoding to
 * avoid writing a '\0' to the end of the destination buffer.
 */
static __always_inline uint32_t XZ_FUNC dict_get(
		const struct dictionary *dict, uint32_t dist)
{
	size_t offset = dict->pos - dist - 1;

	if (dist >= dict->pos)
		offset += dict->end;

	return dict->full > 0 ? dict->buf[offset] : 0;
}
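
/*
 * For example: with pos = 5 and dist = 2, dict_get() reads
 * buf[5 - 2 - 1] = buf[2]. In multi-call mode the buffer is circular,
 * so with pos = 1, dist = 3, and end = 8 the offset wraps around to
 * 1 - 3 - 1 + 8 = 5, picking up a byte that was written near the end
 * of the previous pass over the buffer.
 */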

/*
 * Put one byte into the dictionary. It is assumed that there is space for it.
 */
static inline void XZ_FUNC dict_put(struct dictionary *dict, uint8_t byte)
{
	dict->buf[dict->pos++] = byte;

	if (dict->full < dict->pos)
		dict->full = dict->pos;
}

/*
 * Repeat given number of bytes from the given distance. If the distance is
 * invalid, false is returned. On success, true is returned and *len is
 * updated to indicate how many bytes were left to be repeated.
 */
static bool XZ_FUNC dict_repeat(
		struct dictionary *dict, uint32_t *len, uint32_t dist)
{
	size_t back;
	uint32_t left;

	if (dist >= dict->full || dist >= dict->size)
		return false;

	left = min_t(size_t, dict->limit - dict->pos, *len);
	*len -= left;

	back = dict->pos - dist - 1;
	if (dist >= dict->pos)
		back += dict->end;

	do {
		dict->buf[dict->pos++] = dict->buf[back++];
		if (back == dict->end)
			back = 0;
	} while (--left > 0);

	if (dict->full < dict->pos)
		dict->full = dict->pos;

	return true;
}

/* Copy uncompressed data as is from input to dictionary and output buffers. */
static void XZ_FUNC dict_uncompressed(
		struct dictionary *dict, struct xz_buf *b, uint32_t *left)
{
	size_t copy_size;

	while (*left > 0 && b->in_pos < b->in_size
			&& b->out_pos < b->out_size) {
		copy_size = min(b->in_size - b->in_pos,
				b->out_size - b->out_pos);
		if (copy_size > dict->end - dict->pos)
			copy_size = dict->end - dict->pos;
		if (copy_size > *left)
			copy_size = *left;

		*left -= copy_size;

		memcpy(dict->buf + dict->pos, b->in + b->in_pos, copy_size);
		dict->pos += copy_size;

		if (dict->full < dict->pos)
			dict->full = dict->pos;

		if (DEC_IS_MULTI(dict->mode)) {
			if (dict->pos == dict->end)
				dict->pos = 0;

			memcpy(b->out + b->out_pos, b->in + b->in_pos,
					copy_size);
		}

		dict->start = dict->pos;

		b->out_pos += copy_size;
		b->in_pos += copy_size;
	}
}

/*
 * Flush pending data from dictionary to b->out. It is assumed that there is
 * enough space in b->out. This is guaranteed because caller uses dict_limit()
 * before decoding data into the dictionary.
 */
static uint32_t XZ_FUNC dict_flush(struct dictionary *dict, struct xz_buf *b)
{
	size_t copy_size = dict->pos - dict->start;

	if (DEC_IS_MULTI(dict->mode)) {
		if (dict->pos == dict->end)
			dict->pos = 0;

		memcpy(b->out + b->out_pos, dict->buf + dict->start,
				copy_size);
	}

	dict->start = dict->pos;
	b->out_pos += copy_size;
	return copy_size;
}

/*****************
 * Range decoder *
 *****************/

/* Reset the range decoder. */
static void XZ_FUNC rc_reset(struct rc_dec *rc)
{
	rc->range = (uint32_t)-1;
	rc->code = 0;
	rc->init_bytes_left = RC_INIT_BYTES;
}

/*
 * Read the first five initial bytes into rc->code if they haven't been
 * read already. (Yes, the first byte gets completely ignored.)
 */
static bool XZ_FUNC rc_read_init(struct rc_dec *rc, struct xz_buf *b)
{
	while (rc->init_bytes_left > 0) {
		if (b->in_pos == b->in_size)
			return false;

		rc->code = (rc->code << 8) + b->in[b->in_pos++];
		--rc->init_bytes_left;
	}

	return true;
}

/* Return true if there may not be enough input for the next decoding loop. */
static inline bool XZ_FUNC rc_limit_exceeded(const struct rc_dec *rc)
{
	return rc->in_pos > rc->in_limit;
}

/*
 * Return true if it is possible (from the point of view of the range
 * decoder) that we have reached the end of the LZMA chunk.
 */
static inline bool XZ_FUNC rc_is_finished(const struct rc_dec *rc)
{
	return rc->code == 0;
}

/* Read the next input byte if needed. */
static __always_inline void XZ_FUNC rc_normalize(struct rc_dec *rc)
{
	if (rc->range < RC_TOP_VALUE) {
		rc->range <<= RC_SHIFT_BITS;
		rc->code = (rc->code << RC_SHIFT_BITS) + rc->in[rc->in_pos++];
	}
}

/*
 * Decode one bit. In some versions, this function has been split into three
 * functions so that the compiler is supposed to be able to more easily avoid
 * an extra branch. In this particular version of the LZMA decoder, this
 * doesn't seem to be a good idea (tested with GCC 3.3.6, 3.4.6, and 4.3.3
 * on x86). Using a non-split version results in nicer looking code too.
 *
 * NOTE: This must return an int. Do not make it return a bool or the speed
 * of the code generated by GCC 3.x decreases 10-15 %. (GCC 4.3 doesn't care,
 * and it generates 10-20 % faster code than GCC 3.x from this file anyway.)
 */
static __always_inline int XZ_FUNC rc_bit(struct rc_dec *rc, uint16_t *prob)
{
	uint32_t bound;
	int bit;

	rc_normalize(rc);
	bound = (rc->range >> RC_BIT_MODEL_TOTAL_BITS) * *prob;
	if (rc->code < bound) {
		rc->range = bound;
		*prob += (RC_BIT_MODEL_TOTAL - *prob) >> RC_MOVE_BITS;
		bit = 0;
	} else {
		rc->range -= bound;
		rc->code -= bound;
		*prob -= *prob >> RC_MOVE_BITS;
		bit = 1;
	}

	return bit;
}
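
/*
 * For example: with a fresh probability of RC_BIT_MODEL_TOTAL / 2 = 1024,
 * bound is roughly half of range, so both bit values are about equally
 * likely. Each decoded 0 then moves the probability up, and each decoded 1
 * moves it down, by 1/32 (RC_MOVE_BITS = 5) of the remaining distance to
 * the corresponding extreme; this is how the model adapts to the data.
 */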

/* Decode a bittree starting from the most significant bit. */
static __always_inline uint32_t XZ_FUNC rc_bittree(
		struct rc_dec *rc, uint16_t *probs, uint32_t limit)
{
	uint32_t symbol = 1;

	do {
		if (rc_bit(rc, &probs[symbol]))
			symbol = (symbol << 1) + 1;
		else
			symbol <<= 1;
	} while (symbol < limit);

	return symbol;
}

/* Decode a bittree starting from the least significant bit. */
static __always_inline void XZ_FUNC rc_bittree_reverse(struct rc_dec *rc,
		uint16_t *probs, uint32_t *dest, uint32_t limit)
{
	uint32_t symbol = 1;
	uint32_t i = 0;

	do {
		if (rc_bit(rc, &probs[symbol])) {
			symbol = (symbol << 1) + 1;
			*dest += 1 << i;
		} else {
			symbol <<= 1;
		}
	} while (++i < limit);
}

/* Decode direct bits (fixed fifty-fifty probability) */
static inline void XZ_FUNC rc_direct(
		struct rc_dec *rc, uint32_t *dest, uint32_t limit)
{
	uint32_t mask;

	do {
		rc_normalize(rc);
		rc->range >>= 1;
		rc->code -= rc->range;
		mask = (uint32_t)0 - (rc->code >> 31);
		rc->code += rc->range & mask;
		*dest = (*dest << 1) + (mask + 1);
	} while (--limit > 0);
}
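
/*
 * The mask computation above is a branchless bit decode: after the
 * subtraction, the sign bit of code tells whether it underflowed. If it
 * did, mask is 0xFFFFFFFF, the subtraction is undone by adding range
 * back, and mask + 1 == 0 appends a 0 bit; otherwise mask is 0 and
 * mask + 1 appends a 1 bit.
 */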

/********
 * LZMA *
 ********/

/* Get pointer to literal coder probability array. */
static uint16_t * XZ_FUNC lzma_literal_probs(struct xz_dec_lzma2 *s)
{
	uint32_t prev_byte = dict_get(&s->dict, 0);
	uint32_t low = prev_byte >> (8 - s->lzma.lc);
	uint32_t high = (s->dict.pos & s->lzma.literal_pos_mask) << s->lzma.lc;
	return s->lzma.literal[low + high];
}
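
/*
 * For example, with the common properties lc = 3 and lp = 0
 * (literal_pos_mask = 0): low = prev_byte >> 5 and high = 0, so the
 * coder is selected purely by the top three bits of the previous byte,
 * giving 1 << (lc + lp) = 8 literal coders in total.
 */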

/* Decode a literal (one 8-bit byte) */
static void XZ_FUNC lzma_literal(struct xz_dec_lzma2 *s)
{
	uint16_t *probs;
	uint32_t symbol;
	uint32_t match_byte;
	uint32_t match_bit;
	uint32_t offset;
	uint32_t i;

	probs = lzma_literal_probs(s);

	if (lzma_state_is_literal(s->lzma.state)) {
		symbol = rc_bittree(&s->rc, probs, 0x100);
	} else {
		symbol = 1;
		match_byte = dict_get(&s->dict, s->lzma.rep0) << 1;
		offset = 0x100;

		do {
			match_bit = match_byte & offset;
			match_byte <<= 1;
			i = offset + match_bit + symbol;

			if (rc_bit(&s->rc, &probs[i])) {
				symbol = (symbol << 1) + 1;
				offset &= match_bit;
			} else {
				symbol <<= 1;
				offset &= ~match_bit;
			}
		} while (symbol < 0x100);
	}

	dict_put(&s->dict, (uint8_t)symbol);
	lzma_state_literal(&s->lzma.state);
}

/* Decode the length of the match into s->lzma.len. */
static void XZ_FUNC lzma_len(struct xz_dec_lzma2 *s, struct lzma_len_dec *l,
		uint32_t pos_state)
{
	uint16_t *probs;
	uint32_t limit;

	if (!rc_bit(&s->rc, &l->choice)) {
		probs = l->low[pos_state];
		limit = LEN_LOW_SYMBOLS;
		s->lzma.len = MATCH_LEN_MIN;
	} else {
		if (!rc_bit(&s->rc, &l->choice2)) {
			probs = l->mid[pos_state];
			limit = LEN_MID_SYMBOLS;
			s->lzma.len = MATCH_LEN_MIN + LEN_LOW_SYMBOLS;
		} else {
			probs = l->high;
			limit = LEN_HIGH_SYMBOLS;
			s->lzma.len = MATCH_LEN_MIN + LEN_LOW_SYMBOLS
					+ LEN_MID_SYMBOLS;
		}
	}

	s->lzma.len += rc_bittree(&s->rc, probs, limit) - limit;
}
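
/*
 * For example, if choice decodes to 0, rc_bittree() walks the 8-leaf
 * "low" tree and returns a value in [8, 15]; subtracting limit (8) and
 * adding MATCH_LEN_MIN (2) yields a match length in [2, 9]. The "mid"
 * tree covers [10, 17] the same way, and the 256-leaf "high" tree
 * covers [18, 273].
 */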

/* Decode a match. The distance will be stored in s->lzma.rep0. */
static void XZ_FUNC lzma_match(struct xz_dec_lzma2 *s, uint32_t pos_state)
{
	uint16_t *probs;
	uint32_t dist_slot;
	uint32_t limit;

	lzma_state_match(&s->lzma.state);

	s->lzma.rep3 = s->lzma.rep2;
	s->lzma.rep2 = s->lzma.rep1;
	s->lzma.rep1 = s->lzma.rep0;

	lzma_len(s, &s->lzma.match_len_dec, pos_state);

	probs = s->lzma.dist_slot[lzma_get_dist_state(s->lzma.len)];
	dist_slot = rc_bittree(&s->rc, probs, DIST_SLOTS) - DIST_SLOTS;

	if (dist_slot < DIST_MODEL_START) {
		s->lzma.rep0 = dist_slot;
	} else {
		limit = (dist_slot >> 1) - 1;
		s->lzma.rep0 = 2 + (dist_slot & 1);

		if (dist_slot < DIST_MODEL_END) {
			s->lzma.rep0 <<= limit;
			probs = s->lzma.dist_special + s->lzma.rep0
					- dist_slot - 1;
			rc_bittree_reverse(&s->rc, probs,
					&s->lzma.rep0, limit);
		} else {
			rc_direct(&s->rc, &s->lzma.rep0, limit - ALIGN_BITS);
			s->lzma.rep0 <<= ALIGN_BITS;
			rc_bittree_reverse(&s->rc, s->lzma.dist_align,
					&s->lzma.rep0, ALIGN_BITS);
		}
	}
}
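
/*
 * For example, dist_slot = 10 gives limit = (10 >> 1) - 1 = 4 and a
 * base of (2 + (10 & 1)) << 4 = 32; the four reverse-bittree bits then
 * select one of the distances 32..47. Slots below DIST_MODEL_START
 * encode the distance directly, and slots at DIST_MODEL_END or above
 * switch to direct bits plus the four aligned bits.
 */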

/*
 * Decode a repeated match. The distance is one of the four most recently
 * seen matches. The distance will be stored in s->lzma.rep0.
 */
static void XZ_FUNC lzma_rep_match(struct xz_dec_lzma2 *s, uint32_t pos_state)
{
	uint32_t tmp;

	if (!rc_bit(&s->rc, &s->lzma.is_rep0[s->lzma.state])) {
		if (!rc_bit(&s->rc, &s->lzma.is_rep0_long[
				s->lzma.state][pos_state])) {
			lzma_state_short_rep(&s->lzma.state);
			s->lzma.len = 1;
			return;
		}
	} else {
		if (!rc_bit(&s->rc, &s->lzma.is_rep1[s->lzma.state])) {
			tmp = s->lzma.rep1;
		} else {
			if (!rc_bit(&s->rc, &s->lzma.is_rep2[s->lzma.state])) {
				tmp = s->lzma.rep2;
			} else {
				tmp = s->lzma.rep3;
				s->lzma.rep3 = s->lzma.rep2;
			}

			s->lzma.rep2 = s->lzma.rep1;
		}

		s->lzma.rep1 = s->lzma.rep0;
		s->lzma.rep0 = tmp;
	}

	lzma_state_long_rep(&s->lzma.state);
	lzma_len(s, &s->lzma.rep_len_dec, pos_state);
}

/* LZMA decoder core */
static bool XZ_FUNC lzma_main(struct xz_dec_lzma2 *s)
{
	uint32_t pos_state;

	/*
	 * If the dictionary limit was reached during the previous call,
	 * try to finish the possibly pending repeat in the dictionary.
	 */
	if (dict_has_space(&s->dict) && s->lzma.len > 0)
		dict_repeat(&s->dict, &s->lzma.len, s->lzma.rep0);

	/*
	 * Decode more LZMA symbols. One iteration may consume up to
	 * LZMA_IN_REQUIRED - 1 bytes.
	 */
	while (dict_has_space(&s->dict) && !rc_limit_exceeded(&s->rc)) {
		pos_state = s->dict.pos & s->lzma.pos_mask;

		if (!rc_bit(&s->rc, &s->lzma.is_match[
				s->lzma.state][pos_state])) {
			lzma_literal(s);
		} else {
			if (rc_bit(&s->rc, &s->lzma.is_rep[s->lzma.state]))
				lzma_rep_match(s, pos_state);
			else
				lzma_match(s, pos_state);

			if (!dict_repeat(&s->dict, &s->lzma.len, s->lzma.rep0))
				return false;
		}
	}

	/*
	 * Having the range decoder always normalized when we are outside
	 * this function makes it easier to correctly handle end of the chunk.
	 */
	rc_normalize(&s->rc);

	return true;
}

/*
 * Reset the LZMA decoder and range decoder state. The dictionary is not
 * reset here, because LZMA state may be reset without resetting the
 * dictionary.
 */
static void XZ_FUNC lzma_reset(struct xz_dec_lzma2 *s)
{
	uint16_t *probs;
	size_t i;

	s->lzma.state = STATE_LIT_LIT;
	s->lzma.rep0 = 0;
	s->lzma.rep1 = 0;
	s->lzma.rep2 = 0;
	s->lzma.rep3 = 0;

	/*
	 * All probabilities are initialized to the same value. This hack
	 * makes the code smaller by avoiding a separate loop for each
	 * probability array.
	 *
	 * This could be optimized so that only the part of the literal
	 * probabilities that is actually required gets initialized. In
	 * the common case we would write 12 KiB less.
	 */
	probs = s->lzma.is_match[0];
	for (i = 0; i < PROBS_TOTAL; ++i)
		probs[i] = RC_BIT_MODEL_TOTAL / 2;

	rc_reset(&s->rc);
}

/*
 * Decode and validate LZMA properties (lc/lp/pb) and calculate the bit masks
 * from the decoded lp and pb values. On success, the LZMA decoder state is
 * reset and true is returned.
 */
static bool XZ_FUNC lzma_props(struct xz_dec_lzma2 *s, uint8_t props)
{
	if (props > (4 * 5 + 4) * 9 + 8)
		return false;

	s->lzma.pos_mask = 0;
	while (props >= 9 * 5) {
		props -= 9 * 5;
		++s->lzma.pos_mask;
	}

	s->lzma.pos_mask = (1 << s->lzma.pos_mask) - 1;

	s->lzma.literal_pos_mask = 0;
	while (props >= 9) {
		props -= 9;
		++s->lzma.literal_pos_mask;
	}

	s->lzma.lc = props;

	if (s->lzma.lc + s->lzma.literal_pos_mask > 4)
		return false;

	s->lzma.literal_pos_mask = (1 << s->lzma.literal_pos_mask) - 1;

	lzma_reset(s);

	return true;
}
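
/*
 * For example, the default xz properties lc = 3, lp = 0, pb = 2 are
 * encoded as (2 * 5 + 0) * 9 + 3 = 93 (0x5D); the loops above decode
 * that back by first stripping multiples of 9 * 5 (giving pb) and then
 * multiples of 9 (giving lp), leaving lc as the remainder.
 */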

/*********
 * LZMA2 *
 *********/

/*
 * The LZMA decoder assumes that if the input limit (s->rc.in_limit) hasn't
 * been exceeded, it is safe to read up to LZMA_IN_REQUIRED bytes. This
 * wrapper function takes care of making the LZMA decoder's assumption safe.
 *
 * As long as there is plenty of input left to be decoded in the current LZMA
 * chunk, we decode directly from the caller-supplied input buffer until
 * there are LZMA_IN_REQUIRED bytes left. Those remaining bytes are copied
 * into s->temp.buf, which (hopefully) gets filled on the next call to this
 * function. We decode a few bytes from the temporary buffer so that we can
 * continue decoding from the caller-supplied input buffer again.
 */
static bool XZ_FUNC lzma2_lzma(struct xz_dec_lzma2 *s, struct xz_buf *b)
{
	size_t in_avail;
	uint32_t tmp;

	in_avail = b->in_size - b->in_pos;
	if (s->temp.size > 0 || s->lzma2.compressed == 0) {
		tmp = 2 * LZMA_IN_REQUIRED - s->temp.size;
		if (tmp > s->lzma2.compressed - s->temp.size)
			tmp = s->lzma2.compressed - s->temp.size;
		if (tmp > in_avail)
			tmp = in_avail;

		memcpy(s->temp.buf + s->temp.size, b->in + b->in_pos, tmp);

		if (s->temp.size + tmp == s->lzma2.compressed) {
			memzero(s->temp.buf + s->temp.size + tmp,
					sizeof(s->temp.buf)
						- s->temp.size - tmp);
			s->rc.in_limit = s->temp.size + tmp;
		} else if (s->temp.size + tmp < LZMA_IN_REQUIRED) {
			s->temp.size += tmp;
			b->in_pos += tmp;
			return true;
		} else {
			s->rc.in_limit = s->temp.size + tmp - LZMA_IN_REQUIRED;
		}

		s->rc.in = s->temp.buf;
		s->rc.in_pos = 0;

		if (!lzma_main(s) || s->rc.in_pos > s->temp.size + tmp)
			return false;

		s->lzma2.compressed -= s->rc.in_pos;

		if (s->rc.in_pos < s->temp.size) {
			s->temp.size -= s->rc.in_pos;
			memmove(s->temp.buf, s->temp.buf + s->rc.in_pos,
					s->temp.size);
			return true;
		}

		b->in_pos += s->rc.in_pos - s->temp.size;
		s->temp.size = 0;
	}

	in_avail = b->in_size - b->in_pos;
	if (in_avail >= LZMA_IN_REQUIRED) {
		s->rc.in = b->in;
		s->rc.in_pos = b->in_pos;

		if (in_avail >= s->lzma2.compressed + LZMA_IN_REQUIRED)
			s->rc.in_limit = b->in_pos + s->lzma2.compressed;
		else
			s->rc.in_limit = b->in_size - LZMA_IN_REQUIRED;

		if (!lzma_main(s))
			return false;

		in_avail = s->rc.in_pos - b->in_pos;
		if (in_avail > s->lzma2.compressed)
			return false;

		s->lzma2.compressed -= in_avail;
		b->in_pos = s->rc.in_pos;
	}

	in_avail = b->in_size - b->in_pos;
	if (in_avail < LZMA_IN_REQUIRED) {
		if (in_avail > s->lzma2.compressed)
			in_avail = s->lzma2.compressed;

		memcpy(s->temp.buf, b->in + b->in_pos, in_avail);
		s->temp.size = in_avail;
		b->in_pos += in_avail;
	}

	return true;
}

/*
 * Take care of the LZMA2 control layer, and forward the job of actual LZMA
 * decoding or copying of uncompressed chunks to other functions.
 */
XZ_EXTERN NOINLINE enum xz_ret XZ_FUNC xz_dec_lzma2_run(
		struct xz_dec_lzma2 *s, struct xz_buf *b)
{
	uint32_t tmp;

	while (b->in_pos < b->in_size || s->lzma2.sequence == SEQ_LZMA_RUN) {
		switch (s->lzma2.sequence) {
		case SEQ_CONTROL:
			/*
			 * LZMA2 control byte
			 *
			 * Exact values:
			 *   0x00   End marker
			 *   0x01   Dictionary reset followed by
			 *          an uncompressed chunk
			 *   0x02   Uncompressed chunk (no dictionary reset)
			 *
			 * Highest three bits (s->control & 0xE0):
			 *   0xE0   Dictionary reset, new properties and state
			 *          reset, followed by LZMA compressed chunk
			 *   0xC0   New properties and state reset, followed
			 *          by LZMA compressed chunk (no dictionary
			 *          reset)
			 *   0xA0   State reset using old properties,
			 *          followed by LZMA compressed chunk (no
			 *          dictionary reset)
			 *   0x80   LZMA chunk (no dictionary or state reset)
			 *
			 * For LZMA compressed chunks, the lowest five bits
			 * (s->control & 0x1F) are the highest bits of the
			 * uncompressed size (bits 16-20).
			 *
			 * A new LZMA2 stream must begin with a dictionary
			 * reset. The first LZMA chunk must set new
			 * properties and reset the LZMA state.
			 *
			 * Values that don't match anything described above
			 * are invalid and we return XZ_DATA_ERROR.
			 */
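			/*
			 * For example, control byte 0x82 begins a plain
			 * LZMA chunk and contributes (0x82 & 0x1F) << 16
			 * = 0x20000 to the uncompressed size; the bytes
			 * read at SEQ_UNCOMPRESSED_1 and SEQ_UNCOMPRESSED_2
			 * supply bits 8-15 and 0-7 (plus one), so the size
			 * can be at most 2 MiB.
			 */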
			tmp = b->in[b->in_pos++];

			if (tmp == 0x00)
				return XZ_STREAM_END;

			if (tmp >= 0xE0 || tmp == 0x01) {
				s->lzma2.need_props = true;
				s->lzma2.need_dict_reset = false;
				dict_reset(&s->dict, b);
			} else if (s->lzma2.need_dict_reset) {
				return XZ_DATA_ERROR;
			}

			if (tmp >= 0x80) {
				s->lzma2.uncompressed = (tmp & 0x1F) << 16;
				s->lzma2.sequence = SEQ_UNCOMPRESSED_1;

				if (tmp >= 0xC0) {
					/*
					 * When there are new properties,
					 * state reset is done at
					 * SEQ_PROPERTIES.
					 */
					s->lzma2.need_props = false;
					s->lzma2.next_sequence
							= SEQ_PROPERTIES;
				} else if (s->lzma2.need_props) {
					return XZ_DATA_ERROR;
				} else {
					s->lzma2.next_sequence
							= SEQ_LZMA_PREPARE;
					if (tmp >= 0xA0)
						lzma_reset(s);
				}
			} else {
				if (tmp > 0x02)
					return XZ_DATA_ERROR;

				s->lzma2.sequence = SEQ_COMPRESSED_0;
				s->lzma2.next_sequence = SEQ_COPY;
			}

			break;

		case SEQ_UNCOMPRESSED_1:
			s->lzma2.uncompressed
					+= (uint32_t)b->in[b->in_pos++] << 8;
			s->lzma2.sequence = SEQ_UNCOMPRESSED_2;
			break;

		case SEQ_UNCOMPRESSED_2:
			s->lzma2.uncompressed
					+= (uint32_t)b->in[b->in_pos++] + 1;
			s->lzma2.sequence = SEQ_COMPRESSED_0;
			break;

		case SEQ_COMPRESSED_0:
			s->lzma2.compressed
					= (uint32_t)b->in[b->in_pos++] << 8;
			s->lzma2.sequence = SEQ_COMPRESSED_1;
			break;

		case SEQ_COMPRESSED_1:
			s->lzma2.compressed
					+= (uint32_t)b->in[b->in_pos++] + 1;
			s->lzma2.sequence = s->lzma2.next_sequence;
			break;

		case SEQ_PROPERTIES:
			if (!lzma_props(s, b->in[b->in_pos++]))
				return XZ_DATA_ERROR;

			s->lzma2.sequence = SEQ_LZMA_PREPARE;

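			/* Fall through */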
		case SEQ_LZMA_PREPARE:
			if (s->lzma2.compressed < RC_INIT_BYTES)
				return XZ_DATA_ERROR;

			if (!rc_read_init(&s->rc, b))
				return XZ_OK;

			s->lzma2.compressed -= RC_INIT_BYTES;
			s->lzma2.sequence = SEQ_LZMA_RUN;

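			/* Fall through */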
		case SEQ_LZMA_RUN:
			/*
			 * Set dictionary limit to indicate how much we want
			 * to be decoded at maximum. Decode new data into the
			 * dictionary. Flush the new data from dictionary to
			 * b->out. Check if we finished decoding this chunk.
			 * In case the dictionary got full but we didn't fill
			 * the output buffer yet, we may run this loop
			 * multiple times without changing s->lzma2.sequence.
			 */
			dict_limit(&s->dict, min_t(size_t,
					b->out_size - b->out_pos,
					s->lzma2.uncompressed));
			if (!lzma2_lzma(s, b))
				return XZ_DATA_ERROR;

			s->lzma2.uncompressed -= dict_flush(&s->dict, b);

			if (s->lzma2.uncompressed == 0) {
				if (s->lzma2.compressed > 0 || s->lzma.len > 0
						|| !rc_is_finished(&s->rc))
					return XZ_DATA_ERROR;

				rc_reset(&s->rc);
				s->lzma2.sequence = SEQ_CONTROL;
			} else if (b->out_pos == b->out_size
					|| (b->in_pos == b->in_size
						&& s->temp.size
						< s->lzma2.compressed)) {
				return XZ_OK;
			}

			break;

		case SEQ_COPY:
			dict_uncompressed(&s->dict, b, &s->lzma2.compressed);
			if (s->lzma2.compressed > 0)
				return XZ_OK;

			s->lzma2.sequence = SEQ_CONTROL;
			break;
		}
	}

	return XZ_OK;
}

XZ_EXTERN struct xz_dec_lzma2 * XZ_FUNC xz_dec_lzma2_create(
		enum xz_mode mode, uint32_t dict_max)
{
	struct xz_dec_lzma2 *s = kmalloc(sizeof(*s), GFP_KERNEL);
	if (s == NULL)
		return NULL;

	s->dict.mode = mode;
	s->dict.size_max = dict_max;

	if (DEC_IS_PREALLOC(mode)) {
		s->dict.buf = vmalloc(dict_max);
		if (s->dict.buf == NULL) {
			kfree(s);
			return NULL;
		}
	} else if (DEC_IS_DYNALLOC(mode)) {
		s->dict.buf = NULL;
		s->dict.allocated = 0;
	}

	return s;
}

XZ_EXTERN enum xz_ret XZ_FUNC xz_dec_lzma2_reset(
		struct xz_dec_lzma2 *s, uint8_t props)
{
	/* This limits dictionary size to 3 GiB to keep parsing simpler. */
	if (props > 39)
		return XZ_OPTIONS_ERROR;

	s->dict.size = 2 + (props & 1);
	s->dict.size <<= (props >> 1) + 11;
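
	/*
	 * For example, props = 28 gives a dictionary size of
	 * (2 + 0) << (14 + 11) = 64 MiB, and the largest accepted value,
	 * props = 39, gives 3 << 30 = 3 GiB.
	 */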

	if (DEC_IS_MULTI(s->dict.mode)) {
		if (s->dict.size > s->dict.size_max)
			return XZ_MEMLIMIT_ERROR;

		s->dict.end = s->dict.size;

		if (DEC_IS_DYNALLOC(s->dict.mode)) {
			if (s->dict.allocated < s->dict.size) {
				/* Remember the new size so we don't
				   reallocate on every reset. */
				s->dict.allocated = s->dict.size;
				vfree(s->dict.buf);
				s->dict.buf = vmalloc(s->dict.size);
				if (s->dict.buf == NULL) {
					s->dict.allocated = 0;
					return XZ_MEM_ERROR;
				}
			}
		}
	}

	s->lzma.len = 0;

	s->lzma2.sequence = SEQ_CONTROL;
	s->lzma2.need_dict_reset = true;

	s->temp.size = 0;

	return XZ_OK;
}

XZ_EXTERN void XZ_FUNC xz_dec_lzma2_end(struct xz_dec_lzma2 *s)
{
	if (DEC_IS_MULTI(s->dict.mode))
		vfree(s->dict.buf);

	kfree(s);
}