| 1 | //! Deflate format (RFC 1951) support. |
| 2 | //! |
//! This module provides functionality for decompressing raw deflate streams via [`Inflate`], decompressing gzip files (RFC 1952) via [`gzip_decode`], and compressing raw or zlib streams via [`Deflate`].
| 4 | //! |
| 5 | //! [`Deflate`]: ./struct.Deflate.html |
| 6 | //! [`Inflate`]: ./struct.Inflate.html |
| 7 | //! [`gzip_decode`]: ./fn.gzip_decode.html |
| 8 | //! |
| 9 | //! # Examples |
| 10 | //! |
| 11 | //! Decompressing full input buffer into sufficiently large output buffer: |
| 12 | //! ``` |
| 13 | //! # use nihav_core::compr::DecompressError; |
| 14 | //! use nihav_core::compr::deflate::Inflate; |
| 15 | //! |
| 16 | //! # fn decompress(input: &[u8]) -> Result<(), DecompressError> { |
| 17 | //! # let mut output_buffer = [0u8; 16]; |
| 18 | //! let output_length = Inflate::uncompress(input, &mut output_buffer)?; |
| 19 | //! # Ok(()) |
| 20 | //! # } |
| 21 | //! ``` |
| 22 | //! |
| 23 | //! Decompressing input chunks into portions of output: |
| 24 | //! ``` |
| 25 | //! use nihav_core::compr::DecompressError; |
| 26 | //! use nihav_core::compr::deflate::Inflate; |
| 27 | //! |
| 28 | //! # fn decompress(input_data: &[u8]) -> Result<(), DecompressError> { |
| 29 | //! let mut inflate = Inflate::new(); |
| 30 | //! let mut dst_buf: Vec<u8> = Vec::new(); |
| 31 | //! let mut output_chunk = [0u8; 1024]; |
| 32 | //! for src in input_data.chunks(512) { |
| 33 | //! let mut repeat = false; |
| 34 | //! loop { |
| 35 | //! let ret = inflate.decompress_data(src, &mut output_chunk, repeat); |
| 36 | //! match ret { |
| 37 | //! Ok(len) => { // we got a buffer decoded successfully to the end |
| 38 | //! dst_buf.extend_from_slice(&output_chunk[..len]); |
| 39 | //! break; |
| 40 | //! }, |
| 41 | //! Err(DecompressError::ShortData) => { // this block of data was fully read |
| 42 | //! break; |
| 43 | //! }, |
| 44 | //! Err(DecompressError::OutputFull) => { |
| 45 | //! // the output buffer is full, flush it and continue decoding the same block |
| 46 | //! repeat = true; |
| 47 | //! dst_buf.extend_from_slice(&output_chunk); |
| 48 | //! }, |
| 49 | //! Err(err) => { |
| 50 | //! return Err(err); |
| 51 | //! }, |
| 52 | //! } |
| 53 | //! } |
| 54 | //! } |
| 55 | //! # Ok(()) |
| 56 | //! # } |
| 57 | //! ``` |
| 58 | //! |
| 59 | //! Compressing input buffer into zlib stream: |
| 60 | //! ``` |
| 61 | //! use nihav_core::compr::deflate::{Deflate, DeflateMode, DeflateWriter}; |
| 62 | //! |
| 63 | //! # fn compress(input: &[u8]) { |
| 64 | //! let output = Vec::with_capacity(input.len() * 65540 / 65535 + 6); |
| 65 | //! let mut writer = DeflateWriter::new(output); |
| 66 | //! let mut compr = Deflate::new(DeflateMode::Fast); |
| 67 | //! compr.write_zlib_header(&mut writer); |
| 68 | //! compr.compress(input, &mut writer); |
| 69 | //! compr.compress_end(&mut writer); |
| 70 | //! let output = writer.end(); |
| 71 | //! # } |
| 72 | //! ``` |
| 73 | |
| 74 | use crate::options::NAOptionDefinitionType; |
| 75 | use crate::io::byteio::*; |
| 76 | use crate::io::bitreader::*; |
| 77 | use crate::io::codebook::*; |
| 78 | use super::*; |
| 79 | |
// Size bound for the literal/length alphabet; the fixed-tree codebook below
// uses NUM_LITERALS + 1 (= 288) entries.
const NUM_LITERALS: usize = 287;
// Size bound for the distance alphabet.
const NUM_DISTS: usize = 32;
| 82 | |
| 83 | struct FixedLenCodeReader {} |
| 84 | |
| 85 | impl CodebookDescReader<u16> for FixedLenCodeReader { |
| 86 | fn bits(&mut self, idx: usize) -> u8 { |
| 87 | if idx < 144 { 8 } |
| 88 | else if idx < 256 { 9 } |
| 89 | else if idx < 280 { 7 } |
| 90 | else { 8 } |
| 91 | } |
| 92 | #[allow(clippy::identity_op)] |
| 93 | fn code(&mut self, idx: usize) -> u32 { |
| 94 | let base = idx as u32; |
| 95 | let bits = self.bits(idx); |
| 96 | if idx < 144 { reverse_bits(base + 0x30, bits) } |
| 97 | else if idx < 256 { reverse_bits(base + 0x190 - 144, bits) } |
| 98 | else if idx < 280 { reverse_bits(base + 0x000 - 256, bits) } |
| 99 | else { reverse_bits(base + 0xC0 - 280, bits) } |
| 100 | } |
| 101 | fn sym (&mut self, idx: usize) -> u16 { idx as u16 } |
| 102 | fn len(&mut self) -> usize { NUM_LITERALS + 1 } |
| 103 | } |
| 104 | |
/// Saved LSB-first bit reader state, carried across input chunks.
#[derive(Clone,Copy,Default)]
struct BitReaderState {
    pos: usize,     // byte position inside the current source chunk
    bitbuf: u32,    // cached bits (LSB-first)
    bits: u8,       // number of valid bits currently in `bitbuf`
}
| 111 | |
/// Bit reader over the current input chunk, pairing the chunk slice with the
/// persistent reader state so decoding can be suspended and resumed.
struct CurrentSource<'a> {
    src: &'a [u8],
    br: BitReaderState,
}
| 116 | |
impl<'a> CurrentSource<'a> {
    /// Starts reading a fresh input chunk: keeps any bits cached from the
    /// previous chunk but resets the byte position to the chunk start.
    fn new(src: &'a [u8], br: BitReaderState) -> Self {
        let mut newsrc = Self { src, br };
        newsrc.br.pos = 0;
        newsrc.refill();
        newsrc
    }
    /// Resumes reading the same chunk from the saved position.
    fn reinit(src: &'a [u8], br: BitReaderState) -> Self {
        let mut newsrc = Self { src, br };
        newsrc.refill();
        newsrc
    }
    /// Tops up the 32-bit cache while at least one whole byte fits.
    fn refill(&mut self) {
        while (self.br.bits <= 24) && (self.br.pos < self.src.len()) {
            self.br.bitbuf |= u32::from(self.src[self.br.pos]) << self.br.bits;
            self.br.bits += 8;
            self.br.pos += 1;
        }
    }
    /// Drops `nbits` consumed bits from the cache (caller guarantees they are cached).
    fn skip_cache(&mut self, nbits: u8) {
        self.br.bitbuf >>= nbits;
        self.br.bits -= nbits;
    }
    /// Reads up to 16 bits (LSB-first), refilling the cache on demand.
    fn read(&mut self, nbits: u8) -> BitReaderResult<u32> {
        if nbits == 0 { return Ok(0); }
        if nbits > 16 { return Err(BitReaderError::TooManyBitsRequested); }
        if self.br.bits < nbits {
            self.refill();
            if self.br.bits < nbits { return Err(BitReaderError::BitstreamEnd); }
        }
        let ret = self.br.bitbuf & ((1 << nbits) - 1);
        self.skip_cache(nbits);
        Ok(ret)
    }
    /// Reads one bit as a boolean.
    fn read_bool(&mut self) -> BitReaderResult<bool> {
        if self.br.bits == 0 {
            self.refill();
            if self.br.bits == 0 { return Err(BitReaderError::BitstreamEnd); }
        }
        let ret = (self.br.bitbuf & 1) != 0;
        self.skip_cache(1);
        Ok(ret)
    }
    /// Peeks at up to 16 bits without consuming them.
    /// Near the end of input fewer valid bits may be present; the rest read as zero.
    fn peek(&mut self, nbits: u8) -> u32 {
        if nbits == 0 || nbits > 16 { return 0; }
        if self.br.bits < nbits {
            self.refill();
        }
        self.br.bitbuf & ((1 << nbits) - 1)
    }
    /// Skips bits that are already cached; callers must have checked
    /// availability (e.g. via `left()`) beforehand, hence the `unreachable!`.
    fn skip(&mut self, nbits: u32) -> BitReaderResult<()> {
        if u32::from(self.br.bits) >= nbits {
            self.skip_cache(nbits as u8);
        } else {
            unreachable!();
        }
        Ok(())
    }
    /// Discards bits up to the next byte boundary.
    fn align(&mut self) {
        let b = self.br.bits & 7;
        if b != 0 {
            self.skip_cache(b);
        }
    }
    /// Number of bits still available (cached plus unread input bytes).
    fn left(&self) -> isize {
        ((self.src.len() as isize) - (self.br.pos as isize)) * 8 + (self.br.bits as isize)
    }
}
| 185 | |
impl<'a, S: Copy> CodebookReader<S> for CurrentSource<'a> {
    /// Decodes one codeword by walking the multi-level LUT of `cb`.
    ///
    /// On input underrun the bit reader is rolled back to its state before
    /// the partial read and `CodebookError::MemoryError` is returned, which
    /// the callers translate into "need more data" rather than a hard error.
    fn read_cb(&mut self, cb: &Codebook<S>) -> CodebookResult<S> {
        let mut esc = true;
        let mut idx = 0;
        let mut lut_bits = cb.lut_bits;
        let orig_br = self.br; // saved so a partial read can be undone
        while esc {
            let lut_idx = (self.peek(lut_bits) as usize) + (idx as usize);
            if cb.table[lut_idx] == TABLE_FILL_VALUE { return Err(CodebookError::InvalidCode); }
            let bits = cb.table[lut_idx] & 0x7F;
            // high bit set means this entry escapes into a sub-table
            esc = (cb.table[lut_idx] & 0x80) != 0;
            idx = (cb.table[lut_idx] >> 8) as usize;
            let skip_bits = if esc { u32::from(lut_bits) } else { bits };
            if (skip_bits as isize) > self.left() {
                self.br = orig_br;
                self.refill();
                return Err(CodebookError::MemoryError);
            }
            self.skip(skip_bits as u32).unwrap();
            lut_bits = bits as u8;
        }
        Ok(cb.syms[idx])
    }
}
| 210 | |
/// Decoder position inside the deflate stream; persisted between calls so
/// decoding can resume after running out of input or output space.
enum InflateState {
    Start,
    BlockStart,
    BlockMode,
    // stored (uncompressed) block handling
    StaticBlockLen,
    StaticBlockInvLen(u32),
    StaticBlockCopy(usize),
    // block coded with the fixed Huffman trees
    FixedBlock,
    FixedBlockLengthExt(usize, u8),
    FixedBlockDist(usize),
    FixedBlockDistExt(usize, usize, u8),
    FixedBlockCopy(usize, usize),
    FixedBlockLiteral(u8),
    // dynamic Huffman block: header/code-length parsing
    DynBlockHlit,
    DynBlockHdist,
    DynBlockHclen,
    DynLengths(usize),
    DynCodeLengths,
    DynCodeLengthsAdd(usize),
    // dynamic Huffman block: payload decoding
    DynBlock,
    DynBlockLengthExt(usize, u8),
    DynBlockDist(usize),
    DynBlockDistExt(usize, usize, u8),
    DynCopy(usize, usize),
    DynBlockLiteral(u8),
    End,
}
| 238 | |
/// The decompressor for deflated streams (RFC 1951).
pub struct Inflate {
    br: BitReaderState,             // bit reader state saved between calls
    fix_len_cb: Codebook<u16>,      // fixed literal/length codebook (built once)

    buf: [u8; 65536],               // 64 KiB LZ77 history ring buffer
    bpos: usize,                    // current (wrapping) write position in `buf`
    output_idx: usize,              // bytes written to `dst` in the current run
    full_pos: usize,                // total bytes decoded so far (monotonic)

    state: InflateState,            // resumable position in the stream
    final_block: bool,              // BFINAL flag of the current block
    hlit: usize,                    // number of literal/length codes (dynamic block)
    hdist: usize,                   // number of distance codes (dynamic block)
    dyn_len_cb: Option<Codebook<u32>>,   // code-length alphabet codebook
    dyn_lit_cb: Option<Codebook<u32>>,   // dynamic literal/length codebook
    dyn_dist_cb: Option<Codebook<u32>>,  // dynamic distance codebook
    len_lengths: [u8; 19],          // lengths for the code-length alphabet
    all_lengths: [u8; NUM_LITERALS + NUM_DISTS],  // combined lit + dist lengths
    cur_len_idx: usize,             // progress index into the length arrays
}
| 260 | |
// Extra bits carried by length codes 257..285 (RFC 1951, section 3.2.5).
const LENGTH_ADD_BITS: [u8; 29] = [
     0,  0,  0,  0,  0,  0,  0,  0,  1,  1,
     1,  1,  2,  2,  2,  2,  3,  3,  3,  3,
     4,  4,  4,  4,  5,  5,  5,  5,  0
];
// Base match lengths for length codes 257..285.
const LENGTH_BASE: [u16; 29] = [
      3,   4,   5,   6,   7,   8,   9,  10,  11,  13,
     15,  17,  19,  23,  27,  31,  35,  43,  51,  59,
     67,  83,  99, 115, 131, 163, 195, 227, 258
];
// Extra bits carried by distance codes 0..29.
const DIST_ADD_BITS: [u8; 30] = [
     0,  0,  0,  0,  1,  1,  2,  2,  3,  3,
     4,  4,  5,  5,  6,  6,  7,  7,  8,  8,
     9,  9, 10, 10, 11, 11, 12, 12, 13, 13
];
// Base match distances for distance codes 0..29.
const DIST_BASE: [u16; 30] = [
        1,    2,    3,    4,    5,    7,    9,   13,   17,   25,
       33,   49,   65,   97,  129,  193,  257,  385,  513,  769,
     1025, 1537, 2049, 3073, 4097, 6145, 8193, 12289, 16385, 24577
];
// Transmission order of the code-length alphabet lengths (RFC 1951, 3.2.7).
const LEN_RECODE: [usize; 19] = [
    16, 17, 18, 0, 8, 7, 9, 6, 10, 5, 11, 4, 12, 3, 13, 2, 14, 1, 15
];
// Extra bits for the repeat codes 16/17/18 of the code-length alphabet.
const REPEAT_BITS: [u8; 3] = [ 2, 3, 7 ];
// Base repeat counts for the repeat codes 16/17/18.
const REPEAT_BASE: [u8; 3] = [ 3, 3, 11 ];
| 286 | |
// Reads `$bits` bits from `$csrc`; when the chunk does not hold enough bits
// the reader state is saved back into `$self` and the enclosing function
// returns `ShortData` so decoding can resume with the next input chunk.
macro_rules! read_bits {
    ($self: expr, $csrc: expr, $bits: expr) => ({
        if $csrc.left() < $bits as isize {
            $self.br = $csrc.br;
            return Err(DecompressError::ShortData);
        }
        $csrc.read($bits).unwrap()
    })
}
| 296 | |
// Decodes one codeword from `$csrc` with codebook `$cb`.
// `MemoryError` (input underrun) maps to a resumable `ShortData`; any other
// codebook error ends the stream with `InvalidData`.
macro_rules! read_cb {
    ($self: expr, $csrc: expr, $cb: expr) => ({
        let ret = $csrc.read_cb($cb);
        if let Err(CodebookError::MemoryError) = ret {
            $self.br = $csrc.br;
            return Err(DecompressError::ShortData);
        }
        match ret {
            Ok(val) => val,
            Err(_) => {
                $self.state = InflateState::End;
                return Err(DecompressError::InvalidData);
            },
        }
    })
}
| 313 | |
| 314 | impl Inflate { |
    /// Creates a new instance of `Inflate` struct.
    pub fn new() -> Self {
        // the fixed literal/length codebook is always needed, so build it up front
        let mut cr = FixedLenCodeReader {};
        let fix_len_cb = Codebook::new(&mut cr, CodebookMode::LSB).unwrap();
        Self {
            br: BitReaderState::default(),
            fix_len_cb,

            buf: [0; 65536],
            bpos: 0,
            output_idx: 0,
            full_pos: 0,

            state: InflateState::Start,
            final_block: false,
            dyn_len_cb: None,
            dyn_lit_cb: None,
            dyn_dist_cb: None,
            hlit: 0,
            hdist: 0,
            len_lengths: [0; 19],
            all_lengths: [0; NUM_LITERALS + NUM_DISTS],
            cur_len_idx: 0,
        }
    }
| 340 | fn put_literal(&mut self, val: u8) { |
| 341 | self.buf[self.bpos] = val; |
| 342 | self.bpos = (self.bpos + 1) & (self.buf.len() - 1); |
| 343 | self.full_pos += 1; |
| 344 | } |
| 345 | fn lz_copy(&mut self, offset: usize, len: usize, dst: &mut [u8]) -> DecompressResult<()> { |
| 346 | let mask = self.buf.len() - 1; |
| 347 | if offset > self.full_pos { |
| 348 | return Err(DecompressError::InvalidData); |
| 349 | } |
| 350 | let cstart = (self.bpos.wrapping_sub(offset)) & mask; |
| 351 | for i in 0..len { |
| 352 | self.buf[(self.bpos + i) & mask] = self.buf[(cstart + i) & mask]; |
| 353 | dst[i] = self.buf[(cstart + i) & mask]; |
| 354 | } |
| 355 | self.bpos = (self.bpos + len) & mask; |
| 356 | self.full_pos += len; |
| 357 | Ok(()) |
| 358 | } |
| 359 | ///! Sets custom history for decoding an update for already decoded data. |
| 360 | pub fn set_dict(&mut self, dict: &[u8]) { |
| 361 | let len = dict.len().min(self.buf.len()); |
| 362 | let start = dict.len() - len; |
| 363 | self.buf[..len].copy_from_slice(&dict[start..]); |
| 364 | self.bpos = len; |
| 365 | self.full_pos = len; |
| 366 | } |
| 367 | ///! Reports whether decoder has finished decoding the input. |
| 368 | pub fn is_finished(&self) -> bool { |
| 369 | match self.state { |
| 370 | InflateState::End => true, |
| 371 | _ => false, |
| 372 | } |
| 373 | } |
    /// Reports the current amount of bytes output into the destination buffer after the last run.
    pub fn get_current_output_size(&self) -> usize { self.output_idx }
| 376 | ///! Reports the total amount of bytes decoded so far. |
| 377 | pub fn get_total_output_size(&self) -> usize { self.bpos } |
    /// Tries to decompress input data and write it to the output buffer.
    ///
    /// Since the decompressor can work with arbitrary input and output chunks its return value may have several meanings:
    /// * `Ok(len)` means the stream has been fully decoded and the number of bytes output into the destination buffer is returned.
    /// * [`DecompressError::ShortData`] means the input stream has been fully read but more data is needed.
    /// * [`DecompressError::OutputFull`] means the output buffer is full and should be flushed. Then decoding should continue on the same input block with `continue_block` parameter set to `true`.
    ///
    /// [`DecompressError::ShortData`]: ../enum.DecompressError.html#variant.ShortData
    /// [`DecompressError::OutputFull`]: ../enum.DecompressError.html#variant.OutputFull
    pub fn decompress_data(&mut self, src: &[u8], dst: &mut [u8], continue_block: bool) -> DecompressResult<usize> {
        self.decompress_data_internal(src, dst, continue_block, false)
    }
    /// Tries to decompress the whole input chunk to the output buffer.
    pub fn decompress_block(&mut self, src: &[u8], dst: &mut [u8]) -> DecompressResult<usize> {
        self.decompress_data_internal(src, dst, false, true)
    }
    /// Core state machine behind `decompress_data` and `decompress_block`.
    ///
    /// The position inside the deflate stream is tracked in `self.state` and
    /// the bit reader state is saved into `self.br` whenever the function
    /// suspends (`ShortData`/`OutputFull`), so a later call can resume exactly
    /// where decoding stopped.
    #[allow(clippy::comparison_chain)]
    fn decompress_data_internal(&mut self, src: &[u8], dst: &mut [u8], continue_block: bool, do_one_block: bool) -> DecompressResult<usize> {
        if src.is_empty() || dst.is_empty() {
            return Err(DecompressError::InvalidArgument);
        }
        let mut csrc = if !continue_block {
                CurrentSource::new(src, self.br)
            } else {
                // resuming the same input block after OutputFull: restart the
                // output accounting but keep the input position
                self.output_idx = 0;
                CurrentSource::reinit(src, self.br)
            };
        if do_one_block {
            self.output_idx = 0;
        }
        // check for zlib stream header
        // (NOTE(review): requires at least three bytes in the first chunk;
        // a two-byte first chunk would skip this detection — confirm callers)
        if let (&InflateState::Start, true) = (&self.state, src.len() > 2) {
            let cm = src[0] & 0xF;
            let cinfo = src[0] >> 4;
            let hdr = (u16::from(src[0]) << 8) | u16::from(src[1]);
            // CM 8 = deflate, window <= 32K, header checksum divisible by 31
            if cm == 8 && cinfo <= 7 && (hdr % 31) == 0 {
                csrc.skip(16).unwrap();
            }
        }
        'main: loop {
            match self.state {
                InflateState::Start | InflateState::BlockStart => {
                    if csrc.left() == 0 {
                        if do_one_block {
                            return Ok(self.output_idx);
                        }
                        self.br = csrc.br;
                        return Err(DecompressError::ShortData);
                    }
                    // BFINAL bit
                    self.final_block = csrc.read_bool().unwrap();
                    self.state = InflateState::BlockMode;
                },
                InflateState::BlockMode => {
                    // BTYPE: 0 = stored, 1 = fixed Huffman, 2 = dynamic Huffman
                    let bmode = read_bits!(self, csrc, 2);
                    match bmode {
                        0 => {
                            // stored block data starts at a byte boundary
                            csrc.align();
                            self.state = InflateState::StaticBlockLen;
                        },
                        1 => { self.state = InflateState::FixedBlock; },
                        2 => { self.state = InflateState::DynBlockHlit; },
                        _ => {
                            self.state = InflateState::End;
                            return Err(DecompressError::InvalidHeader);
                        },
                    };
                },
                InflateState::StaticBlockLen => {
                    let len = read_bits!(self, csrc, 16);
                    self.state = InflateState::StaticBlockInvLen(len);
                },
                InflateState::StaticBlockInvLen(len) => {
                    // LEN is followed by its one's complement NLEN
                    let inv_len = read_bits!(self, csrc, 16);
                    if (len ^ inv_len) != 0xFFFF {
                        self.state = InflateState::End;
                        return Err(DecompressError::InvalidHeader);
                    }
                    self.state = InflateState::StaticBlockCopy(len as usize);
                },
                InflateState::StaticBlockCopy(len) => {
                    // NOTE(review): stored bytes are placed into the history
                    // window only — they are not copied to `dst` and
                    // `output_idx` is not advanced; verify against callers
                    // whether stored-block data is expected in the output.
                    for i in 0..len {
                        if csrc.left() < 8 {
                            self.br = csrc.br;
                            self.state = InflateState::StaticBlockCopy(len - i);
                            return Err(DecompressError::ShortData);
                        }
                        let val = csrc.read(8).unwrap() as u8;
                        self.put_literal(val);
                    }
                    self.state = InflateState::BlockStart;
                }
                InflateState::FixedBlock => {
                    let val = read_cb!(self, csrc, &self.fix_len_cb);
                    if val < 256 {
                        // literal byte
                        if self.output_idx >= dst.len() {
                            self.br = csrc.br;
                            self.state = InflateState::FixedBlockLiteral(val as u8);
                            return Err(DecompressError::OutputFull);
                        }
                        self.put_literal(val as u8);
                        dst[self.output_idx] = val as u8;
                        self.output_idx += 1;
                    } else if val == 256 {
                        // end-of-block symbol
                        if self.final_block {
                            self.state = InflateState::End;
                            return Ok(self.output_idx);
                        } else {
                            self.state = InflateState::BlockStart;
                        }
                    } else {
                        // length code 257..285
                        let len_idx = (val - 257) as usize;
                        if len_idx >= LENGTH_BASE.len() {
                            self.state = InflateState::End;
                            return Err(DecompressError::InvalidData);
                        }
                        let len_bits = LENGTH_ADD_BITS[len_idx];
                        let add_base = LENGTH_BASE[len_idx] as usize;
                        if len_bits > 0 {
                            self.state = InflateState::FixedBlockLengthExt(add_base, len_bits);
                        } else {
                            self.state = InflateState::FixedBlockDist(add_base);
                        }
                    }
                },
                InflateState::FixedBlockLiteral(sym) => {
                    // literal stashed when the output buffer was full
                    if self.output_idx >= dst.len() {
                        self.br = csrc.br;
                        return Err(DecompressError::OutputFull);
                    }
                    self.put_literal(sym);
                    dst[self.output_idx] = sym;
                    self.output_idx += 1;
                    self.state = InflateState::FixedBlock;
                },
                InflateState::FixedBlockLengthExt(base, bits) => {
                    let add = read_bits!(self, csrc, bits) as usize;
                    self.state = InflateState::FixedBlockDist(base + add);
                },
                InflateState::FixedBlockDist(length) => {
                    // fixed-block distance codes are 5 bits stored MSB-first
                    let dist_idx = reverse_bits(read_bits!(self, csrc, 5), 5) as usize;
                    if dist_idx >= DIST_BASE.len() {
                        self.state = InflateState::End;
                        return Err(DecompressError::InvalidData);
                    }
                    let dist_bits = DIST_ADD_BITS[dist_idx];
                    let dist_base = DIST_BASE[dist_idx] as usize;
                    if dist_bits == 0 {
                        self.state = InflateState::FixedBlockCopy(length, dist_base);
                    } else {
                        self.state = InflateState::FixedBlockDistExt(length, dist_base, dist_bits);
                    }
                },
                InflateState::FixedBlockDistExt(length, base, bits) => {
                    let add = read_bits!(self, csrc, bits) as usize;
                    self.state = InflateState::FixedBlockCopy(length, base + add);
                },
                InflateState::FixedBlockCopy(length, dist) => {
                    // LZ77 copy; split in two when the output buffer fills up
                    if self.output_idx + length > dst.len() {
                        let copy_size = dst.len() - self.output_idx;
                        let ret = self.lz_copy(dist, copy_size, &mut dst[self.output_idx..]);
                        if ret.is_err() {
                            self.state = InflateState::End;
                            return Err(DecompressError::InvalidData);
                        }
                        self.output_idx += copy_size;
                        self.br = csrc.br;
                        self.state = InflateState::FixedBlockCopy(length - copy_size, dist);
                        return Err(DecompressError::OutputFull);
                    }
                    let ret = self.lz_copy(dist, length, &mut dst[self.output_idx..]);
                    if ret.is_err() {
                        self.state = InflateState::End;
                        return Err(DecompressError::InvalidData);
                    }
                    self.output_idx += length;
                    self.state = InflateState::FixedBlock;
                }
                InflateState::DynBlockHlit => {
                    self.hlit = (read_bits!(self, csrc, 5) as usize) + 257;
                    if self.hlit >= 287 {
                        self.state = InflateState::End;
                        return Err(DecompressError::InvalidHeader);
                    }
                    self.state = InflateState::DynBlockHdist;
                }
                InflateState::DynBlockHdist => {
                    self.hdist = (read_bits!(self, csrc, 5) as usize) + 1;
                    self.state = InflateState::DynBlockHclen;
                },
                InflateState::DynBlockHclen => {
                    let hclen = (read_bits!(self, csrc, 4) as usize) + 4;
                    self.cur_len_idx = 0;
                    self.len_lengths = [0; 19];
                    self.all_lengths = [0; NUM_LITERALS + NUM_DISTS];
                    self.state = InflateState::DynLengths(hclen);
                },
                InflateState::DynLengths(len) => {
                    // 3-bit lengths of the code-length alphabet, transmitted
                    // in LEN_RECODE order
                    for i in 0..len {
                        if csrc.left() < 3 {
                            self.br = csrc.br;
                            self.state = InflateState::DynLengths(len - i);
                            return Err(DecompressError::ShortData);
                        }
                        self.len_lengths[LEN_RECODE[self.cur_len_idx]] = csrc.read(3).unwrap() as u8;
                        self.cur_len_idx += 1;
                    }
                    let mut len_codes = [ShortCodebookDesc { code: 0, bits: 0 }; 19];
                    lengths_to_codes(&self.len_lengths, &mut len_codes)?;
                    let mut cr = ShortCodebookDescReader::new(len_codes.to_vec());
                    let ret = Codebook::new(&mut cr, CodebookMode::LSB);
                    if ret.is_err() {
                        self.state = InflateState::End;
                        return Err(DecompressError::InvalidHeader);
                    }
                    self.dyn_len_cb = Some(ret.unwrap());
                    self.cur_len_idx = 0;
                    self.state = InflateState::DynCodeLengths;
                },
                InflateState::DynCodeLengths => {
                    if let Some(ref len_cb) = self.dyn_len_cb {
                        while self.cur_len_idx < self.hlit + self.hdist {
                            let ret = csrc.read_cb(len_cb);
                            let val = match ret {
                                Ok(val) => val,
                                Err(CodebookError::MemoryError) => {
                                    // out of input; cur_len_idx marks the resume point
                                    self.br = csrc.br;
                                    return Err(DecompressError::ShortData);
                                },
                                Err(_) => {
                                    self.state = InflateState::End;
                                    return Err(DecompressError::InvalidHeader);
                                },
                            };
                            if val < 16 {
                                self.all_lengths[self.cur_len_idx] = val as u8;
                                self.cur_len_idx += 1;
                            } else {
                                // 16/17/18 are repeat codes handled in their own state
                                let idx = (val as usize) - 16;
                                if idx > 2 {
                                    self.state = InflateState::End;
                                    return Err(DecompressError::InvalidHeader);
                                }
                                self.state = InflateState::DynCodeLengthsAdd(idx);
                                continue 'main;
                            }
                        }
                        // all lengths read: build the two dynamic codebooks
                        let (lit_lengths, dist_lengths) = self.all_lengths.split_at(self.hlit);

                        let mut lit_codes = [ShortCodebookDesc { code: 0, bits: 0 }; NUM_LITERALS];
                        lengths_to_codes(&lit_lengths, &mut lit_codes)?;
                        let mut cr = ShortCodebookDescReader::new(lit_codes.to_vec());
                        let ret = Codebook::new(&mut cr, CodebookMode::LSB);
                        if ret.is_err() { return Err(DecompressError::InvalidHeader); }
                        self.dyn_lit_cb = Some(ret.unwrap());

                        let mut dist_codes = [ShortCodebookDesc { code: 0, bits: 0 }; NUM_DISTS];
                        lengths_to_codes(&dist_lengths[..self.hdist], &mut dist_codes)?;
                        let mut cr = ShortCodebookDescReader::new(dist_codes.to_vec());
                        let ret = Codebook::new(&mut cr, CodebookMode::LSB);
                        if ret.is_err() { return Err(DecompressError::InvalidHeader); }
                        self.dyn_dist_cb = Some(ret.unwrap());

                        self.state = InflateState::DynBlock;
                    } else {
                        unreachable!();
                    }
                },
                InflateState::DynCodeLengthsAdd(mode) => {
                    let base = REPEAT_BASE[mode] as usize;
                    let bits = REPEAT_BITS[mode];
                    let len = base + read_bits!(self, csrc, bits) as usize;
                    if self.cur_len_idx + len > self.hlit + self.hdist {
                        self.state = InflateState::End;
                        return Err(DecompressError::InvalidHeader);
                    }
                    // mode 0 repeats the previous length, modes 1/2 emit zeroes
                    let rpt = if mode == 0 {
                            if self.cur_len_idx == 0 {
                                self.state = InflateState::End;
                                return Err(DecompressError::InvalidHeader);
                            }
                            self.all_lengths[self.cur_len_idx - 1]
                        } else {
                            0
                        };
                    for _ in 0..len {
                        self.all_lengths[self.cur_len_idx] = rpt;
                        self.cur_len_idx += 1;
                    }
                    self.state = InflateState::DynCodeLengths;
                },
                InflateState::DynBlock => {
                    if let Some(ref lit_cb) = self.dyn_lit_cb {
                        let val = read_cb!(self, csrc, lit_cb);
                        if val < 256 {
                            // literal byte
                            if self.output_idx >= dst.len() {
                                self.br = csrc.br;
                                self.state = InflateState::DynBlockLiteral(val as u8);
                                return Err(DecompressError::OutputFull);
                            }
                            self.put_literal(val as u8);
                            dst[self.output_idx] = val as u8;
                            self.output_idx += 1;
                        } else if val == 256 {
                            // end-of-block symbol
                            if self.final_block {
                                self.state = InflateState::End;
                                return Ok(self.output_idx);
                            } else {
                                self.state = InflateState::BlockStart;
                            }
                        } else {
                            // length code 257..285
                            let len_idx = (val - 257) as usize;
                            if len_idx >= LENGTH_BASE.len() {
                                self.state = InflateState::End;
                                return Err(DecompressError::InvalidData);
                            }
                            let len_bits = LENGTH_ADD_BITS[len_idx];
                            let add_base = LENGTH_BASE[len_idx] as usize;
                            if len_bits > 0 {
                                self.state = InflateState::DynBlockLengthExt(add_base, len_bits);
                            } else {
                                self.state = InflateState::DynBlockDist(add_base);
                            }
                        }
                    } else {
                        unreachable!();
                    }
                },
                InflateState::DynBlockLiteral(sym) => {
                    // literal stashed when the output buffer was full
                    if self.output_idx >= dst.len() {
                        self.br = csrc.br;
                        return Err(DecompressError::OutputFull);
                    }
                    self.put_literal(sym);
                    dst[self.output_idx] = sym;
                    self.output_idx += 1;
                    self.state = InflateState::DynBlock;
                },
                InflateState::DynBlockLengthExt(base, bits) => {
                    let add = read_bits!(self, csrc, bits) as usize;
                    self.state = InflateState::DynBlockDist(base + add);
                },
                InflateState::DynBlockDist(length) => {
                    if let Some(ref dist_cb) = self.dyn_dist_cb {
                        let dist_idx = read_cb!(self, csrc, dist_cb) as usize;
                        if dist_idx >= DIST_BASE.len() {
                            self.state = InflateState::End;
                            return Err(DecompressError::InvalidData);
                        }
                        let dist_bits = DIST_ADD_BITS[dist_idx];
                        let dist_base = DIST_BASE[dist_idx] as usize;
                        if dist_bits == 0 {
                            self.state = InflateState::DynCopy(length, dist_base);
                        } else {
                            self.state = InflateState::DynBlockDistExt(length, dist_base, dist_bits);
                        }
                    } else {
                        unreachable!();
                    }
                },
                InflateState::DynBlockDistExt(length, base, bits) => {
                    let add = read_bits!(self, csrc, bits) as usize;
                    self.state = InflateState::DynCopy(length, base + add);
                },
                InflateState::DynCopy(length, dist) => {
                    // LZ77 copy; split in two when the output buffer fills up
                    if self.output_idx + length > dst.len() {
                        let copy_size = dst.len() - self.output_idx;
                        let ret = self.lz_copy(dist, copy_size, &mut dst[self.output_idx..]);
                        if ret.is_err() {
                            self.state = InflateState::End;
                            return Err(DecompressError::InvalidData);
                        }
                        self.output_idx += copy_size;
                        self.br = csrc.br;
                        self.state = InflateState::DynCopy(length - copy_size, dist);
                        return Err(DecompressError::OutputFull);
                    }
                    let ret = self.lz_copy(dist, length, &mut dst[self.output_idx..]);
                    if ret.is_err() {
                        self.state = InflateState::End;
                        return Err(DecompressError::InvalidData);
                    }
                    self.output_idx += length;
                    self.state = InflateState::DynBlock;
                }
                InflateState::End => {
                    return Ok(0);
                },
            }
        }
    }
| 768 | ///! Resets decoder state. |
| 769 | pub fn reset(&mut self) { |
| 770 | self.bpos = 0; |
| 771 | self.output_idx = 0; |
| 772 | self.full_pos = 0; |
| 773 | self.state = InflateState::Start; |
| 774 | } |
| 775 | |
    /// Decompresses input data into output returning the uncompressed data length.
    ///
    /// This performs a single decoding pass, so `dst` must be large enough to
    /// hold the whole uncompressed stream (otherwise `OutputFull` is returned).
    pub fn uncompress(src: &[u8], dst: &mut [u8]) -> DecompressResult<usize> {
        let mut inflate = Self::new();
        inflate.decompress_data(src, dst, false)
    }
| 781 | } |
| 782 | |
| 783 | impl Default for Inflate { |
| 784 | fn default() -> Self { |
| 785 | Self::new() |
| 786 | } |
| 787 | } |
| 788 | |
/// Builds canonical Huffman codes (bit-reversed for LSB-first reading) from
/// per-symbol code lengths, following RFC 1951 section 3.2.2.
///
/// Symbols with length 0 get an empty descriptor; lengths of 32 or more are
/// rejected as an invalid header.
fn lengths_to_codes(lens: &[u8], codes: &mut [ShortCodebookDesc]) -> DecompressResult<()> {
    let mut bits = [0u32; 32];
    let mut pfx = [0u32; 33];
    // count how many codes exist for each length
    for len in lens.iter() {
        let len = *len as usize;
        if len >= bits.len() {
            return Err(DecompressError::InvalidHeader);
        }
        bits[len] += 1;
    }
    bits[0] = 0;
    // compute the first canonical code value for each length
    let mut code = 0;
    for i in 0..bits.len() {
        code = (code + bits[i]) << 1;
        pfx[i + 1] = code;
    }

    // hand out consecutive code values to symbols of equal length
    for (len, codes) in lens.iter().zip(codes.iter_mut()) {
        let len = *len as usize;
        if len != 0 {
            let bits = len as u8;
            *codes = ShortCodebookDesc { code: reverse_bits(pfx[len], bits), bits };
            pfx[len] += 1;
        } else {
            *codes = ShortCodebookDesc { code: 0, bits: 0 };
        }
    }

    Ok(())
}
| 819 | |
/// Incremental CRC-32 (IEEE 802.3, polynomial 0xEDB88320) checker used for
/// validating gzip payloads.
struct GzipCRC32 {
    tab: [u32; 256], // byte-indexed CRC lookup table
    crc: u32,        // running checksum (already post-inverted)
}

impl GzipCRC32 {
    #[allow(clippy::unreadable_literal)]
    /// Builds the lookup table and starts with a zero checksum.
    fn new() -> Self {
        let mut tab = [0u32; 256];
        for (i, entry) in tab.iter_mut().enumerate() {
            let mut c = i as u32;
            for _ in 0..8 {
                c = if (c & 1) != 0 { 0xEDB88320 ^ (c >> 1) } else { c >> 1 };
            }
            *entry = c;
        }
        Self { tab, crc: 0 }
    }
    /// Folds `src` into the running checksum.
    fn update_crc(&mut self, src: &[u8]) {
        let mut c = !self.crc;
        for &byte in src.iter() {
            c = self.tab[usize::from((c as u8) ^ byte)] ^ (c >> 8);
        }
        self.crc = !c;
    }
}
| 850 | |
| 851 | ///! Decodes input data in gzip file format (RFC 1952) returning a vector containing decoded data. |
| 852 | pub fn gzip_decode(br: &mut ByteReader, skip_crc: bool) -> DecompressResult<Vec<u8>> { |
| 853 | const FLAG_HCRC: u8 = 0x02; |
| 854 | const FLAG_EXTRA: u8 = 0x04; |
| 855 | const FLAG_NAME: u8 = 0x08; |
| 856 | const FLAG_COMMENT: u8 = 0x10; |
| 857 | |
| 858 | let id1 = br.read_byte()?; |
| 859 | let id2 = br.read_byte()?; |
| 860 | let cm = br.read_byte()?; |
| 861 | let flg = br.read_byte()?; |
| 862 | let _mtime = br.read_u32le()?; |
| 863 | let _xfl = br.read_byte()?; |
| 864 | let _os = br.read_byte()?; |
| 865 | if id1 != 0x1F || id2 != 0x8B || cm != 8 { |
| 866 | return Err(DecompressError::InvalidHeader); |
| 867 | } |
| 868 | |
| 869 | if (flg & FLAG_EXTRA) != 0 { |
| 870 | let xlen = br.read_u16le()? as usize; |
| 871 | br.read_skip(xlen)?; |
| 872 | } |
| 873 | if (flg & FLAG_NAME) != 0 { |
| 874 | loop { |
| 875 | let b = br.read_byte()?; |
| 876 | if b == 0 { |
| 877 | break; |
| 878 | } |
| 879 | } |
| 880 | } |
| 881 | if (flg & FLAG_COMMENT) != 0 { |
| 882 | loop { |
| 883 | let b = br.read_byte()?; |
| 884 | if b == 0 { |
| 885 | break; |
| 886 | } |
| 887 | } |
| 888 | } |
| 889 | let _hcrc = if (flg & FLAG_HCRC) != 0 { |
| 890 | br.read_u16le()? |
| 891 | } else { |
| 892 | 0 |
| 893 | }; |
| 894 | if (flg & 0xE0) != 0 { |
| 895 | return Err(DecompressError::Unsupported); |
| 896 | } |
| 897 | |
| 898 | let mut output: Vec<u8> = Vec::new(); |
| 899 | let mut tail = [0u8; 8]; |
| 900 | let mut inblk = [0u8; 1024]; |
| 901 | let mut oblk = [0u8; 4096]; |
| 902 | let mut inflate = Inflate::new(); |
| 903 | let mut checker = GzipCRC32::new(); |
| 904 | |
| 905 | loop { |
| 906 | let ret = br.read_buf_some(&mut inblk); |
| 907 | if let Err(ByteIOError::EOF) = ret { |
| 908 | break; |
| 909 | } |
| 910 | let inlen = match ret { |
| 911 | Ok(val) => val, |
| 912 | Err(_) => return Err(DecompressError::IOError), |
| 913 | }; |
| 914 | let mut repeat = false; |
| 915 | loop { |
| 916 | let ret = inflate.decompress_data(&inblk[..inlen], &mut oblk, repeat); |
| 917 | match ret { |
| 918 | Ok(outlen) => { |
| 919 | checker.update_crc(&oblk[..outlen]); |
| 920 | output.extend_from_slice(&oblk[..outlen]); |
| 921 | break; |
| 922 | }, |
| 923 | Err(DecompressError::ShortData) => { |
| 924 | break; |
| 925 | }, |
| 926 | Err(DecompressError::OutputFull) => { |
| 927 | repeat = true; |
| 928 | checker.update_crc(&oblk); |
| 929 | output.extend_from_slice(&oblk); |
| 930 | }, |
| 931 | Err(err) => { |
| 932 | return Err(err); |
| 933 | }, |
| 934 | } |
| 935 | } |
| 936 | // Save last 8 bytes for CRC and size. |
| 937 | if inlen >= 8 { |
| 938 | tail.copy_from_slice(&inblk[inlen - 8..][..8]); |
| 939 | } else { |
| 940 | let shift_len = 8 - inlen; |
| 941 | for i in 0..shift_len { |
| 942 | tail[i] = tail[i + inlen]; |
| 943 | } |
| 944 | for i in shift_len..8 { |
| 945 | tail[i] = inblk[i - shift_len]; |
| 946 | } |
| 947 | } |
| 948 | } |
| 949 | if !skip_crc { |
| 950 | if !inflate.is_finished() { println!("???"); } |
| 951 | let crc = read_u32le(&tail[0..4])?; |
| 952 | let size = read_u32le(&tail[4..8])?; |
| 953 | if size != (output.len() as u32) { |
| 954 | return Err(DecompressError::CRCError); |
| 955 | } |
| 956 | if crc != checker.crc { |
| 957 | return Err(DecompressError::CRCError); |
| 958 | } |
| 959 | } |
| 960 | |
| 961 | Ok(output) |
| 962 | } |
| 963 | |
// Single LZ77 parsing result: either a literal byte or a back-reference match.
#[derive(Clone,Copy,Default)]
struct Token {
    sym: u16,       // literal/length symbol: 0-255 literal, 256 end-of-block, 257+ match
    distsym: u8,    // distance symbol (meaningful only when sym > 256)
    len: u16,       // match length in bytes (meaningful only when sym > 256)
    dist: u16,      // match distance in bytes (meaningful only when sym > 256)
}

// End-of-block marker (symbol 256 in RFC 1951).
const TOKEN_EOB: Token = Token { sym: 256, distsym: 0, len: 0, dist: 0 };
| 973 | |
impl Token {
    // Wraps a literal byte into a token.
    fn from_literal(sym: u8) -> Self {
        Self {
            sym: u16::from(sym),
            distsym: 0,
            dist: 0,
            len: 0,
        }
    }
    // Converts a match (distance, length) into a token with the RFC 1951
    // length symbol (257-285) and distance symbol precomputed.
    fn from_match(dist: u16, len: u16) -> Self {
        // Map match length 3-258 onto symbols 257-285 (RFC 1951 section 3.2.5).
        let sym = match len {
            3..= 10 => 257 + len - 3,
            11..= 18 => 265 + (len - 11) / 2,
            19..= 34 => 269 + (len - 19) / 4,
            35..= 66 => 273 + (len - 35) / 8,
            67..=130 => 277 + (len - 67) / 16,
            131..=257 => 281 + (len - 131) / 32,
            _ => 285, // length 258; callers never pass lengths outside 3-258
        };
        // Distances 1-4 map directly to symbols 0-3; larger distances use two
        // symbols per power-of-two range, chosen by the bit below the MSB.
        let distsym = if dist <= 4 {
            (dist - 1) as u8
        } else {
            let bits = 16 - (dist - 1).leading_zeros();
            (bits as u8) * 2 - 2 + if ((dist - 1) & (1 << (bits - 2))) != 0 { 1 } else { 0 }
        };
        Self {
            sym, distsym, len, dist
        }
    }
}
| 1004 | |
// Run-length encodes a sequence of code lengths using the deflate
// code-length alphabet (RFC 1951 section 3.2.7):
//   0-15  literal length,
//   16    repeat previous length 3-6 times (2 extra bits),
//   17    repeat zero 3-10 times (3 extra bits),
//   18    repeat zero 11-138 times (7 extra bits).
// `stats` accumulates symbol frequencies, `toks` receives (symbol, extra) pairs.
//
// Note: the run counter is `usize` on purpose — the previous `u8` counter
// overflowed (debug panic) on runs of 256 or more equal lengths, which is
// reachable since up to 286 literal code lengths are passed in.
fn add_codes(lens: &[u8], stats: &mut [u32], toks: &mut Vec<(u8, u8)>) {
    // Emits the pending run of `lcount` additional repeats of `last`.
    fn flush_run(last: u8, mut lcount: usize, stats: &mut [u32], toks: &mut Vec<(u8, u8)>) {
        if last == 0 {
            // Long zero runs first (code 18), then a short run (code 17),
            // then any 1-2 leftover zeros literally.
            while lcount > 10 {
                let run = lcount.min(138);
                stats[18] += 1;
                toks.push((18, (run - 11) as u8));
                lcount -= run;
            }
            if lcount >= 3 {
                stats[17] += 1;
                toks.push((17, (lcount - 3) as u8));
                lcount = 0;
            }
            for _ in 0..lcount {
                stats[0] += 1;
                toks.push((0, 0));
            }
        } else {
            // Repeat-previous runs (code 16), then leftovers literally.
            while lcount >= 3 {
                let run = lcount.min(6);
                stats[16] += 1;
                toks.push((16, (run - 3) as u8));
                lcount -= run;
            }
            for _ in 0..lcount {
                stats[last as usize] += 1;
                toks.push((last, 0));
            }
        }
    }

    let mut last = 42; // sentinel: never equal to a valid code length (0-15)
    let mut lcount: usize = 0;

    for &len in lens.iter() {
        if len == last {
            lcount += 1;
        } else {
            // Length changed: flush the accumulated run, then emit the new
            // length literally and start counting repeats of it.
            flush_run(last, lcount, stats, toks);
            stats[len as usize] += 1;
            toks.push((len, 0));
            last = len;
            lcount = 0;
        }
    }
    // Flush whatever run remained at the end of the input.
    flush_run(last, lcount, stats, toks);
}
| 1078 | |
/// Deflate stream writer.
///
/// Packs bits LSB-first into bytes as RFC 1951 requires and collects them
/// into the output vector.
pub struct DeflateWriter {
    dst: Vec<u8>,   // accumulated output bytes
    bits: u8,       // number of valid bits currently held in `bbuf`
    bbuf: u32,      // bit buffer, filled from the least significant end
}
| 1085 | |
impl DeflateWriter {
    /// Creates a new instance of `DeflateWriter` for a provided output.
    pub fn new(dst: Vec<u8>) -> Self {
        Self {
            dst,
            bits: 0,
            bbuf: 0,
        }
    }
    // Pads the bit position up to the next byte boundary; the padding bits
    // in `bbuf` are zero because `write` never leaves stale bits behind.
    fn align(&mut self) {
        if (self.bits & 7) != 0 {
            self.bits += 8 - (self.bits & 7);
        }
    }
    // Moves all complete bytes out of the bit buffer into the output vector.
    fn flush(&mut self) {
        while self.bits >= 8 {
            self.dst.push(self.bbuf as u8);
            self.bbuf >>= 8;
            self.bits -= 8;
        }
    }
    // Appends `len` bits of `val`, LSB first. After the initial flush at most
    // 7 bits remain buffered, so 7 + 16 = 23 bits always fit into `bbuf`.
    fn write(&mut self, val: u16, len: u8) {
        self.flush();
        self.bbuf |= u32::from(val) << self.bits;
        self.bits += len;
    }
    /// Finishes writing the stream and returns the output vector.
    pub fn end(mut self) -> Vec<u8> {
        self.flush();
        if self.bits > 0 {
            self.dst.push(self.bbuf as u8);
        }
        self.dst
    }

    // Writes the code-length code of a dynamic block: run-length encodes the
    // literal and distance code lengths, builds a Huffman code over the
    // resulting symbols (max depth 7) and emits the HCLEN table plus tokens.
    fn write_codes(&mut self, codes: &CodeHuff, dists: &DistHuff) {
        let mut stats = [0u32; 19];
        let mut toks = Vec::with_capacity(NUM_LITERALS + NUM_DISTS);
        let mut cw = [0u16; 19];
        let mut cl = [0u8; 19];
        let mut nc = 0;

        add_codes(&codes.lens[..codes.num_codes], &mut stats, &mut toks);
        add_codes(&dists.lens[..dists.num_codes], &mut stats, &mut toks);

        gen_tree(&mut cw, &mut cl, &mut nc, &mut stats, 7);

        // Count entries in the fixed HCLEN transmission order, dropping
        // trailing zero lengths; at least 4 entries must be sent.
        nc = cw.len();
        for &idx in LEN_RECODE.iter().rev() {
            if cl[idx] == 0 {
                nc -= 1;
            } else {
                break;
            }
        }
        if nc < 4 {
            nc = 4;
        }
        self.write((nc - 4) as u16, 4);
        for &idx in LEN_RECODE.iter().take(nc) {
            self.write(u16::from(cl[idx]), 3);
        }
        // Symbols 16-18 carry extra repeat-count bits (2, 3 and 7 bits).
        for &(sym, add) in toks.iter() {
            self.write(cw[sym as usize], cl[sym as usize]);
            match sym {
                16 => self.write(u16::from(add), 2),
                17 => self.write(u16::from(add), 3),
                18 => self.write(u16::from(add), 7),
                _ => {},
            };
        }
    }
    // Writes the token stream using the given literal/length and distance codes.
    fn write_tokens(&mut self, src: &[Token], codes: &CodeHuff, dists: &DistHuff) {
        for &tok in src.iter() {
            self.write(codes.codes[tok.sym as usize], codes.lens[tok.sym as usize]);
            if tok.sym > 256 {
                self.write_len_bits(tok.len);
                self.write(dists.codes[tok.distsym as usize], dists.lens[tok.distsym as usize]);
                self.write_dist_bits(tok.dist);
            }
        }
    }
    // Writes the extra length bits; lengths 3-10 and 258 carry none.
    fn write_len_bits(&mut self, len: u16) {
        let llen = len - 3;
        if llen >= 8 && llen < 255 {
            let bits = (16 - llen.leading_zeros() - 3) as u8;
            self.write(llen & ((1 << bits) - 1), bits);
        }
    }
    // Writes the extra distance bits; distances 1-4 carry none
    // (for dist == 4 the computed width is 0, so nothing is emitted).
    fn write_dist_bits(&mut self, dist: u16) {
        let ddist = dist - 1;
        if dist >= 4 {
            let bits = (16 - ddist.leading_zeros() - 2) as u8;
            self.write(ddist & ((1 << bits) - 1), bits);
        }
    }
}
| 1183 | |
// Literal/length Huffman code used when encoding a deflate block.
struct CodeHuff {
    is_fixed: bool,                 // use the predefined RFC 1951 code
    stats: [u32; NUM_LITERALS],     // symbol frequencies
    codes: [u16; NUM_LITERALS],     // bit-reversed canonical codes
    lens: [u8; NUM_LITERALS],       // code lengths in bits
    num_codes: usize,               // number of codes actually transmitted
}
| 1191 | |
| 1192 | impl CodeHuff { |
| 1193 | fn new(is_fixed: bool) -> Self { |
| 1194 | Self { |
| 1195 | is_fixed, |
| 1196 | stats: [0; NUM_LITERALS], |
| 1197 | codes: [0; NUM_LITERALS], |
| 1198 | lens: [0; NUM_LITERALS], |
| 1199 | num_codes: NUM_LITERALS, |
| 1200 | } |
| 1201 | } |
| 1202 | fn make_codes(&mut self, src: &[Token]) { |
| 1203 | if self.is_fixed { |
| 1204 | for i in 0..=143 { |
| 1205 | self.codes[i] = reverse_bits((i + 0x30) as u32, 8) as u16; |
| 1206 | self.lens[i] = 8; |
| 1207 | } |
| 1208 | for i in 144..=255 { |
| 1209 | self.codes[i] = reverse_bits((i + 0x100) as u32, 9) as u16; |
| 1210 | self.lens[i] = 9; |
| 1211 | } |
| 1212 | for i in 256..=279 { |
| 1213 | self.codes[i] = reverse_bits((i & 0x1F) as u32, 7) as u16; |
| 1214 | self.lens[i] = 7; |
| 1215 | } |
| 1216 | for i in 280..NUM_LITERALS { |
| 1217 | self.codes[i] = reverse_bits((i - 280 + 0xC0) as u32, 8) as u16; |
| 1218 | self.lens[i] = 8; |
| 1219 | } |
| 1220 | } else { |
| 1221 | for &tok in src.iter() { |
| 1222 | self.stats[tok.sym as usize] += 1; |
| 1223 | } |
| 1224 | gen_tree(&mut self.codes, &mut self.lens, &mut self.num_codes, &mut self.stats, 15); |
| 1225 | if self.num_codes < 257 { |
| 1226 | self.num_codes = 257; |
| 1227 | } |
| 1228 | } |
| 1229 | } |
| 1230 | } |
| 1231 | |
// Distance Huffman code used when encoding a deflate block.
struct DistHuff {
    is_fixed: bool,             // use the predefined RFC 1951 code
    stats: [u32; NUM_DISTS],    // distance-symbol frequencies
    codes: [u16; NUM_DISTS],    // bit-reversed canonical codes
    lens: [u8; NUM_DISTS],      // code lengths in bits
    num_codes: usize,           // number of codes actually transmitted
}
| 1239 | |
| 1240 | impl DistHuff { |
| 1241 | fn new(is_fixed: bool) -> Self { |
| 1242 | Self { |
| 1243 | is_fixed, |
| 1244 | stats: [0; NUM_DISTS], |
| 1245 | codes: [0; NUM_DISTS], |
| 1246 | lens: [0; NUM_DISTS], |
| 1247 | num_codes: NUM_DISTS, |
| 1248 | } |
| 1249 | } |
| 1250 | fn make_codes(&mut self, src: &[Token]) { |
| 1251 | if self.is_fixed { |
| 1252 | for i in 0..NUM_DISTS { |
| 1253 | self.codes[i] = reverse_bits(i as u32, 5) as u16; |
| 1254 | self.lens[i] = 5; |
| 1255 | } |
| 1256 | } else { |
| 1257 | for &tok in src.iter() { |
| 1258 | if tok.sym > 256 { |
| 1259 | self.stats[tok.distsym as usize] += 1; |
| 1260 | } |
| 1261 | } |
| 1262 | gen_tree(&mut self.codes, &mut self.lens, &mut self.num_codes, &mut self.stats, 15); |
| 1263 | if self.num_codes < 1 { |
| 1264 | self.num_codes = 1; |
| 1265 | } |
| 1266 | } |
| 1267 | } |
| 1268 | } |
| 1269 | |
// Huffman tree node; `w` holds the weight during construction and is later
// reused to store the resulting code length (see Tree::assign_len).
#[derive(Clone,Copy,Default)]
struct Node {
    sym: u16,       // symbol, or NODE_SYM for internal nodes
    w: u16,         // weight during build, code length afterwards
    idx0: u16,      // left child index (internal nodes only)
    idx1: u16,      // right child index (internal nodes only)
}

// Marker symbol distinguishing internal tree nodes from leaves.
const NODE_SYM: u16 = 65500;
| 1279 | |
// Workspace for Huffman code construction, sized for all leaves plus every
// internal node the merging process can create.
struct Tree {
    nodes: [Node; NUM_LITERALS * 2],
    nnodes: usize,  // number of occupied entries in `nodes`
}
| 1284 | |
| 1285 | impl Tree { |
| 1286 | fn new() -> Self { |
| 1287 | Self { |
| 1288 | nodes: [Node::default(); NUM_LITERALS * 2], |
| 1289 | nnodes: 0, |
| 1290 | } |
| 1291 | } |
| 1292 | fn insert(&mut self, val: Node) { |
| 1293 | let mut idx = self.nnodes; |
| 1294 | for (i, nn) in self.nodes[..self.nnodes].iter().enumerate() { |
| 1295 | if nn.w > val.w { |
| 1296 | idx = i; |
| 1297 | break; |
| 1298 | } |
| 1299 | } |
| 1300 | if idx < self.nnodes { |
| 1301 | for i in (idx..self.nnodes).rev() { |
| 1302 | self.nodes[i + 1] = self.nodes[i]; |
| 1303 | } |
| 1304 | } |
| 1305 | self.nodes[idx] = val; |
| 1306 | self.nnodes += 1; |
| 1307 | } |
| 1308 | fn trim(&mut self) { |
| 1309 | let mut start = self.nnodes; |
| 1310 | for (i, n) in self.nodes[..self.nnodes].iter().enumerate() { |
| 1311 | if n.w != 0 { |
| 1312 | start = i; |
| 1313 | break; |
| 1314 | } |
| 1315 | } |
| 1316 | if start != 0 { |
| 1317 | for i in 0..self.nnodes - start { |
| 1318 | self.nodes[i] = self.nodes[i + start]; |
| 1319 | } |
| 1320 | self.nnodes -= start; |
| 1321 | } |
| 1322 | } |
| 1323 | fn build(&mut self) { |
| 1324 | if self.nnodes == 1 { |
| 1325 | self.nodes[0].w = 1; |
| 1326 | return; |
| 1327 | } |
| 1328 | let mut start = 0; |
| 1329 | while start + 1 < self.nnodes { |
| 1330 | let nn1 = self.nodes[start]; |
| 1331 | let nn2 = self.nodes[start + 1]; |
| 1332 | let n = Node { |
| 1333 | sym: NODE_SYM, |
| 1334 | w: nn1.w + nn2.w, |
| 1335 | idx0: start as u16, |
| 1336 | idx1: (start + 1) as u16, |
| 1337 | }; |
| 1338 | self.nodes[start].w = 0; |
| 1339 | self.nodes[start + 1].w = 0; |
| 1340 | start += 2; |
| 1341 | self.insert(n); |
| 1342 | } |
| 1343 | if self.nnodes > 1 { |
| 1344 | self.assign_len(self.nnodes - 1, 0); |
| 1345 | } |
| 1346 | } |
| 1347 | fn assign_len(&mut self, idx: usize, len: u16) { |
| 1348 | if self.nodes[idx].sym == NODE_SYM { |
| 1349 | self.assign_len(self.nodes[idx].idx0 as usize, len + 1); |
| 1350 | self.assign_len(self.nodes[idx].idx1 as usize, len + 1); |
| 1351 | } else { |
| 1352 | self.nodes[idx].w = len; |
| 1353 | } |
| 1354 | } |
| 1355 | } |
| 1356 | |
| 1357 | fn gen_tree(codes: &mut [u16], lens: &mut [u8], num_codes: &mut usize, stats: &mut [u32], max_bits: u8) { |
| 1358 | let mut tot_w = 0; |
| 1359 | for &w in stats.iter() { |
| 1360 | tot_w += w; |
| 1361 | } |
| 1362 | if tot_w == 0 { |
| 1363 | codes[0] = 0; |
| 1364 | lens[0] = 0; |
| 1365 | *num_codes = 0; |
| 1366 | return; |
| 1367 | } |
| 1368 | while tot_w > (1 << max_bits) { |
| 1369 | for w in stats.iter_mut() { |
| 1370 | *w = (*w + 1) >> 1; |
| 1371 | } |
| 1372 | tot_w = 0; |
| 1373 | for &w in stats.iter() { |
| 1374 | tot_w += w; |
| 1375 | } |
| 1376 | } |
| 1377 | let mut tree = Tree::new(); |
| 1378 | for (sym, &w) in stats.iter().enumerate() { |
| 1379 | tree.insert(Node{ sym: sym as u16, w: w as u16, idx0: 64000, idx1: 64000 }); |
| 1380 | } |
| 1381 | tree.trim(); |
| 1382 | tree.build(); |
| 1383 | |
| 1384 | for n in tree.nodes[..tree.nnodes].iter() { |
| 1385 | if n.sym != NODE_SYM { |
| 1386 | lens[n.sym as usize] = n.w as u8; |
| 1387 | } |
| 1388 | } |
| 1389 | lengths_to_codes16(lens, codes); |
| 1390 | let mut sz = codes.len(); |
| 1391 | for &len in lens.iter().rev() { |
| 1392 | if len != 0 { |
| 1393 | break; |
| 1394 | } |
| 1395 | sz -= 1; |
| 1396 | } |
| 1397 | *num_codes = sz; |
| 1398 | } |
| 1399 | |
| 1400 | fn lengths_to_codes16(lens: &[u8], codes: &mut [u16]) { |
| 1401 | let mut bits = [0u32; 32]; |
| 1402 | let mut pfx = [0u32; 33]; |
| 1403 | for len in lens.iter() { |
| 1404 | let len = *len as usize; |
| 1405 | bits[len] += 1; |
| 1406 | } |
| 1407 | bits[0] = 0; |
| 1408 | let mut code = 0; |
| 1409 | for i in 0..bits.len() { |
| 1410 | code = (code + bits[i]) << 1; |
| 1411 | pfx[i + 1] = code; |
| 1412 | } |
| 1413 | |
| 1414 | for (len, codes) in lens.iter().zip(codes.iter_mut()) { |
| 1415 | let len = *len as usize; |
| 1416 | if len != 0 { |
| 1417 | let bits = len as u8; |
| 1418 | *codes = reverse_bits(pfx[len], bits) as u16; |
| 1419 | pfx[len] += 1; |
| 1420 | } else { |
| 1421 | *codes = 0; |
| 1422 | } |
| 1423 | } |
| 1424 | } |
| 1425 | |
// Common interface for LZ77 parsers: converts input bytes into a token
// stream terminated by TOKEN_EOB.
trait LZParse {
    fn parse(&mut self, src: &[u8], dst: &mut Vec<Token>);
}
| 1429 | |
| 1430 | struct NoParser {} |
| 1431 | impl LZParse for NoParser { |
| 1432 | fn parse(&mut self, src: &[u8], dst: &mut Vec<Token>) { |
| 1433 | dst.reserve(src.len()); |
| 1434 | for &b in src.iter() { |
| 1435 | dst.push(Token::from_literal(b)); |
| 1436 | } |
| 1437 | dst.push(TOKEN_EOB); |
| 1438 | } |
| 1439 | } |
| 1440 | |
// Returns the length of the common prefix of the two slices, stopping at
// the shorter one's end.
fn check_match(src1: &[u8], src2: &[u8]) -> u16 {
    src1.iter()
        .zip(src2.iter())
        .take_while(|&(a, b)| a == b)
        .fold(0u16, |len, _| len + 1)
}
| 1451 | |
const HASH_SIZE: usize = 4096;                      // number of hash chain heads
const MAX_MATCH_LEN: usize = 258;                   // longest match deflate can encode
const WINDOW_SIZE: usize = 32768 - MAX_MATCH_LEN;   // usable back-reference window
const NONEXT: usize = WINDOW_SIZE * 2;              // sentinel: end of a hash chain
| 1456 | |
// Hash-chain based LZ77 match finder over a sliding window.
struct MatchFinder<'a> {
    src: &'a [u8],                  // input slice (rebased as the window slides)
    pos: usize,                     // current position inside `src`
    hstart: [usize; HASH_SIZE],     // first (closest) position of each chain
    hend: [usize; HASH_SIZE],       // last position of each chain
    hnext: [usize; WINDOW_SIZE * 3],// next-position links, indexed by position
}
| 1464 | |
impl<'a> MatchFinder<'a> {
    // Creates a match finder and indexes the visible part of the input.
    fn new(src: &'a [u8]) -> Self {
        let mut obj = Self {
            src,
            pos: 0,
            hstart: [0; HASH_SIZE],
            hend: [0; HASH_SIZE],
            hnext: [0; WINDOW_SIZE * 3],
        };
        obj.build_hash();
        obj
    }
    // Hashes the next three bytes into a chain index.
    fn hash(src: &[u8]) -> usize {
        let _ = src[2]; // one up-front bounds check covers all three accesses
        (((u16::from(src[0]) << 10) ^ (u16::from(src[1]) << 5) ^ u16::from(src[2])) & ((HASH_SIZE as u16) - 1)) as usize
    }
    // Rebuilds the hash chains for positions pos..pos+NONEXT; iterating in
    // reverse makes each chain run from the closest position to the farthest.
    fn build_hash(&mut self) {
        for el in self.hstart.iter_mut() { *el = NONEXT; }
        for el in self.hend.iter_mut() { *el = NONEXT; }
        for el in self.hnext.iter_mut() { *el = NONEXT; }
        if self.pos + 3 >= self.src.len() {
            return;
        }
        let end = (self.src.len() - 3).min(self.pos + NONEXT);
        for i in (self.pos .. end).rev() {
            let key = Self::hash(&self.src[i..]);
            if self.hstart[key] == NONEXT {
                // empty chain: this position becomes both head and tail
                self.hstart[key] = i;
                self.hend[key] = i;
                self.hnext[key] = NONEXT;
            } else {
                // append to the chain tail
                self.hnext[self.hend[key]] = i;
                self.hend[key] = i;
            }
        }
    }
    // Returns (distance, length) of the longest match at the current
    // position, or (0, 0) when none exists.
    fn find_match(&mut self) -> (u16, u16) {
        if self.pos == 0 || self.pos + 3 > self.src.len() {
            return (0, 0);
        }
        let key = Self::hash(&self.src[self.pos..]) as usize;

        let mut best_pos = 0;
        let mut best_len = 0;
        let mut idx = self.hstart[key];
        // Walk the chain while candidates stay inside the back-reference window.
        while idx != NONEXT && idx + WINDOW_SIZE > self.pos {
            if idx < self.pos {
                let cur_len = check_match(&self.src[self.pos..], &self.src[idx..]);
                if cur_len > best_len {
                    best_len = cur_len;
                    best_pos = self.pos - idx;
                    if best_len >= (MAX_MATCH_LEN as u16) {
                        // a longer match cannot be encoded, stop searching
                        return (best_pos as u16, MAX_MATCH_LEN as u16);
                    }
                }
            }
            idx = self.hnext[idx];
        }
        (best_pos as u16, best_len)
    }
    // Records, for every achievable match length, the closest distance that
    // provides it (entries already set are left untouched, and the chain is
    // ordered closest-first).
    fn find_all_matches(&mut self, dists: &mut [u16; MAX_MATCH_LEN + 1]) {
        if self.pos == 0 || self.pos + 3 > self.src.len() {
            return;
        }
        let key = Self::hash(&self.src[self.pos..]) as usize;
        let mut idx = self.hstart[key];
        while idx != NONEXT && idx + WINDOW_SIZE > self.pos {
            if idx < self.pos {
                let cur_len = (check_match(&self.src[self.pos..], &self.src[idx..]) as usize).min(MAX_MATCH_LEN);
                if cur_len > 0 && dists[cur_len] == 0 {
                    dists[cur_len] = (self.pos - idx) as u16;
                }
            }
            idx = self.hnext[idx];
        }
    }
    // Moves the current position forward; once it runs past the indexed
    // range the input view is rebased and the hash rebuilt, otherwise chain
    // heads that fell out of the window are pruned.
    fn advance(&mut self, num: usize) {
        self.pos += num;

        if self.pos >= NONEXT {
            let (_, tail) = self.src.split_at(self.pos - WINDOW_SIZE);
            self.src = tail;
            self.pos = WINDOW_SIZE;
            self.build_hash();
        } else {
            for (start, end) in self.hstart.iter_mut().zip(self.hend.iter_mut()) {
                let mut idx = *start;
                while idx != NONEXT && idx + WINDOW_SIZE < self.pos {
                    idx = self.hnext[idx];
                    *start = idx;
                }
                if idx == NONEXT {
                    *end = NONEXT;
                }
            }
        }
    }
    fn get_sym(&self) -> u8 { self.src[self.pos] }
    fn is_end(&self) -> bool { self.pos >= self.src.len() }
}
| 1565 | |
| 1566 | struct GreedyParser {} |
| 1567 | impl LZParse for GreedyParser { |
| 1568 | fn parse(&mut self, src: &[u8], dst: &mut Vec<Token>) { |
| 1569 | dst.reserve(src.len()); |
| 1570 | |
| 1571 | let mut matcher = MatchFinder::new(src); |
| 1572 | while !matcher.is_end() { |
| 1573 | let (best_pos, best_len) = matcher.find_match(); |
| 1574 | |
| 1575 | if best_len >= 3 { |
| 1576 | dst.push(Token::from_match(best_pos, best_len)); |
| 1577 | matcher.advance(best_len as usize); |
| 1578 | } else { |
| 1579 | dst.push(Token::from_literal(matcher.get_sym())); |
| 1580 | matcher.advance(1); |
| 1581 | } |
| 1582 | } |
| 1583 | dst.push(TOKEN_EOB); |
| 1584 | } |
| 1585 | } |
| 1586 | |
| 1587 | struct LazyParser {} |
| 1588 | impl LZParse for LazyParser { |
| 1589 | fn parse(&mut self, src: &[u8], dst: &mut Vec<Token>) { |
| 1590 | dst.reserve(src.len()); |
| 1591 | |
| 1592 | let mut matcher = MatchFinder::new(src); |
| 1593 | while !matcher.is_end() { |
| 1594 | let (best_pos, best_len) = matcher.find_match(); |
| 1595 | if best_len >= 3 { |
| 1596 | let last_sym = matcher.get_sym(); |
| 1597 | matcher.advance(1); |
| 1598 | if !matcher.is_end() { |
| 1599 | let (best_pos1, best_len1) = matcher.find_match(); |
| 1600 | if best_len1 > best_len + 1 { |
| 1601 | dst.push(Token::from_literal(last_sym)); |
| 1602 | dst.push(Token::from_match(best_pos1, best_len1)); |
| 1603 | matcher.advance(best_len1 as usize); |
| 1604 | continue; |
| 1605 | } |
| 1606 | } |
| 1607 | dst.push(Token::from_match(best_pos, best_len)); |
| 1608 | matcher.advance((best_len - 1) as usize); |
| 1609 | } else { |
| 1610 | dst.push(Token::from_literal(matcher.get_sym())); |
| 1611 | matcher.advance(1); |
| 1612 | } |
| 1613 | } |
| 1614 | dst.push(TOKEN_EOB); |
| 1615 | } |
| 1616 | } |
| 1617 | |
// Trellis node for the optimal parser.
#[derive(Clone,Copy)]
struct TNode {
    price: u32,     // cheapest known bit cost of reaching this position
    dist: u16,      // distance of the last improving match starting here
    link: usize,    // back-link during search, forward-link after path reversal
}

impl Default for TNode {
    fn default() -> Self {
        Self {
            price: std::u32::MAX, // "unreached" marker
            dist: 0,
            link: 0,
        }
    }
}
| 1634 | |
// Trellis-based parser searching for the token sequence with the lowest
// estimated bit cost.
struct OptimalParser {
    trellis: Vec<TNode>,
}
impl OptimalParser {
    fn new() -> Self { Self::default() }
    // Estimated cost of coding a literal, in bits.
    fn sym_price(_sym: u8) -> u32 { 9 }
    // Estimated cost of coding a match, in bits: symbol costs plus the
    // extra distance bits implied by the distance range.
    fn match_price(dist: u16, _len: u16) -> u32 {
        if dist <= 4 {
            9 + 5
        } else {
            let bits = 16 - (dist - 1).leading_zeros();
            9 + 3 + bits
        }
    }
}
impl Default for OptimalParser {
    fn default() -> Self {
        Self {
            trellis: Vec::with_capacity(WINDOW_SIZE),
        }
    }
}
impl LZParse for OptimalParser {
    // Dynamic-programming parse: trellis node i holds the cheapest estimated
    // cost of encoding src[..i]; edges are literals (i -> i+1) and matches
    // (i -> i+len for every achievable length).
    fn parse(&mut self, src: &[u8], dst: &mut Vec<Token>) {
        if src.is_empty() {
            dst.push(TOKEN_EOB);
            return;
        }
        dst.reserve(src.len());

        self.trellis.clear();
        self.trellis.reserve(src.len() + 1);
        for _ in 0..=src.len() {
            self.trellis.push(TNode::default());
        }
        self.trellis[0].price = 0;

        let mut matcher = MatchFinder::new(src);
        for i in 0..self.trellis.len() - 1 {
            let mut dists = [0; MAX_MATCH_LEN + 1];
            matcher.find_all_matches(&mut dists);

            // Literal edge to the next position.
            let sym = matcher.get_sym();
            let lprice = Self::sym_price(sym) + self.trellis[i].price;
            if self.trellis[i + 1].price > lprice {
                self.trellis[i + 1].price = lprice;
                self.trellis[i + 1].link = i;
            }
            // Match edges. The distance is stored at the source node; later
            // (longer) improving matches may overwrite it, which stays valid
            // because a longer match covers any shorter prefix length.
            for (len, &dist) in dists.iter().enumerate() {
                if dist != 0 {
                    let mprice = Self::match_price(dist, len as u16) + self.trellis[i].price;
                    if self.trellis[i + len].price > mprice {
                        self.trellis[i + len].price = mprice;
                        self.trellis[i + len].link = i;
                        self.trellis[i].dist = dist;
                    }
                }
            }

            matcher.advance(1);
        }
        // Reverse the back-links of the cheapest path into forward links.
        let mut idx = self.trellis.len() - 1;
        let mut nidx = self.trellis[idx].link;
        while idx > 0 {
            let oidx = idx;
            idx = nidx;
            nidx = self.trellis[idx].link;
            self.trellis[idx].link = oidx;
        }

        // Walk the path, emitting literals (step of 1) and matches (longer steps).
        let mut idx = 0;
        while idx < self.trellis.len() - 1 {
            let len = self.trellis[idx].link - idx;
            if len == 1 {
                dst.push(Token::from_literal(src[idx]));
            } else {
                dst.push(Token::from_match(self.trellis[idx].dist, len as u16));
            }
            idx = self.trellis[idx].link;
        }

        dst.push(TOKEN_EOB);
    }
}
| 1719 | |
/// Deflate compression mode.
#[derive(Clone,Copy,Debug,PartialEq)]
pub enum DeflateMode {
    /// No compression.
    NoCompr,
    /// Fast compression.
    Fast,
    /// Still fast but better compression.
    Better,
    /// Slow but the best compression.
    Best,
}

impl Default for DeflateMode {
    fn default() -> Self { DeflateMode::Better }
}
| 1736 | |
/// Human-readable description for the deflate compression level option.
pub const DEFLATE_MODE_DESCRIPTION: &str = "Deflate compression level.";
/// Deflate option for no compression.
pub const DEFLATE_MODE_NONE: &str = "none";
/// Deflate option for fast compression.
pub const DEFLATE_MODE_FAST: &str = "fast";
/// Deflate option for better compression.
pub const DEFLATE_MODE_BETTER: &str = "better";
/// Deflate option for best compression.
pub const DEFLATE_MODE_BEST: &str = "best";

/// All possible option values for deflate compression.
pub const DEFLATE_OPTION_VALUES: NAOptionDefinitionType = NAOptionDefinitionType::String(Some(&[DEFLATE_MODE_NONE, DEFLATE_MODE_FAST, DEFLATE_MODE_BETTER, DEFLATE_MODE_BEST]));
| 1749 | |
| 1750 | impl std::str::FromStr for DeflateMode { |
| 1751 | type Err = (); |
| 1752 | |
| 1753 | fn from_str(s: &str) -> Result<Self, Self::Err> { |
| 1754 | match s { |
| 1755 | DEFLATE_MODE_NONE => Ok(DeflateMode::NoCompr), |
| 1756 | DEFLATE_MODE_FAST => Ok(DeflateMode::Fast), |
| 1757 | DEFLATE_MODE_BETTER => Ok(DeflateMode::Better), |
| 1758 | DEFLATE_MODE_BEST => Ok(DeflateMode::Best), |
| 1759 | _ => Err(()), |
| 1760 | } |
| 1761 | } |
| 1762 | } |
| 1763 | |
| 1764 | impl ToString for DeflateMode { |
| 1765 | fn to_string(&self) -> String { |
| 1766 | match *self { |
| 1767 | DeflateMode::NoCompr => DEFLATE_MODE_NONE.to_string(), |
| 1768 | DeflateMode::Fast => DEFLATE_MODE_FAST.to_string(), |
| 1769 | DeflateMode::Better => DEFLATE_MODE_BETTER.to_string(), |
| 1770 | DeflateMode::Best => DEFLATE_MODE_BEST.to_string(), |
| 1771 | } |
| 1772 | } |
| 1773 | } |
| 1774 | |
// Internal block coding mode corresponding to the public DeflateMode.
#[derive(Clone,Copy,Debug,PartialEq)]
enum Mode {
    Copy,       // stored (uncompressed) blocks
    Fixed,      // fixed Huffman codes
    Dynamic,    // per-block dynamic Huffman codes
}

// Maximum size of one block's worth of staged input (stored-block 16-bit length).
const MAX_BLOCK_SIZE: usize = 65535;
/// Deflate stream compressor.
pub struct Deflate {
    mode: Mode,                     // block coding mode
    tokens: Vec<Token>,             // parsed tokens for the current block
    srcbuf: [u8; MAX_BLOCK_SIZE],   // staging buffer for queued input data
    ssize: usize,                   // amount of data currently in `srcbuf`
    sum1: u32,                      // Adler-32 low running sum (zlib mode)
    sum2: u32,                      // Adler-32 high running sum (zlib mode)
    zlib_mode: bool,                // emit zlib framing around the stream
    parser: Box<dyn LZParse + Send>,// LZ77 parser selected by the mode
}
| 1795 | |
| 1796 | impl Deflate { |
    /// Creates a new instance of `Deflate`.
    pub fn new(mode: DeflateMode) -> Self {
        // Pick the block coding mode and the matching LZ77 parser.
        let (mode, parser) = match mode {
            DeflateMode::NoCompr => (Mode::Copy, Box::new(NoParser{}) as Box<dyn LZParse + Send>),
            DeflateMode::Fast => (Mode::Fixed, Box::new(GreedyParser{}) as Box<dyn LZParse + Send>),
            DeflateMode::Better => (Mode::Dynamic, Box::new(LazyParser{}) as Box<dyn LZParse + Send>),
            DeflateMode::Best => (Mode::Dynamic, Box::new(OptimalParser::new()) as Box<dyn LZParse + Send>),
        };
        Self {
            mode, parser,
            tokens: Vec::with_capacity(MAX_BLOCK_SIZE),
            srcbuf: [0; MAX_BLOCK_SIZE],
            ssize: 0,
            sum1: 1, // Adler-32 sums start as 1 / 0
            sum2: 0,
            zlib_mode: false,
        }
    }
    /// Writes zlib stream header.
    pub fn write_zlib_header(&mut self, wr: &mut DeflateWriter) {
        // CMF: compression method 8 (deflate) with a 32K window (CINFO = 7).
        wr.write(8, 4);
        wr.write(7, 4);
        // FLG byte advertising the compression level; values keep
        // (CMF * 256 + FLG) divisible by 31 as RFC 1950 requires.
        let level = match self.mode {
            Mode::Copy => 0x01,
            Mode::Fixed => 0x5E,
            Mode::Dynamic => 0x9C,
            // 0xDA for the strongest one
        };
        wr.write(level, 8);
        self.zlib_mode = true;
    }
    // Writes the big-endian Adler-32 checksum that terminates a zlib stream.
    fn write_zlib_footer(&self, wr: &mut DeflateWriter) {
        wr.align();
        wr.write((self.sum2 >> 8) as u16, 8);
        wr.write((self.sum2 & 0xFF) as u16, 8);
        wr.write((self.sum1 >> 8) as u16, 8);
        wr.write((self.sum1 & 0xFF) as u16, 8);
    }
    /// Queues data for compression.
    ///
    /// The data might be not actually compressed until [`compress_end`] is called.
    ///
    /// [`compress_end`]: ./struct.Deflate.html#method.compress_end
    pub fn compress(&mut self, src: &[u8], wr: &mut DeflateWriter) {
        let mut src = src;
        while !src.is_empty() {
            // Stage input into the internal buffer, emitting a block each
            // time the buffer fills up completely.
            let clen = src.len().min(MAX_BLOCK_SIZE - self.ssize);
            let (head, tail) = src.split_at(clen);
            src = tail;
            self.srcbuf[self.ssize..][..clen].copy_from_slice(head);
            self.ssize += clen;
            if self.ssize == MAX_BLOCK_SIZE {
                self.do_block(wr, false);
            }
        }
    }
    /// Tells the encoder to finish data compression.
    ///
    /// Complete data will be output after this call.
    pub fn compress_end(&mut self, wr: &mut DeflateWriter) {
        if self.ssize > 0 {
            self.do_block(wr, true);
        } else {
            // No pending data: emit an empty final fixed-code block.
            wr.write(1, 1);
            wr.write(1, 2);
            wr.write(0, 7); //static EOF sym
        }
        if self.zlib_mode {
            self.write_zlib_footer(wr);
        }
    }
| 1868 | ///! Tells the encoder to compress the data it received and flush it. |
| 1869 | pub fn compress_flush(&mut self, wr: &mut DeflateWriter) { |
| 1870 | if self.ssize > 0 { |
| 1871 | self.do_block(wr, false); |
| 1872 | } |
| 1873 | if (wr.bits & 7) != 0 { |
| 1874 | // write zero-length copy block for byte-alignment |
| 1875 | wr.write(0, 1); |
| 1876 | wr.write(0, 2); |
| 1877 | wr.align(); |
| 1878 | wr.write(0, 16); |
| 1879 | wr.write(0xFFFF, 16); |
| 1880 | } |
| 1881 | } |
| 1882 | fn do_block(&mut self, wr: &mut DeflateWriter, final_block: bool) { |
| 1883 | const CRC_BASE: u32 = 65521; |
| 1884 | for &b in self.srcbuf[..self.ssize].iter() { |
| 1885 | self.sum1 += u32::from(b); |
| 1886 | if self.sum1 >= CRC_BASE { |
| 1887 | self.sum1 -= CRC_BASE; |
| 1888 | } |
| 1889 | self.sum2 += self.sum1; |
| 1890 | if self.sum2 >= CRC_BASE { |
| 1891 | self.sum2 -= CRC_BASE; |
| 1892 | } |
| 1893 | } |
| 1894 | match self.mode { |
| 1895 | Mode::Copy => { |
| 1896 | wr.write(final_block as u16, 1); |
| 1897 | wr.write(0, 2); |
| 1898 | wr.align(); |
| 1899 | wr.write(self.ssize as u16, 16); |
| 1900 | wr.write(!self.ssize as u16, 16); |
| 1901 | for &b in self.srcbuf[..self.ssize].iter() { |
| 1902 | wr.write(u16::from(b), 8); |
| 1903 | } |
| 1904 | }, |
| 1905 | Mode::Fixed => { |
| 1906 | wr.write(final_block as u16, 1); |
| 1907 | wr.write(1, 2); |
| 1908 | self.tokens.clear(); |
| 1909 | self.parser.parse(&self.srcbuf[..self.ssize], &mut self.tokens); |
| 1910 | let mut codes = CodeHuff::new(true); |
| 1911 | codes.make_codes(&self.tokens); |
| 1912 | let mut dists = DistHuff::new(true); |
| 1913 | dists.make_codes(&self.tokens); |
| 1914 | wr.write_tokens(&self.tokens, &codes, &dists); |
| 1915 | }, |
| 1916 | Mode::Dynamic => { |
| 1917 | wr.write(final_block as u16, 1); |
| 1918 | wr.write(2, 2); |
| 1919 | self.tokens.clear(); |
| 1920 | self.parser.parse(&self.srcbuf[..self.ssize], &mut self.tokens); |
| 1921 | let mut codes = CodeHuff::new(false); |
| 1922 | codes.make_codes(&self.tokens); |
| 1923 | let mut dists = DistHuff::new(false); |
| 1924 | dists.make_codes(&self.tokens); |
| 1925 | wr.write((codes.num_codes - 257) as u16, 5); |
| 1926 | wr.write((dists.num_codes - 1) as u16, 5); |
| 1927 | wr.write_codes(&codes, &dists); |
| 1928 | wr.write_tokens(&self.tokens, &codes, &dists); |
| 1929 | }, |
| 1930 | } |
| 1931 | self.ssize = 0; |
| 1932 | } |
| 1933 | } |
| 1934 | |
#[cfg(test)]
mod test {
    use super::*;

    #[test]
    fn test_inflate1() {
        // "Hello, world!" as a raw deflate stream.
        const TEST_DATA: &[u8] = &[
            0xF3, 0x48, 0xCD, 0xC9, 0xC9, 0xD7, 0x51, 0x28,
            0xCF, 0x2F, 0xCA, 0x49, 0x51, 0x04, 0x00 ];
        const TEST_REF: &[u8] = b"Hello, world!";
        let mut dst_buf = [0u8; 13];
        let len = Inflate::uncompress(TEST_DATA, &mut dst_buf).unwrap();
        assert_eq!(len, 13);
        // compare the decoded output as a slice instead of byte-by-byte
        assert_eq!(&dst_buf[..len], TEST_REF);
    }
    #[test]
    fn test_inflate2() {
        // Feed the decoder in 3-byte input chunks with a 7-byte output
        // window to exercise the incremental decompress_data() interface.
        const TEST_DATA3: &[u8] = &[ 0x4B, 0x4C, 0x44, 0x80, 0x24, 0x54, 0x80, 0x2C, 0x06, 0x00 ];
        const TEST_REF3: &[u8] = b"aaaaaaaaaaaabbbbbbbbbbbbbbbaaaaabbbbbbb";
        let mut dst_buf = [0u8; 39];

        let mut inflate = Inflate::new();
        let mut output_chunk = [0u8; 7];
        let mut output_pos = 0;
        for input in TEST_DATA3.chunks(3) {
            let mut repeat = false;
            loop {
                let ret = inflate.decompress_data(input, &mut output_chunk, repeat);
                match ret {
                    Ok(len) => {
                        // block decoded to the end — copy what was produced
                        dst_buf[output_pos..][..len].copy_from_slice(&output_chunk[..len]);
                        output_pos += len;
                        break;
                    },
                    Err(DecompressError::ShortData) => {
                        // this input chunk was fully consumed
                        break;
                    },
                    Err(DecompressError::OutputFull) => {
                        // flush the full window and decode the same chunk again
                        repeat = true;
                        dst_buf[output_pos..][..output_chunk.len()].copy_from_slice(&output_chunk);
                        output_pos += output_chunk.len();
                    },
                    _ => {
                        panic!("decompress error {:?}", ret.err().unwrap());
                    },
                }
            }
        }

        assert_eq!(output_pos, dst_buf.len());
        assert_eq!(&dst_buf[..], TEST_REF3);
    }
    #[test]
    fn test_inflate3() {
        // A complete gzip file (header, deflated text, CRC32/ISIZE footer).
        const TEST_DATA: &[u8] = &[
            0x1F, 0x8B, 0x08, 0x08, 0xF6, 0x7B, 0x90, 0x5E, 0x02, 0x03, 0x31, 0x2E, 0x74, 0x78, 0x74, 0x00,
            0xE5, 0x95, 0x4B, 0x4E, 0xC3, 0x30, 0x10, 0x40, 0xF7, 0x39, 0xC5, 0x1C, 0x00, 0x16, 0x70, 0x83,
            0x0A, 0xB5, 0x3B, 0xE8, 0x82, 0x5E, 0x60, 0x1A, 0x4F, 0xE2, 0x11, 0xFE, 0x44, 0x1E, 0xA7, 0x69,
            0x6E, 0xCF, 0x38, 0xDD, 0xB0, 0x40, 0xA2, 0x46, 0x2D, 0x20, 0x2A, 0xE5, 0xAB, 0xCC, 0xE7, 0xBD,
            0x49, 0xAC, 0x6C, 0x03, 0x64, 0x4B, 0xD0, 0x71, 0x92, 0x0C, 0x06, 0x67, 0x88, 0x1D, 0x3C, 0xD9,
            0xC4, 0x92, 0x3D, 0x4A, 0xF3, 0x3C, 0x43, 0x4E, 0x23, 0x81, 0x8B, 0x07, 0x82, 0x1E, 0xF5, 0x90,
            0x23, 0x78, 0x6A, 0x56, 0x30, 0x60, 0xCA, 0x89, 0x4D, 0x4F, 0xC0, 0x01, 0x10, 0x06, 0xC2, 0xA4,
            0xA1, 0x44, 0xCD, 0xF6, 0x54, 0x50, 0xA8, 0x8D, 0xC1, 0x9C, 0x5F, 0x71, 0x37, 0x45, 0xC8, 0x63,
            0xCA, 0x8E, 0xC0, 0xE8, 0x23, 0x69, 0x56, 0x9A, 0x8D, 0x5F, 0xB6, 0xC9, 0x96, 0x53, 0x4D, 0x17,
            0xAB, 0xB9, 0xB0, 0x49, 0x14, 0x5A, 0x0B, 0x96, 0x82, 0x7C, 0xB7, 0x6F, 0x17, 0x35, 0xC7, 0x9E,
            0xDF, 0x78, 0xA3, 0xF1, 0xD0, 0xA2, 0x73, 0x1C, 0x7A, 0xD8, 0x2B, 0xB3, 0x5C, 0x90, 0x85, 0xBB,
            0x2A, 0x14, 0x2E, 0xF7, 0xD1, 0x19, 0x48, 0x0A, 0x23, 0x57, 0x45, 0x13, 0x3E, 0xD6, 0xA0, 0xBD,
            0xF2, 0x11, 0x7A, 0x22, 0x21, 0xAD, 0xE5, 0x70, 0x56, 0xA0, 0x9F, 0xA5, 0xA5, 0x03, 0x85, 0x2A,
            0xDE, 0x92, 0x00, 0x32, 0x61, 0x10, 0xAD, 0x27, 0x13, 0x7B, 0x5F, 0x98, 0x7F, 0x59, 0x83, 0xB8,
            0xB7, 0x35, 0x16, 0xEB, 0x12, 0x0F, 0x1E, 0xD9, 0x14, 0x0B, 0xCF, 0xEE, 0x6D, 0x91, 0xF8, 0x93,
            0x6E, 0x81, 0x3F, 0x7F, 0x41, 0xA4, 0x22, 0x1F, 0xB7, 0xE6, 0x85, 0x83, 0x9A, 0xA2, 0x61, 0x12,
            0x0D, 0x0F, 0x6D, 0x01, 0xBD, 0xB0, 0xE8, 0x1D, 0xEC, 0xD1, 0xA0, 0xBF, 0x1F, 0x4E, 0xFB, 0x55,
            0xBD, 0x73, 0xDD, 0x87, 0xB9, 0x53, 0x23, 0x17, 0xD3, 0xE2, 0xE9, 0x08, 0x87, 0x42, 0xFF, 0xCF,
            0x26, 0x42, 0xAE, 0x76, 0xB5, 0xAE, 0x97, 0x0C, 0x18, 0x78, 0xA0, 0x24, 0xE5, 0x54, 0x0C, 0x6E,
            0x60, 0x52, 0x79, 0x22, 0x57, 0xF5, 0x87, 0x78, 0x78, 0x04, 0x93, 0x46, 0xEF, 0xCB, 0x98, 0x96,
            0x8B, 0x65, 0x00, 0xB7, 0x36, 0xBD, 0x77, 0xA8, 0xBD, 0x5A, 0xAA, 0x1A, 0x09, 0x00, 0x00
        ];

        let mut mr = MemoryReader::new_read(TEST_DATA);
        let mut br = ByteReader::new(&mut mr);
        // just make sure the gzip stream decodes without an error
        let _dst_buf = gzip_decode(&mut br, false).unwrap();

        // println!("{}", String::from_utf8_lossy(_dst_buf.as_slice()));
    }
    #[test]
    fn test_deflate_crc() {
        // NoCompr mode produces a stored block; check the whole zlib stream
        // including the Adler-32 footer against a known-good reference.
        let output = Vec::with_capacity(20);
        let mut writer = DeflateWriter::new(output);
        let mut compr = Deflate::new(DeflateMode::NoCompr);
        compr.write_zlib_header(&mut writer);
        compr.compress(b"Hello, world!", &mut writer);
        compr.compress_end(&mut writer);
        let output = writer.end();
        assert_eq!(output.as_slice(), b"\x78\x01\x01\x0D\x00\xF2\xFFHello, world!\x20\x5E\x04\x8A");
    }
    // Round-trip helper: compress SRC with the given mode, then check that
    // decompression restores the original data exactly.
    fn deflate_test(mode: DeflateMode) {
        const SRC: &[u8] =
b"The first day of Christmas,
My true love sent to me
A partridge in a pear tree.

The second day of Christmas,
My true love sent to me
Two turtle doves, and
A partridge in a pear tree.

The third day of Christmas,
My true love sent to me
Three French hens,
Two turtle doves, and
A partridge in a pear tree.

The fourth day of Christmas,
My true love sent to me
Four colly birds,
Three French hens,
Two turtle doves, and
A partridge in a pear tree.

The fifth day of Christmas,
My true love sent to me
Five gold rings,
Four colly birds,
Three French hens,
Two turtle doves, and
A partridge in a pear tree.";
        let output = Vec::with_capacity(SRC.len() + 16);
        let mut writer = DeflateWriter::new(output);
        let mut compr = Deflate::new(mode);
        compr.write_zlib_header(&mut writer);
        compr.compress(SRC, &mut writer);
        compr.compress_end(&mut writer);
        let output = writer.end();
        let mut uncompr = vec![0u8; SRC.len()];
        Inflate::uncompress(&output, &mut uncompr).unwrap();
        assert_eq!(SRC, uncompr.as_slice());
    }
    #[test]
    fn test_deflate_fast() {
        deflate_test(DeflateMode::Fast);
    }
    #[test]
    fn test_deflate_better() {
        deflate_test(DeflateMode::Better);
    }
    #[test]
    fn test_deflate_best() {
        deflate_test(DeflateMode::Best);
    }
}