| 1 | //! Deflate format (RFC 1951) support. |
| 2 | //! |
//! This module provides functionality for decompressing raw deflated streams via [`Inflate`], decompressing gzip files (RFC 1952) via [`gzip_decode`], and compressing raw or zlib streams via [`Deflate`].
| 4 | //! |
| 5 | //! [`Deflate`]: ./struct.Deflate.html |
| 6 | //! [`Inflate`]: ./struct.Inflate.html |
| 7 | //! [`gzip_decode`]: ./fn.gzip_decode.html |
| 8 | //! |
| 9 | //! # Examples |
| 10 | //! |
| 11 | //! Decompressing full input buffer into sufficiently large output buffer: |
| 12 | //! ``` |
| 13 | //! # use nihav_core::compr::DecompressError; |
| 14 | //! use nihav_core::compr::deflate::Inflate; |
| 15 | //! |
| 16 | //! # fn decompress(input: &[u8]) -> Result<(), DecompressError> { |
| 17 | //! # let mut output_buffer = [0u8; 16]; |
| 18 | //! let output_length = Inflate::uncompress(input, &mut output_buffer)?; |
| 19 | //! # Ok(()) |
| 20 | //! # } |
| 21 | //! ``` |
| 22 | //! |
| 23 | //! Decompressing input chunks into portions of output: |
| 24 | //! ``` |
| 25 | //! use nihav_core::compr::DecompressError; |
| 26 | //! use nihav_core::compr::deflate::Inflate; |
| 27 | //! |
| 28 | //! # fn decompress(input_data: &[u8]) -> Result<(), DecompressError> { |
| 29 | //! let mut inflate = Inflate::new(); |
| 30 | //! let mut dst_buf: Vec<u8> = Vec::new(); |
| 31 | //! let mut output_chunk = [0u8; 1024]; |
| 32 | //! for src in input_data.chunks(512) { |
| 33 | //! let mut repeat = false; |
| 34 | //! loop { |
| 35 | //! let ret = inflate.decompress_data(src, &mut output_chunk, repeat); |
| 36 | //! match ret { |
| 37 | //! Ok(len) => { // we got a buffer decoded successfully to the end |
| 38 | //! dst_buf.extend_from_slice(&output_chunk[..len]); |
| 39 | //! break; |
| 40 | //! }, |
| 41 | //! Err(DecompressError::ShortData) => { // this block of data was fully read |
| 42 | //! break; |
| 43 | //! }, |
| 44 | //! Err(DecompressError::OutputFull) => { |
| 45 | //! // the output buffer is full, flush it and continue decoding the same block |
| 46 | //! repeat = true; |
| 47 | //! dst_buf.extend_from_slice(&output_chunk); |
| 48 | //! }, |
| 49 | //! Err(err) => { |
| 50 | //! return Err(err); |
| 51 | //! }, |
| 52 | //! } |
| 53 | //! } |
| 54 | //! } |
| 55 | //! # Ok(()) |
| 56 | //! # } |
| 57 | //! ``` |
| 58 | //! |
| 59 | //! Compressing input buffer into zlib stream: |
| 60 | //! ``` |
| 61 | //! use nihav_core::compr::deflate::{Deflate, DeflateMode, DeflateWriter}; |
| 62 | //! |
| 63 | //! # fn compress(input: &[u8]) { |
| 64 | //! let output = Vec::with_capacity(input.len() * 65540 / 65535 + 6); |
| 65 | //! let mut writer = DeflateWriter::new(output); |
| 66 | //! let mut compr = Deflate::new(DeflateMode::Fast); |
| 67 | //! compr.write_zlib_header(&mut writer); |
| 68 | //! compr.compress(input, &mut writer); |
| 69 | //! compr.compress_end(&mut writer); |
| 70 | //! let output = writer.end(); |
| 71 | //! # } |
| 72 | //! ``` |
| 73 | |
| 74 | use crate::io::byteio::*; |
| 75 | use crate::io::bitreader::*; |
| 76 | use crate::io::codebook::*; |
| 77 | use super::*; |
| 78 | |
// Highest literal/length symbol index; the fixed Huffman table defined in
// RFC 1951 has 288 codes (indices 0..=287), i.e. NUM_LITERALS + 1 entries.
const NUM_LITERALS: usize = 287;
// Maximum number of distance codes (RFC 1951 allows up to 32 entries).
const NUM_DISTS: usize = 32;
| 81 | |
// Codebook descriptor generator for the fixed literal/length Huffman table
// defined in RFC 1951 section 3.2.6.
struct FixedLenCodeReader {}

impl CodebookDescReader<u16> for FixedLenCodeReader {
    // Code length for each symbol range as mandated by the RFC:
    // 0..=143 -> 8 bits, 144..=255 -> 9 bits, 256..=279 -> 7 bits, 280..=287 -> 8 bits.
    fn bits(&mut self, idx: usize) -> u8 {
        if idx < 144 { 8 }
        else if idx < 256 { 9 }
        else if idx < 280 { 7 }
        else { 8 }
    }
    // Canonical code for the symbol, bit-reversed because deflate bitstreams
    // are read LSB first.  The added constants (0x30, 0x190, 0x000, 0xC0) are
    // the first code of each symbol range from the RFC table.
    #[allow(clippy::identity_op)]
    fn code(&mut self, idx: usize) -> u32 {
        let base = idx as u32;
        let bits = self.bits(idx);
        if idx < 144 { reverse_bits(base + 0x30, bits) }
        else if idx < 256 { reverse_bits(base + 0x190 - 144, bits) }
        else if idx < 280 { reverse_bits(base + 0x000 - 256, bits) }
        else { reverse_bits(base + 0xC0 - 280, bits) }
    }
    // Symbols map one-to-one onto their indices.
    fn sym (&mut self, idx: usize) -> u16 { idx as u16 }
    fn len(&mut self) -> usize { NUM_LITERALS + 1 }
}
| 103 | |
// Saved bit reader position, kept so that decoding can be suspended at the
// end of one input chunk and resumed with the next one.
#[derive(Clone,Copy,Default)]
struct BitReaderState {
    pos: usize, // byte offset within the current source chunk
    bitbuf: u32, // LSB-first bit cache
    bits: u8, // number of valid bits currently in `bitbuf`
}
| 110 | |
// LSB-first bit reader over the current input chunk.  Its state can be saved
// into `BitReaderState` and restored later to continue across chunk borders.
struct CurrentSource<'a> {
    src: &'a [u8],
    br: BitReaderState,
}

impl<'a> CurrentSource<'a> {
    // Starts reading a fresh chunk: the byte position restarts at zero while
    // any bits cached from the previous chunk are kept in `br.bitbuf`.
    fn new(src: &'a [u8], br: BitReaderState) -> Self {
        let mut newsrc = Self { src, br };
        newsrc.br.pos = 0;
        newsrc.refill();
        newsrc
    }
    // Resumes reading the same chunk from the saved position (used when the
    // caller continues after an `OutputFull` condition).
    fn reinit(src: &'a [u8], br: BitReaderState) -> Self {
        let mut newsrc = Self { src, br };
        newsrc.refill();
        newsrc
    }
    // Tops up the 32-bit cache with whole bytes while there is room for them.
    fn refill(&mut self) {
        while (self.br.bits <= 24) && (self.br.pos < self.src.len()) {
            self.br.bitbuf |= u32::from(self.src[self.br.pos]) << self.br.bits;
            self.br.bits += 8;
            self.br.pos += 1;
        }
    }
    // Drops `nbits` already-consumed bits from the cache; caller must ensure
    // the cache holds at least that many bits.
    fn skip_cache(&mut self, nbits: u8) {
        self.br.bitbuf >>= nbits;
        self.br.bits -= nbits;
    }
    // Reads up to 16 bits LSB first, refilling the cache on demand.
    fn read(&mut self, nbits: u8) -> BitReaderResult<u32> {
        if nbits == 0 { return Ok(0); }
        if nbits > 16 { return Err(BitReaderError::TooManyBitsRequested); }
        if self.br.bits < nbits {
            self.refill();
            if self.br.bits < nbits { return Err(BitReaderError::BitstreamEnd); }
        }
        let ret = self.br.bitbuf & ((1 << nbits) - 1);
        self.skip_cache(nbits);
        Ok(ret)
    }
    // Reads a single bit as a boolean.
    fn read_bool(&mut self) -> BitReaderResult<bool> {
        if self.br.bits == 0 {
            self.refill();
            if self.br.bits == 0 { return Err(BitReaderError::BitstreamEnd); }
        }
        let ret = (self.br.bitbuf & 1) != 0;
        self.skip_cache(1);
        Ok(ret)
    }
    // Looks at the next `nbits` bits without consuming them.  Near the end of
    // the chunk fewer bits may be available; the missing high bits read as 0.
    fn peek(&mut self, nbits: u8) -> u32 {
        if nbits == 0 || nbits > 16 { return 0; }
        if self.br.bits < nbits {
            self.refill();
        }
        self.br.bitbuf & ((1 << nbits) - 1)
    }
    // Discards `nbits` bits; callers only invoke this after checking `left()`,
    // hence the `unreachable!()` on the underflow path.
    fn skip(&mut self, nbits: u32) -> BitReaderResult<()> {
        if u32::from(self.br.bits) >= nbits {
            self.skip_cache(nbits as u8);
        } else {
            unreachable!();
        }
        Ok(())
    }
    // Skips to the next byte boundary (used for stored blocks).
    fn align(&mut self) {
        let b = self.br.bits & 7;
        if b != 0 {
            self.skip_cache(b);
        }
    }
    // Total number of bits still readable: unread source bytes plus cache.
    fn left(&self) -> isize {
        ((self.src.len() as isize) - (self.br.pos as isize)) * 8 + (self.br.bits as isize)
    }
}
| 184 | |
impl<'a, S: Copy> CodebookReader<S> for CurrentSource<'a> {
    // Decodes one codeword by walking the codebook's multi-level lookup table.
    // On input underrun the reader state is rolled back to `orig_br` and
    // `MemoryError` is returned so the caller can retry with more data.
    fn read_cb(&mut self, cb: &Codebook<S>) -> CodebookResult<S> {
        let mut esc = true;
        let mut idx = 0;
        let mut lut_bits = cb.lut_bits;
        let orig_br = self.br;
        while esc {
            let lut_idx = (self.peek(lut_bits) as usize) + (idx as usize);
            if cb.table[lut_idx] == TABLE_FILL_VALUE { return Err(CodebookError::InvalidCode); }
            let bits = cb.table[lut_idx] & 0x7F;
            esc = (cb.table[lut_idx] & 0x80) != 0;
            idx = (cb.table[lut_idx] >> 8) as usize;
            // Escape entries consume the full LUT index width; leaf entries
            // consume only the actual code length.
            let skip_bits = if esc { u32::from(lut_bits) } else { bits };
            if (skip_bits as isize) > self.left() {
                self.br = orig_br;
                self.refill();
                return Err(CodebookError::MemoryError);
            }
            self.skip(skip_bits as u32).unwrap();
            lut_bits = bits as u8;
        }
        Ok(cb.syms[idx])
    }
}
| 209 | |
// Resumable decoder state.  Variants carry the values already parsed so that
// decoding can stop at any input/output boundary and pick up exactly where it
// left off on the next `decompress_data()` call.
enum InflateState {
    Start,
    BlockStart,
    BlockMode,
    // Stored (uncompressed) block: LEN, NLEN, then a raw byte copy.
    StaticBlockLen,
    StaticBlockInvLen(u32),
    StaticBlockCopy(usize),
    // Block using the fixed Huffman tables from RFC 1951 3.2.6.
    FixedBlock,
    FixedBlockLengthExt(usize, u8),
    FixedBlockDist(usize),
    FixedBlockDistExt(usize, usize, u8),
    FixedBlockCopy(usize, usize),
    FixedBlockLiteral(u8),
    // Header parsing for a block with dynamic Huffman tables (RFC 1951 3.2.7).
    DynBlockHlit,
    DynBlockHdist,
    DynBlockHclen,
    DynLengths(usize),
    DynCodeLengths,
    DynCodeLengthsAdd(usize),
    // Payload decoding for the dynamic-table block.
    DynBlock,
    DynBlockLengthExt(usize, u8),
    DynBlockDist(usize),
    DynBlockDistExt(usize, usize, u8),
    DynCopy(usize, usize),
    DynBlockLiteral(u8),
    End,
}
| 237 | |
/// The decompressor for deflated streams (RFC 1951).
pub struct Inflate {
    br: BitReaderState, // saved bit reader position between calls
    fix_len_cb: Codebook<u16>, // fixed literal/length codebook (built once)

    buf: [u8; 65536], // 64 kB history window for LZ77 back-references
    bpos: usize, // current write position inside the window (wraps)
    output_idx: usize, // bytes written to the destination in the current run
    full_pos: usize, // total number of bytes produced so far (never wraps)

    state: InflateState,
    final_block: bool, // BFINAL flag of the block being decoded
    hlit: usize, // number of literal/length codes in the dynamic block header
    hdist: usize, // number of distance codes in the dynamic block header
    dyn_len_cb: Option<Codebook<u32>>, // code-length codebook
    dyn_lit_cb: Option<Codebook<u32>>, // dynamic literal/length codebook
    dyn_dist_cb: Option<Codebook<u32>>, // dynamic distance codebook
    len_lengths: [u8; 19],
    all_lengths: [u8; NUM_LITERALS + NUM_DISTS],
    cur_len_idx: usize, // resume index while reading code lengths
}
| 259 | |
// Extra bits for length codes 257..=285 (RFC 1951 section 3.2.5).
const LENGTH_ADD_BITS: [u8; 29] = [
     0,  0,  0,  0,  0,  0,  0,  0,  1,  1,
     1,  1,  2,  2,  2,  2,  3,  3,  3,  3,
     4,  4,  4,  4,  5,  5,  5,  5,  0
];
// Base match length for length codes 257..=285.
const LENGTH_BASE: [u16; 29] = [
      3,   4,   5,   6,   7,   8,   9,  10,  11,  13,
     15,  17,  19,  23,  27,  31,  35,  43,  51,  59,
     67,  83,  99, 115, 131, 163, 195, 227, 258
];
// Extra bits for distance codes 0..=29 (RFC 1951 section 3.2.5).
const DIST_ADD_BITS: [u8; 30] = [
     0,  0,  0,  0,  1,  1,  2,  2,  3,  3,
     4,  4,  5,  5,  6,  6,  7,  7,  8,  8,
     9,  9, 10, 10, 11, 11, 12, 12, 13, 13
];
// Base match distance for distance codes 0..=29.
const DIST_BASE: [u16; 30] = [
        1,    2,    3,    4,    5,    7,    9,   13,   17,   25,
       33,   49,   65,   97,  129,  193,  257,  385,  513,  769,
     1025, 1537, 2049, 3073, 4097, 6145, 8193, 12289, 16385, 24577
];
// Transmission order of the code-length code lengths (RFC 1951 section 3.2.7).
const LEN_RECODE: [usize; 19] = [
    16, 17, 18, 0, 8, 7, 9, 6, 10, 5, 11, 4, 12, 3, 13, 2, 14, 1, 15
];
// Extra bits and repeat-count bases for code-length symbols 16, 17 and 18.
const REPEAT_BITS: [u8; 3] = [ 2, 3, 7 ];
const REPEAT_BASE: [u8; 3] = [ 3, 3, 11 ];
| 285 | |
// Reads `$bits` bits from `$csrc`; on input underrun saves the reader state
// into `$self.br` and returns `ShortData` so the caller can resume later.
macro_rules! read_bits {
    ($self: expr, $csrc: expr, $bits: expr) => ({
        if $csrc.left() < $bits as isize {
            $self.br = $csrc.br;
            return Err(DecompressError::ShortData);
        }
        $csrc.read($bits).unwrap()
    })
}

// Reads one codeword from `$csrc` using codebook `$cb`.  Input underrun maps
// to a resumable `ShortData`; any other codebook error is fatal and moves the
// decoder into the `End` state.
macro_rules! read_cb {
    ($self: expr, $csrc: expr, $cb: expr) => ({
        let ret = $csrc.read_cb($cb);
        if let Err(CodebookError::MemoryError) = ret {
            $self.br = $csrc.br;
            return Err(DecompressError::ShortData);
        }
        match ret {
            Ok(val) => val,
            Err(_) => {
                $self.state = InflateState::End;
                return Err(DecompressError::InvalidData);
            },
        }
    })
}
| 312 | |
| 313 | impl Inflate { |
    /// Creates a new instance of `Inflate` struct.
    pub fn new() -> Self {
        // The fixed literal/length codebook never changes, so build it once here.
        let mut cr = FixedLenCodeReader {};
        let fix_len_cb = Codebook::new(&mut cr, CodebookMode::LSB).unwrap();
        Self {
            br: BitReaderState::default(),
            fix_len_cb,

            buf: [0; 65536],
            bpos: 0,
            output_idx: 0,
            full_pos: 0,

            state: InflateState::Start,
            final_block: false,
            dyn_len_cb: None,
            dyn_lit_cb: None,
            dyn_dist_cb: None,
            hlit: 0,
            hdist: 0,
            len_lengths: [0; 19],
            all_lengths: [0; NUM_LITERALS + NUM_DISTS],
            cur_len_idx: 0,
        }
    }
| 339 | fn put_literal(&mut self, val: u8) { |
| 340 | self.buf[self.bpos] = val; |
| 341 | self.bpos = (self.bpos + 1) & (self.buf.len() - 1); |
| 342 | self.full_pos += 1; |
| 343 | } |
    // Copies `len` bytes located `offset` bytes back in the history window
    // into both the window and `dst`.  The copy is performed byte by byte on
    // purpose: source and destination may overlap (distance < length matches
    // repeat the freshly written bytes, RLE-style).
    fn lz_copy(&mut self, offset: usize, len: usize, dst: &mut [u8]) -> DecompressResult<()> {
        let mask = self.buf.len() - 1;
        // A distance pointing before the start of the stream is invalid.
        if offset > self.full_pos {
            return Err(DecompressError::InvalidData);
        }
        let cstart = (self.bpos.wrapping_sub(offset)) & mask;
        for i in 0..len {
            self.buf[(self.bpos + i) & mask] = self.buf[(cstart + i) & mask];
            dst[i] = self.buf[(cstart + i) & mask];
        }
        self.bpos = (self.bpos + len) & mask;
        self.full_pos += len;
        Ok(())
    }
| 358 | ///! Reports whether decoder has finished decoding the input. |
| 359 | pub fn is_finished(&self) -> bool { |
| 360 | match self.state { |
| 361 | InflateState::End => true, |
| 362 | _ => false, |
| 363 | } |
| 364 | } |
    /// Reports the current amount of bytes output into the destination buffer after the last run.
    pub fn get_current_output_size(&self) -> usize { self.output_idx }
| 367 | ///! Reports the total amount of bytes decoded so far. |
| 368 | pub fn get_total_output_size(&self) -> usize { self.bpos } |
    /// Tries to decompress input data and write it to the output buffer.
    ///
    /// Since the decompressor can work with arbitrary input and output chunks its return value may have several meanings:
    /// * `Ok(len)` means the stream has been fully decoded and the number of bytes output into the destination buffer is returned.
    /// * [`DecompressError::ShortData`] means the input stream has been fully read but more data is needed.
    /// * [`DecompressError::OutputFull`] means the output buffer is full and should be flushed. Then decoding should continue on the same input block with `continue_block` parameter set to `true`.
    ///
    /// [`DecompressError::ShortData`]: ../enum.DecompressError.html#variant.ShortData
    /// [`DecompressError::OutputFull`]: ../enum.DecompressError.html#variant.OutputFull
    #[allow(clippy::comparison_chain)]
    pub fn decompress_data(&mut self, src: &[u8], dst: &mut [u8], continue_block: bool) -> DecompressResult<usize> {
        if src.is_empty() || dst.is_empty() {
            return Err(DecompressError::InvalidArgument);
        }
        // Either start reading a fresh input chunk or resume the current one
        // after the caller flushed a full output buffer.
        let mut csrc = if !continue_block {
                CurrentSource::new(src, self.br)
            } else {
                self.output_idx = 0;
                CurrentSource::reinit(src, self.br)
            };
        // check for zlib stream header
        if let (&InflateState::Start, true) = (&self.state, src.len() > 2) {
            let cm = src[0] & 0xF;
            let cinfo = src[0] >> 4;
            let hdr = (u16::from(src[0]) << 8) | u16::from(src[1]);
            // CM 8 (deflate), window <= 32 kB, valid FCHECK => skip the header.
            if cm == 8 && cinfo <= 7 && (hdr % 31) == 0 {
                csrc.skip(16).unwrap();
            }
        }
        // Resumable state machine: every arm either advances `self.state` or
        // saves the reader position and bails out with a recoverable error.
        'main: loop {
            match self.state {
                InflateState::Start | InflateState::BlockStart => {
                    if csrc.left() == 0 {
                        self.br = csrc.br;
                        return Err(DecompressError::ShortData);
                    }
                    self.final_block = csrc.read_bool().unwrap();
                    self.state = InflateState::BlockMode;
                },
                InflateState::BlockMode => {
                    let bmode = read_bits!(self, csrc, 2);
                    match bmode {
                        0 => {
                            // Stored blocks are byte-aligned.
                            csrc.align();
                            self.state = InflateState::StaticBlockLen;
                        },
                        1 => { self.state = InflateState::FixedBlock; },
                        2 => { self.state = InflateState::DynBlockHlit; },
                        _ => {
                            self.state = InflateState::End;
                            return Err(DecompressError::InvalidHeader);
                        },
                    };
                },
                // Stored (uncompressed) block: LEN, its complement NLEN, raw copy.
                InflateState::StaticBlockLen => {
                    let len = read_bits!(self, csrc, 16);
                    self.state = InflateState::StaticBlockInvLen(len);
                },
                InflateState::StaticBlockInvLen(len) => {
                    let inv_len = read_bits!(self, csrc, 16);
                    if len != !inv_len {
                        self.state = InflateState::End;
                        return Err(DecompressError::InvalidHeader);
                    }
                    self.state = InflateState::StaticBlockCopy(len as usize);
                },
                InflateState::StaticBlockCopy(len) => {
                    for i in 0..len {
                        if csrc.left() < 8 {
                            self.br = csrc.br;
                            // Remember how many bytes are still due.
                            self.state = InflateState::StaticBlockCopy(len - i);
                            return Err(DecompressError::ShortData);
                        }
                        let val = csrc.read(8).unwrap() as u8;
                        self.put_literal(val);
                    }
                    self.state = InflateState::BlockStart;
                }
                // Block compressed with the fixed Huffman tables.
                InflateState::FixedBlock => {
                    let val = read_cb!(self, csrc, &self.fix_len_cb);
                    if val < 256 {
                        if self.output_idx >= dst.len() {
                            self.br = csrc.br;
                            // Stash the decoded literal so it is not lost.
                            self.state = InflateState::FixedBlockLiteral(val as u8);
                            return Err(DecompressError::OutputFull);
                        }
                        self.put_literal(val as u8);
                        dst[self.output_idx] = val as u8;
                        self.output_idx += 1;
                    } else if val == 256 {
                        // End-of-block symbol.
                        if self.final_block {
                            self.state = InflateState::End;
                            return Ok(self.output_idx);
                        } else {
                            self.state = InflateState::BlockStart;
                        }
                    } else {
                        let len_idx = (val - 257) as usize;
                        if len_idx >= LENGTH_BASE.len() {
                            self.state = InflateState::End;
                            return Err(DecompressError::InvalidData);
                        }
                        let len_bits = LENGTH_ADD_BITS[len_idx];
                        let add_base = LENGTH_BASE[len_idx] as usize;
                        if len_bits > 0 {
                            self.state = InflateState::FixedBlockLengthExt(add_base, len_bits);
                        } else {
                            self.state = InflateState::FixedBlockDist(add_base);
                        }
                    }
                },
                InflateState::FixedBlockLiteral(sym) => {
                    if self.output_idx >= dst.len() {
                        self.br = csrc.br;
                        return Err(DecompressError::OutputFull);
                    }
                    self.put_literal(sym);
                    dst[self.output_idx] = sym;
                    self.output_idx += 1;
                    self.state = InflateState::FixedBlock;
                },
                InflateState::FixedBlockLengthExt(base, bits) => {
                    let add = read_bits!(self, csrc, bits) as usize;
                    self.state = InflateState::FixedBlockDist(base + add);
                },
                InflateState::FixedBlockDist(length) => {
                    // Fixed distance codes are plain 5-bit values stored MSB first.
                    let dist_idx = reverse_bits(read_bits!(self, csrc, 5), 5) as usize;
                    if dist_idx >= DIST_BASE.len() {
                        self.state = InflateState::End;
                        return Err(DecompressError::InvalidData);
                    }
                    let dist_bits = DIST_ADD_BITS[dist_idx];
                    let dist_base = DIST_BASE[dist_idx] as usize;
                    if dist_bits == 0 {
                        self.state = InflateState::FixedBlockCopy(length, dist_base);
                    } else {
                        self.state = InflateState::FixedBlockDistExt(length, dist_base, dist_bits);
                    }
                },
                InflateState::FixedBlockDistExt(length, base, bits) => {
                    let add = read_bits!(self, csrc, bits) as usize;
                    self.state = InflateState::FixedBlockCopy(length, base + add);
                },
                InflateState::FixedBlockCopy(length, dist) => {
                    if self.output_idx + length > dst.len() {
                        // Copy as much as fits, remember the remainder and let
                        // the caller flush the output buffer.
                        let copy_size = dst.len() - self.output_idx;
                        let ret = self.lz_copy(dist, copy_size, &mut dst[self.output_idx..]);
                        if ret.is_err() {
                            self.state = InflateState::End;
                            return Err(DecompressError::InvalidData);
                        }
                        self.output_idx += copy_size;
                        self.br = csrc.br;
                        self.state = InflateState::FixedBlockCopy(length - copy_size, dist);
                        return Err(DecompressError::OutputFull);
                    }
                    let ret = self.lz_copy(dist, length, &mut dst[self.output_idx..]);
                    if ret.is_err() {
                        self.state = InflateState::End;
                        return Err(DecompressError::InvalidData);
                    }
                    self.output_idx += length;
                    self.state = InflateState::FixedBlock;
                }
                // Header of a block with dynamic Huffman tables.
                InflateState::DynBlockHlit => {
                    self.hlit = (read_bits!(self, csrc, 5) as usize) + 257;
                    if self.hlit >= 287 {
                        self.state = InflateState::End;
                        return Err(DecompressError::InvalidHeader);
                    }
                    self.state = InflateState::DynBlockHdist;
                }
                InflateState::DynBlockHdist => {
                    self.hdist = (read_bits!(self, csrc, 5) as usize) + 1;
                    self.state = InflateState::DynBlockHclen;
                },
                InflateState::DynBlockHclen => {
                    let hclen = (read_bits!(self, csrc, 4) as usize) + 4;
                    self.cur_len_idx = 0;
                    self.len_lengths = [0; 19];
                    self.all_lengths = [0; NUM_LITERALS + NUM_DISTS];
                    self.state = InflateState::DynLengths(hclen);
                },
                InflateState::DynLengths(len) => {
                    // Read the code lengths of the code-length alphabet in the
                    // scrambled LEN_RECODE order.
                    for i in 0..len {
                        if csrc.left() < 3 {
                            self.br = csrc.br;
                            self.state = InflateState::DynLengths(len - i);
                            return Err(DecompressError::ShortData);
                        }
                        self.len_lengths[LEN_RECODE[self.cur_len_idx]] = csrc.read(3).unwrap() as u8;
                        self.cur_len_idx += 1;
                    }
                    let mut len_codes = [ShortCodebookDesc { code: 0, bits: 0 }; 19];
                    lengths_to_codes(&self.len_lengths, &mut len_codes)?;
                    let mut cr = ShortCodebookDescReader::new(len_codes.to_vec());
                    let ret = Codebook::new(&mut cr, CodebookMode::LSB);
                    if ret.is_err() {
                        self.state = InflateState::End;
                        return Err(DecompressError::InvalidHeader);
                    }
                    self.dyn_len_cb = Some(ret.unwrap());
                    self.cur_len_idx = 0;
                    self.state = InflateState::DynCodeLengths;
                },
                InflateState::DynCodeLengths => {
                    if let Some(ref len_cb) = self.dyn_len_cb {
                        // Decode hlit + hdist code lengths; symbols 16..18 are
                        // repeat instructions handled in DynCodeLengthsAdd.
                        while self.cur_len_idx < self.hlit + self.hdist {
                            let ret = csrc.read_cb(len_cb);
                            let val = match ret {
                                Ok(val) => val,
                                Err(CodebookError::MemoryError) => {
                                    self.br = csrc.br;
                                    return Err(DecompressError::ShortData);
                                },
                                Err(_) => {
                                    self.state = InflateState::End;
                                    return Err(DecompressError::InvalidHeader);
                                },
                            };
                            if val < 16 {
                                self.all_lengths[self.cur_len_idx] = val as u8;
                                self.cur_len_idx += 1;
                            } else {
                                let idx = (val as usize) - 16;
                                if idx > 2 {
                                    self.state = InflateState::End;
                                    return Err(DecompressError::InvalidHeader);
                                }
                                self.state = InflateState::DynCodeLengthsAdd(idx);
                                continue 'main;
                            }
                        }
                        // All lengths read: build the literal and distance codebooks.
                        let (lit_lengths, dist_lengths) = self.all_lengths.split_at(self.hlit);

                        let mut lit_codes = [ShortCodebookDesc { code: 0, bits: 0 }; NUM_LITERALS];
                        lengths_to_codes(&lit_lengths, &mut lit_codes)?;
                        let mut cr = ShortCodebookDescReader::new(lit_codes.to_vec());
                        let ret = Codebook::new(&mut cr, CodebookMode::LSB);
                        if ret.is_err() { return Err(DecompressError::InvalidHeader); }
                        self.dyn_lit_cb = Some(ret.unwrap());

                        let mut dist_codes = [ShortCodebookDesc { code: 0, bits: 0 }; NUM_DISTS];
                        lengths_to_codes(&dist_lengths[..self.hdist], &mut dist_codes)?;
                        let mut cr = ShortCodebookDescReader::new(dist_codes.to_vec());
                        let ret = Codebook::new(&mut cr, CodebookMode::LSB);
                        if ret.is_err() { return Err(DecompressError::InvalidHeader); }
                        self.dyn_dist_cb = Some(ret.unwrap());

                        self.state = InflateState::DynBlock;
                    } else {
                        unreachable!();
                    }
                },
                InflateState::DynCodeLengthsAdd(mode) => {
                    let base = REPEAT_BASE[mode] as usize;
                    let bits = REPEAT_BITS[mode];
                    let len = base + read_bits!(self, csrc, bits) as usize;
                    if self.cur_len_idx + len > self.hlit + self.hdist {
                        self.state = InflateState::End;
                        return Err(DecompressError::InvalidHeader);
                    }
                    // Mode 0 (symbol 16) repeats the previous length; modes 1 and 2
                    // (symbols 17/18) emit runs of zero lengths.
                    let rpt = if mode == 0 {
                            if self.cur_len_idx == 0 {
                                self.state = InflateState::End;
                                return Err(DecompressError::InvalidHeader);
                            }
                            self.all_lengths[self.cur_len_idx - 1]
                        } else {
                            0
                        };
                    for _ in 0..len {
                        self.all_lengths[self.cur_len_idx] = rpt;
                        self.cur_len_idx += 1;
                    }
                    self.state = InflateState::DynCodeLengths;
                },
                // Payload of a dynamic-table block; mirrors the fixed-block states.
                InflateState::DynBlock => {
                    if let Some(ref lit_cb) = self.dyn_lit_cb {
                        let val = read_cb!(self, csrc, lit_cb);
                        if val < 256 {
                            if self.output_idx >= dst.len() {
                                self.br = csrc.br;
                                self.state = InflateState::DynBlockLiteral(val as u8);
                                return Err(DecompressError::OutputFull);
                            }
                            self.put_literal(val as u8);
                            dst[self.output_idx] = val as u8;
                            self.output_idx += 1;
                        } else if val == 256 {
                            if self.final_block {
                                self.state = InflateState::End;
                                return Ok(self.output_idx);
                            } else {
                                self.state = InflateState::BlockStart;
                            }
                        } else {
                            let len_idx = (val - 257) as usize;
                            if len_idx >= LENGTH_BASE.len() {
                                self.state = InflateState::End;
                                return Err(DecompressError::InvalidData);
                            }
                            let len_bits = LENGTH_ADD_BITS[len_idx];
                            let add_base = LENGTH_BASE[len_idx] as usize;
                            if len_bits > 0 {
                                self.state = InflateState::DynBlockLengthExt(add_base, len_bits);
                            } else {
                                self.state = InflateState::DynBlockDist(add_base);
                            }
                        }
                    } else {
                        unreachable!();
                    }
                },
                InflateState::DynBlockLiteral(sym) => {
                    if self.output_idx >= dst.len() {
                        self.br = csrc.br;
                        return Err(DecompressError::OutputFull);
                    }
                    self.put_literal(sym);
                    dst[self.output_idx] = sym;
                    self.output_idx += 1;
                    self.state = InflateState::DynBlock;
                },
                InflateState::DynBlockLengthExt(base, bits) => {
                    let add = read_bits!(self, csrc, bits) as usize;
                    self.state = InflateState::DynBlockDist(base + add);
                },
                InflateState::DynBlockDist(length) => {
                    if let Some(ref dist_cb) = self.dyn_dist_cb {
                        let dist_idx = read_cb!(self, csrc, dist_cb) as usize;
                        if dist_idx >= DIST_BASE.len() {
                            self.state = InflateState::End;
                            return Err(DecompressError::InvalidData);
                        }
                        let dist_bits = DIST_ADD_BITS[dist_idx];
                        let dist_base = DIST_BASE[dist_idx] as usize;
                        if dist_bits == 0 {
                            self.state = InflateState::DynCopy(length, dist_base);
                        } else {
                            self.state = InflateState::DynBlockDistExt(length, dist_base, dist_bits);
                        }
                    } else {
                        unreachable!();
                    }
                },
                InflateState::DynBlockDistExt(length, base, bits) => {
                    let add = read_bits!(self, csrc, bits) as usize;
                    self.state = InflateState::DynCopy(length, base + add);
                },
                InflateState::DynCopy(length, dist) => {
                    if self.output_idx + length > dst.len() {
                        // Partial copy, flush, then resume with the remainder.
                        let copy_size = dst.len() - self.output_idx;
                        let ret = self.lz_copy(dist, copy_size, &mut dst[self.output_idx..]);
                        if ret.is_err() {
                            self.state = InflateState::End;
                            return Err(DecompressError::InvalidData);
                        }
                        self.output_idx += copy_size;
                        self.br = csrc.br;
                        self.state = InflateState::DynCopy(length - copy_size, dist);
                        return Err(DecompressError::OutputFull);
                    }
                    let ret = self.lz_copy(dist, length, &mut dst[self.output_idx..]);
                    if ret.is_err() {
                        self.state = InflateState::End;
                        return Err(DecompressError::InvalidData);
                    }
                    self.output_idx += length;
                    self.state = InflateState::DynBlock;
                }
                InflateState::End => {
                    return Ok(0);
                },
            }
        }
    }
    /// Decompresses input data into output returning the uncompressed data length.
    pub fn uncompress(src: &[u8], dst: &mut [u8]) -> DecompressResult<usize> {
        let mut inflate = Self::new();
        inflate.decompress_data(src, dst, false)
    }
| 751 | } |
| 752 | |
impl Default for Inflate {
    // Equivalent to `Inflate::new()`.
    fn default() -> Self {
        Self::new()
    }
}
| 758 | |
// Builds canonical Huffman codes from the per-symbol code lengths as
// described in RFC 1951 section 3.2.2 and stores them bit-reversed (deflate
// codewords are read MSB first from an LSB-first bitstream).  A zero length
// marks an unused symbol.
fn lengths_to_codes(lens: &[u8], codes: &mut [ShortCodebookDesc]) -> DecompressResult<()> {
    let mut bits = [0u32; 32];
    let mut pfx = [0u32; 33];
    // Count how many symbols use each code length.
    for len in lens.iter() {
        let len = *len as usize;
        if len >= bits.len() {
            return Err(DecompressError::InvalidHeader);
        }
        bits[len] += 1;
    }
    bits[0] = 0;
    // Compute the first code of each length (`pfx[len]` then acts as a
    // running counter while assigning codes below).
    let mut code = 0;
    for i in 0..bits.len() {
        code = (code + bits[i]) << 1;
        pfx[i + 1] = code;
    }

    for (len, codes) in lens.iter().zip(codes.iter_mut()) {
        let len = *len as usize;
        if len != 0 {
            let bits = len as u8;
            *codes = ShortCodebookDesc { code: reverse_bits(pfx[len], bits), bits };
            pfx[len] += 1;
        } else {
            *codes = ShortCodebookDesc { code: 0, bits: 0 };
        }
    }

    Ok(())
}
| 789 | |
// Streaming CRC-32 (IEEE polynomial, reflected) checksum as used by gzip
// trailers.  The lookup table is generated at construction time.
struct GzipCRC32 {
    tab: [u32; 256],
    crc: u32,
}

impl GzipCRC32 {
    // Creates a checksum calculator with a zeroed running CRC.
    #[allow(clippy::unreadable_literal)]
    fn new() -> Self {
        let mut tab = [0u32; 256];
        for (n, entry) in tab.iter_mut().enumerate() {
            let mut val = n as u32;
            for _ in 0..8 {
                val = if (val & 1) != 0 { 0xEDB88320 ^ (val >> 1) } else { val >> 1 };
            }
            *entry = val;
        }
        Self { tab, crc: 0 }
    }
    // Folds `src` into the running CRC; may be called repeatedly on
    // consecutive chunks of the stream.
    fn update_crc(&mut self, src: &[u8]) {
        let mut state = !self.crc;
        for &byte in src.iter() {
            state = self.tab[usize::from((state as u8) ^ byte)] ^ (state >> 8);
        }
        self.crc = !state;
    }
}
| 820 | |
| 821 | ///! Decodes input data in gzip file format (RFC 1952) returning a vector containing decoded data. |
| 822 | pub fn gzip_decode(br: &mut ByteReader, skip_crc: bool) -> DecompressResult<Vec<u8>> { |
| 823 | const FLAG_HCRC: u8 = 0x02; |
| 824 | const FLAG_EXTRA: u8 = 0x04; |
| 825 | const FLAG_NAME: u8 = 0x08; |
| 826 | const FLAG_COMMENT: u8 = 0x10; |
| 827 | |
| 828 | let id1 = br.read_byte()?; |
| 829 | let id2 = br.read_byte()?; |
| 830 | let cm = br.read_byte()?; |
| 831 | let flg = br.read_byte()?; |
| 832 | let _mtime = br.read_u32le()?; |
| 833 | let _xfl = br.read_byte()?; |
| 834 | let _os = br.read_byte()?; |
| 835 | if id1 != 0x1F || id2 != 0x8B || cm != 8 { |
| 836 | return Err(DecompressError::InvalidHeader); |
| 837 | } |
| 838 | |
| 839 | if (flg & FLAG_EXTRA) != 0 { |
| 840 | let xlen = br.read_u16le()? as usize; |
| 841 | br.read_skip(xlen)?; |
| 842 | } |
| 843 | if (flg & FLAG_NAME) != 0 { |
| 844 | loop { |
| 845 | let b = br.read_byte()?; |
| 846 | if b == 0 { |
| 847 | break; |
| 848 | } |
| 849 | } |
| 850 | } |
| 851 | if (flg & FLAG_COMMENT) != 0 { |
| 852 | loop { |
| 853 | let b = br.read_byte()?; |
| 854 | if b == 0 { |
| 855 | break; |
| 856 | } |
| 857 | } |
| 858 | } |
| 859 | let _hcrc = if (flg & FLAG_HCRC) != 0 { |
| 860 | br.read_u16le()? |
| 861 | } else { |
| 862 | 0 |
| 863 | }; |
| 864 | if (flg & 0xE0) != 0 { |
| 865 | return Err(DecompressError::Unsupported); |
| 866 | } |
| 867 | |
| 868 | let mut output: Vec<u8> = Vec::new(); |
| 869 | let mut tail = [0u8; 8]; |
| 870 | let mut inblk = [0u8; 1024]; |
| 871 | let mut oblk = [0u8; 4096]; |
| 872 | let mut inflate = Inflate::new(); |
| 873 | let mut checker = GzipCRC32::new(); |
| 874 | |
| 875 | loop { |
| 876 | let ret = br.read_buf_some(&mut inblk); |
| 877 | if let Err(ByteIOError::EOF) = ret { |
| 878 | break; |
| 879 | } |
| 880 | let inlen = match ret { |
| 881 | Ok(val) => val, |
| 882 | Err(_) => return Err(DecompressError::IOError), |
| 883 | }; |
| 884 | let mut repeat = false; |
| 885 | loop { |
| 886 | let ret = inflate.decompress_data(&inblk[..inlen], &mut oblk, repeat); |
| 887 | match ret { |
| 888 | Ok(outlen) => { |
| 889 | checker.update_crc(&oblk[..outlen]); |
| 890 | output.extend_from_slice(&oblk[..outlen]); |
| 891 | break; |
| 892 | }, |
| 893 | Err(DecompressError::ShortData) => { |
| 894 | break; |
| 895 | }, |
| 896 | Err(DecompressError::OutputFull) => { |
| 897 | repeat = true; |
| 898 | checker.update_crc(&oblk); |
| 899 | output.extend_from_slice(&oblk); |
| 900 | }, |
| 901 | Err(err) => { |
| 902 | return Err(err); |
| 903 | }, |
| 904 | } |
| 905 | } |
| 906 | // Save last 8 bytes for CRC and size. |
| 907 | if inlen >= 8 { |
| 908 | tail.copy_from_slice(&inblk[inlen - 8..][..8]); |
| 909 | } else { |
| 910 | let shift_len = 8 - inlen; |
| 911 | for i in 0..shift_len { |
| 912 | tail[i] = tail[i + inlen]; |
| 913 | } |
| 914 | for i in shift_len..8 { |
| 915 | tail[i] = inblk[i - shift_len]; |
| 916 | } |
| 917 | } |
| 918 | } |
| 919 | if !skip_crc { |
| 920 | if !inflate.is_finished() { println!("???"); } |
| 921 | let crc = read_u32le(&tail[0..4])?; |
| 922 | let size = read_u32le(&tail[4..8])?; |
| 923 | if size != (output.len() as u32) { |
| 924 | return Err(DecompressError::CRCError); |
| 925 | } |
| 926 | if crc != checker.crc { |
| 927 | return Err(DecompressError::CRCError); |
| 928 | } |
| 929 | } |
| 930 | |
| 931 | Ok(output) |
| 932 | } |
| 933 | |
// A single parsed LZ77 element: either a literal byte or a
// (distance, length) back-reference, pre-mapped to deflate symbols.
#[derive(Clone,Copy,Default)]
struct Token {
    sym: u16,     // literal/length symbol: 0-255 literal, 256 EOB, 257+ match length
    distsym: u8,  // distance symbol (0-29); meaningful only when sym > 256
    len: u16,     // match length in bytes
    dist: u16,    // match distance in bytes
}

// End-of-block marker token (symbol 256).
const TOKEN_EOB: Token = Token { sym: 256, distsym: 0, len: 0, dist: 0 };
| 943 | |
| 944 | impl Token { |
| 945 | fn from_literal(sym: u8) -> Self { |
| 946 | Self { |
| 947 | sym: u16::from(sym), |
| 948 | distsym: 0, |
| 949 | dist: 0, |
| 950 | len: 0, |
| 951 | } |
| 952 | } |
| 953 | fn from_match(dist: u16, len: u16) -> Self { |
| 954 | let sym = match len { |
| 955 | 3..= 10 => 257 + len - 3, |
| 956 | 11..= 18 => 265 + (len - 11) / 2, |
| 957 | 19..= 34 => 269 + (len - 19) / 4, |
| 958 | 35..= 66 => 273 + (len - 35) / 8, |
| 959 | 67..=130 => 277 + (len - 67) / 16, |
| 960 | 131..=257 => 281 + (len - 131) / 32, |
| 961 | _ => 285, |
| 962 | }; |
| 963 | let distsym = if dist <= 4 { |
| 964 | (dist - 1) as u8 |
| 965 | } else { |
| 966 | let bits = 16 - (dist - 1).leading_zeros(); |
| 967 | (bits as u8) * 2 - 2 + if ((dist - 1) & (1 << (bits - 2))) != 0 { 1 } else { 0 } |
| 968 | }; |
| 969 | Self { |
| 970 | sym, distsym, len, dist |
| 971 | } |
| 972 | } |
| 973 | } |
| 974 | |
/// Run-length codes a sequence of Huffman code lengths using the RFC 1951
/// code-length alphabet (symbols 0-18), incrementing `stats` for every
/// emitted symbol and appending `(symbol, extra_bits_value)` pairs to `toks`.
fn add_codes(lens: &[u8], stats: &mut [u32], toks: &mut Vec<(u8, u8)>) {
    // Flushes `count` pending repetitions of the previously emitted length
    // `last`: zero lengths use symbols 17/18, other lengths repeat via
    // symbol 16, and short leftovers are emitted verbatim.
    // `count` is deliberately wider than u8: with up to 286 literal codes a
    // run can exceed 255 repeats (the previous u8 counter overflowed there).
    fn flush_repeats(last: u8, mut count: usize, stats: &mut [u32], toks: &mut Vec<(u8, u8)>) {
        if count == 0 {
            return;
        }
        if last == 0 {
            // Runs of zeros: symbol 18 covers 11-138, symbol 17 covers 3-10.
            while count > 10 {
                let run = count.min(138);
                stats[18] += 1;
                toks.push((18, (run - 11) as u8));
                count -= run;
            }
            if count >= 3 {
                stats[17] += 1;
                toks.push((17, (count - 3) as u8));
                count = 0;
            }
            for _ in 0..count {
                stats[0] += 1;
                toks.push((0, 0));
            }
        } else {
            // Repeats of a non-zero length: symbol 16 covers 3-6 repeats.
            while count >= 3 {
                let run = count.min(6);
                stats[16] += 1;
                toks.push((16, (run - 3) as u8));
                count -= run;
            }
            for _ in 0..count {
                stats[last as usize] += 1;
                toks.push((last, 0));
            }
        }
    }

    let mut last = 42; // sentinel: no valid code length equals 42
    let mut lcount: usize = 0;

    for &len in lens.iter() {
        if len == last {
            lcount += 1;
        } else {
            // Length changed: flush pending repeats of the previous length,
            // then emit the new length itself (symbol 16 repeats the
            // previously written code, so the first occurrence is literal).
            flush_repeats(last, lcount, stats, toks);
            stats[len as usize] += 1;
            toks.push((len, 0));
            last = len;
            lcount = 0;
        }
    }
    // Flush whatever run was still being counted at the end of input.
    flush_repeats(last, lcount, stats, toks);
}
| 1048 | |
/// Deflate stream writer.
pub struct DeflateWriter {
    dst: Vec<u8>,  // accumulated output bytes
    bits: u8,      // number of bits pending in `bbuf`
    bbuf: u32,     // LSB-first bit accumulator
}
| 1055 | |
impl DeflateWriter {
    /// Creates a new instance of `DeflateWriter` for a provided output.
    pub fn new(dst: Vec<u8>) -> Self {
        Self {
            dst,
            bits: 0,
            bbuf: 0,
        }
    }
    // Pads the pending bit count up to the next byte boundary.
    fn align(&mut self) {
        if (self.bits & 7) != 0 {
            self.bits += 8 - (self.bits & 7);
        }
    }
    // Moves all complete bytes from the bit accumulator into the output.
    fn flush(&mut self) {
        while self.bits >= 8 {
            self.dst.push(self.bbuf as u8);
            self.bbuf >>= 8;
            self.bits -= 8;
        }
    }
    // Appends `len` bits of `val`, LSB first as deflate requires.
    // flush() first guarantees bits < 8, so bits + len <= 23 and the
    // shifted value always fits the 32-bit accumulator.
    fn write(&mut self, val: u16, len: u8) {
        self.flush();
        self.bbuf |= u32::from(val) << self.bits;
        self.bits += len;
    }
    /// Finishes writing the stream and returns the output vector.
    pub fn end(mut self) -> Vec<u8> {
        self.flush();
        if self.bits > 0 {
            self.dst.push(self.bbuf as u8);
        }
        self.dst
    }

    // Writes the dynamic-block code-length tables: both trees' code lengths
    // are run-length coded with symbols 0-18, and those symbols are in turn
    // Huffman coded (RFC 1951 section 3.2.7).
    fn write_codes(&mut self, codes: &CodeHuff, dists: &DistHuff) {
        let mut stats = [0u32; 19];
        let mut toks = Vec::with_capacity(NUM_LITERALS + NUM_DISTS);
        let mut cw = [0u16; 19];
        let mut cl = [0u8; 19];
        let mut nc = 0;

        add_codes(&codes.lens[..codes.num_codes], &mut stats, &mut toks);
        add_codes(&dists.lens[..dists.num_codes], &mut stats, &mut toks);

        gen_tree(&mut cw, &mut cl, &mut nc, &mut stats, 7);

        // gen_tree() reports the count in natural symbol order, but HCLEN
        // counts lengths in the scrambled LEN_RECODE transmission order, so
        // recompute it by trimming trailing zero lengths in that order.
        nc = cw.len();
        for &idx in LEN_RECODE.iter().rev() {
            if cl[idx] == 0 {
                nc -= 1;
            } else {
                break;
            }
        }
        // HCLEN is stored as (count - 4) in 4 bits; minimum is 4 entries.
        if nc < 4 {
            nc = 4;
        }
        self.write((nc - 4) as u16, 4);
        for &idx in LEN_RECODE.iter().take(nc) {
            self.write(u16::from(cl[idx]), 3);
        }
        // Emit the run-length tokens; symbols 16/17/18 carry extra bits.
        for &(sym, add) in toks.iter() {
            self.write(cw[sym as usize], cl[sym as usize]);
            match sym {
                16 => self.write(u16::from(add), 2),
                17 => self.write(u16::from(add), 3),
                18 => self.write(u16::from(add), 7),
                _ => {},
            };
        }
    }
    // Writes the block payload: literal symbols and length/distance pairs
    // (with their extra bits), using the supplied Huffman tables.
    fn write_tokens(&mut self, src: &[Token], codes: &CodeHuff, dists: &DistHuff) {
        for &tok in src.iter() {
            self.write(codes.codes[tok.sym as usize], codes.lens[tok.sym as usize]);
            if tok.sym > 256 {
                self.write_len_bits(tok.len);
                self.write(dists.codes[tok.distsym as usize], dists.lens[tok.distsym as usize]);
                self.write_dist_bits(tok.dist);
            }
        }
    }
    // Writes the extra bits of a match length; lengths 3-10 and 258 carry
    // no extra bits, hence the range check on the biased length.
    fn write_len_bits(&mut self, len: u16) {
        let llen = len - 3;
        if llen >= 8 && llen < 255 {
            let bits = (16 - llen.leading_zeros() - 3) as u8;
            self.write(llen & ((1 << bits) - 1), bits);
        }
    }
    // Writes the extra bits of a match distance; distances 1-4 have none
    // (for dist == 4 the computed bit count is zero, so nothing is output).
    fn write_dist_bits(&mut self, dist: u16) {
        let ddist = dist - 1;
        if dist >= 4 {
            let bits = (16 - ddist.leading_zeros() - 2) as u8;
            self.write(ddist & ((1 << bits) - 1), bits);
        }
    }
}
| 1153 | |
// Literal/length Huffman code set for one deflate block.
struct CodeHuff {
    is_fixed: bool,              // use the RFC 1951 fixed codes instead of dynamic ones
    stats: [u32; NUM_LITERALS],  // symbol frequencies gathered from the tokens
    codes: [u16; NUM_LITERALS],  // bit-reversed canonical codes
    lens: [u8; NUM_LITERALS],    // code lengths in bits (0 = unused symbol)
    num_codes: usize,            // number of transmitted codes (HLIT)
}
| 1161 | |
| 1162 | impl CodeHuff { |
| 1163 | fn new(is_fixed: bool) -> Self { |
| 1164 | Self { |
| 1165 | is_fixed, |
| 1166 | stats: [0; NUM_LITERALS], |
| 1167 | codes: [0; NUM_LITERALS], |
| 1168 | lens: [0; NUM_LITERALS], |
| 1169 | num_codes: NUM_LITERALS, |
| 1170 | } |
| 1171 | } |
| 1172 | fn make_codes(&mut self, src: &[Token]) { |
| 1173 | if self.is_fixed { |
| 1174 | for i in 0..=143 { |
| 1175 | self.codes[i] = reverse_bits((i + 0x30) as u32, 8) as u16; |
| 1176 | self.lens[i] = 8; |
| 1177 | } |
| 1178 | for i in 144..=255 { |
| 1179 | self.codes[i] = reverse_bits((i + 0x100) as u32, 9) as u16; |
| 1180 | self.lens[i] = 9; |
| 1181 | } |
| 1182 | for i in 256..=279 { |
| 1183 | self.codes[i] = reverse_bits((i & 0x1F) as u32, 7) as u16; |
| 1184 | self.lens[i] = 7; |
| 1185 | } |
| 1186 | for i in 280..NUM_LITERALS { |
| 1187 | self.codes[i] = reverse_bits((i - 280 + 0xC0) as u32, 8) as u16; |
| 1188 | self.lens[i] = 8; |
| 1189 | } |
| 1190 | } else { |
| 1191 | for &tok in src.iter() { |
| 1192 | self.stats[tok.sym as usize] += 1; |
| 1193 | } |
| 1194 | gen_tree(&mut self.codes, &mut self.lens, &mut self.num_codes, &mut self.stats, 15); |
| 1195 | if self.num_codes < 257 { |
| 1196 | self.num_codes = 257; |
| 1197 | } |
| 1198 | } |
| 1199 | } |
| 1200 | } |
| 1201 | |
// Distance Huffman code set for one deflate block.
struct DistHuff {
    is_fixed: bool,           // use the RFC 1951 fixed codes instead of dynamic ones
    stats: [u32; NUM_DISTS],  // distance-symbol frequencies gathered from the tokens
    codes: [u16; NUM_DISTS],  // bit-reversed canonical codes
    lens: [u8; NUM_DISTS],    // code lengths in bits (0 = unused symbol)
    num_codes: usize,         // number of transmitted codes (HDIST)
}
| 1209 | |
| 1210 | impl DistHuff { |
| 1211 | fn new(is_fixed: bool) -> Self { |
| 1212 | Self { |
| 1213 | is_fixed, |
| 1214 | stats: [0; NUM_DISTS], |
| 1215 | codes: [0; NUM_DISTS], |
| 1216 | lens: [0; NUM_DISTS], |
| 1217 | num_codes: NUM_DISTS, |
| 1218 | } |
| 1219 | } |
| 1220 | fn make_codes(&mut self, src: &[Token]) { |
| 1221 | if self.is_fixed { |
| 1222 | for i in 0..NUM_DISTS { |
| 1223 | self.codes[i] = reverse_bits(i as u32, 5) as u16; |
| 1224 | self.lens[i] = 5; |
| 1225 | } |
| 1226 | } else { |
| 1227 | for &tok in src.iter() { |
| 1228 | if tok.sym > 256 { |
| 1229 | self.stats[tok.distsym as usize] += 1; |
| 1230 | } |
| 1231 | } |
| 1232 | gen_tree(&mut self.codes, &mut self.lens, &mut self.num_codes, &mut self.stats, 15); |
| 1233 | if self.num_codes < 1 { |
| 1234 | self.num_codes = 1; |
| 1235 | } |
| 1236 | } |
| 1237 | } |
| 1238 | } |
| 1239 | |
// Node of the Huffman construction tree: either a leaf (sym < NODE_SYM)
// or an internal node whose children are referenced by array index.
#[derive(Clone,Copy,Default)]
struct Node {
    sym: u16,   // symbol for leaves, NODE_SYM for internal nodes
    w: u16,     // weight during construction; code length after assign_len()
    idx0: u16,  // index of the first child (internal nodes only)
    idx1: u16,  // index of the second child (internal nodes only)
}

// Sentinel symbol value marking internal (non-leaf) nodes.
const NODE_SYM: u16 = 65500;
| 1249 | |
// In-place Huffman tree builder. Leaves and internal nodes share one array
// kept sorted by weight; consumed nodes have their weight zeroed but stay
// in place so that parent nodes can keep referring to them by index.
struct Tree {
    nodes: [Node; NUM_LITERALS * 2],
    nnodes: usize,
}

impl Tree {
    fn new() -> Self {
        Self {
            nodes: [Node::default(); NUM_LITERALS * 2],
            nnodes: 0,
        }
    }
    // Inserts a node keeping the array sorted by weight (stable for equal
    // weights). Insertion happens at or after the consumed zero-weight
    // prefix, so already-stored child indices stay valid.
    fn insert(&mut self, val: Node) {
        let mut idx = self.nnodes;
        for (i, nn) in self.nodes[..self.nnodes].iter().enumerate() {
            if nn.w > val.w {
                idx = i;
                break;
            }
        }
        if idx < self.nnodes {
            for i in (idx..self.nnodes).rev() {
                self.nodes[i + 1] = self.nodes[i];
            }
        }
        self.nodes[idx] = val;
        self.nnodes += 1;
    }
    // Drops leading zero-weight entries (symbols that never occur).
    fn trim(&mut self) {
        let mut start = self.nnodes;
        for (i, n) in self.nodes[..self.nnodes].iter().enumerate() {
            if n.w != 0 {
                start = i;
                break;
            }
        }
        if start != 0 {
            for i in 0..self.nnodes - start {
                self.nodes[i] = self.nodes[i + start];
            }
            self.nnodes -= start;
        }
    }
    // Standard Huffman construction: repeatedly join the two lightest
    // unconsumed nodes, then convert node depths into code lengths.
    fn build(&mut self) {
        // A single symbol still needs a one-bit code.
        if self.nnodes == 1 {
            self.nodes[0].w = 1;
            return;
        }
        let mut start = 0;
        while start + 1 < self.nnodes {
            let nn1 = self.nodes[start];
            let nn2 = self.nodes[start + 1];
            let n = Node {
                sym: NODE_SYM,
                w: nn1.w + nn2.w,
                idx0: start as u16,
                idx1: (start + 1) as u16,
            };
            // Zero the children's weights to mark them consumed while
            // keeping them in place for the index references above.
            self.nodes[start].w = 0;
            self.nodes[start + 1].w = 0;
            start += 2;
            self.insert(n);
        }
        if self.nnodes > 1 {
            // The last inserted node is the root; assign depths from it.
            self.assign_len(self.nnodes - 1, 0);
        }
    }
    // Recursively stores each leaf's depth (= code length) in its weight field.
    fn assign_len(&mut self, idx: usize, len: u16) {
        if self.nodes[idx].sym == NODE_SYM {
            self.assign_len(self.nodes[idx].idx0 as usize, len + 1);
            self.assign_len(self.nodes[idx].idx1 as usize, len + 1);
        } else {
            self.nodes[idx].w = len;
        }
    }
}
| 1326 | |
// Computes canonical Huffman codes and code lengths for the given symbol
// frequencies. `num_codes` is set to the index just past the last symbol
// with a non-zero code length; all-zero stats yield an empty code set.
fn gen_tree(codes: &mut [u16], lens: &mut [u8], num_codes: &mut usize, stats: &mut [u32], max_bits: u8) {
    let mut tot_w = 0;
    for &w in stats.iter() {
        tot_w += w;
    }
    if tot_w == 0 {
        codes[0] = 0;
        lens[0] = 0;
        *num_codes = 0;
        return;
    }
    // Halve the weights (rounding up, so non-zero stays non-zero) until the
    // total fits into 2^max_bits to keep the tree shallow.
    // NOTE(review): this bounds the total weight but does not strictly
    // guarantee a maximum code length of max_bits for pathological
    // (Fibonacci-like) weight distributions -- confirm this is acceptable.
    while tot_w > (1 << max_bits) {
        for w in stats.iter_mut() {
            *w = (*w + 1) >> 1;
        }
        tot_w = 0;
        for &w in stats.iter() {
            tot_w += w;
        }
    }
    let mut tree = Tree::new();
    for (sym, &w) in stats.iter().enumerate() {
        tree.insert(Node{ sym: sym as u16, w: w as u16, idx0: 64000, idx1: 64000 });
    }
    tree.trim();
    tree.build();

    // After build() every leaf's weight field holds its code length.
    for n in tree.nodes[..tree.nnodes].iter() {
        if n.sym != NODE_SYM {
            lens[n.sym as usize] = n.w as u8;
        }
    }
    lengths_to_codes16(lens, codes);
    // Trim trailing symbols with zero code length.
    let mut sz = codes.len();
    for &len in lens.iter().rev() {
        if len != 0 {
            break;
        }
        sz -= 1;
    }
    *num_codes = sz;
}
| 1369 | |
| 1370 | fn lengths_to_codes16(lens: &[u8], codes: &mut [u16]) { |
| 1371 | let mut bits = [0u32; 32]; |
| 1372 | let mut pfx = [0u32; 33]; |
| 1373 | for len in lens.iter() { |
| 1374 | let len = *len as usize; |
| 1375 | bits[len] += 1; |
| 1376 | } |
| 1377 | bits[0] = 0; |
| 1378 | let mut code = 0; |
| 1379 | for i in 0..bits.len() { |
| 1380 | code = (code + bits[i]) << 1; |
| 1381 | pfx[i + 1] = code; |
| 1382 | } |
| 1383 | |
| 1384 | for (len, codes) in lens.iter().zip(codes.iter_mut()) { |
| 1385 | let len = *len as usize; |
| 1386 | if len != 0 { |
| 1387 | let bits = len as u8; |
| 1388 | *codes = reverse_bits(pfx[len], bits) as u16; |
| 1389 | pfx[len] += 1; |
| 1390 | } else { |
| 1391 | *codes = 0; |
| 1392 | } |
| 1393 | } |
| 1394 | } |
| 1395 | |
// Strategy interface that turns raw input bytes into a token stream,
// always terminated by TOKEN_EOB.
trait LZParse {
    fn parse(&mut self, src: &[u8], dst: &mut Vec<Token>);
}
| 1399 | |
| 1400 | struct NoParser {} |
| 1401 | impl LZParse for NoParser { |
| 1402 | fn parse(&mut self, src: &[u8], dst: &mut Vec<Token>) { |
| 1403 | dst.reserve(src.len()); |
| 1404 | for &b in src.iter() { |
| 1405 | dst.push(Token::from_literal(b)); |
| 1406 | } |
| 1407 | dst.push(TOKEN_EOB); |
| 1408 | } |
| 1409 | } |
| 1410 | |
/// Returns the length of the common prefix of the two slices, capped at 258
/// (the longest match deflate can encode). The cap also prevents the `u16`
/// result from overflowing on pathologically long identical runs; it is
/// invisible to the callers: find_match() stops searching once it sees a
/// match of MAX_MATCH_LEN and find_all_matches() clamps to it anyway.
fn check_match(src1: &[u8], src2: &[u8]) -> u16 {
    src1.iter()
        .zip(src2.iter())
        .take(258) // MAX_MATCH_LEN
        .take_while(|(a, b)| a == b)
        .count() as u16
}
| 1421 | |
// Number of hash chains used by the match finder.
const HASH_SIZE: usize = 4096;
// Longest match deflate can encode.
const MAX_MATCH_LEN: usize = 258;
// Searchable window; kept MAX_MATCH_LEN short of deflate's 32K limit,
// presumably so a match found anywhere in it can extend fully -- the
// distance check in find_match() compares against this value.
const WINDOW_SIZE: usize = 32768 - MAX_MATCH_LEN;
// Sentinel chain link meaning "no further entry".
const NONEXT: usize = WINDOW_SIZE * 2;
| 1426 | |
// Sliding-window hash-chain match finder over the input slice.
struct MatchFinder<'a> {
    src: &'a [u8],                   // input being searched
    pos: usize,                      // current position within `src`
    hstart: [usize; HASH_SIZE],      // head of each hash chain (highest indexed position)
    hend: [usize; HASH_SIZE],        // tail of each hash chain (lowest indexed position)
    hnext: [usize; WINDOW_SIZE * 3], // per-position link to the next chain entry
}
| 1434 | |
| 1435 | impl<'a> MatchFinder<'a> { |
| 1436 | fn new(src: &'a [u8]) -> Self { |
| 1437 | let mut obj = Self { |
| 1438 | src, |
| 1439 | pos: 0, |
| 1440 | hstart: [0; HASH_SIZE], |
| 1441 | hend: [0; HASH_SIZE], |
| 1442 | hnext: [0; WINDOW_SIZE * 3], |
| 1443 | }; |
| 1444 | obj.build_hash(); |
| 1445 | obj |
| 1446 | } |
| 1447 | fn hash(src: &[u8]) -> usize { |
| 1448 | let _ = src[2]; |
| 1449 | (((u16::from(src[0]) << 10) ^ (u16::from(src[1]) << 5) ^ u16::from(src[2])) & ((HASH_SIZE as u16) - 1)) as usize |
| 1450 | } |
| 1451 | fn build_hash(&mut self) { |
| 1452 | for el in self.hstart.iter_mut() { *el = NONEXT; } |
| 1453 | for el in self.hend.iter_mut() { *el = NONEXT; } |
| 1454 | for el in self.hnext.iter_mut() { *el = NONEXT; } |
| 1455 | if self.pos + 3 >= self.src.len() { |
| 1456 | return; |
| 1457 | } |
| 1458 | let end = (self.src.len() - 3).min(self.pos + NONEXT); |
| 1459 | for i in (self.pos .. end).rev() { |
| 1460 | let key = Self::hash(&self.src[i..]); |
| 1461 | if self.hstart[key] == NONEXT { |
| 1462 | self.hstart[key] = i; |
| 1463 | self.hend[key] = i; |
| 1464 | self.hnext[key] = NONEXT; |
| 1465 | } else { |
| 1466 | self.hnext[self.hend[key]] = i; |
| 1467 | self.hend[key] = i; |
| 1468 | } |
| 1469 | } |
| 1470 | } |
| 1471 | fn find_match(&mut self) -> (u16, u16) { |
| 1472 | if self.pos == 0 || self.pos + 3 > self.src.len() { |
| 1473 | return (0, 0); |
| 1474 | } |
| 1475 | let key = Self::hash(&self.src[self.pos..]) as usize; |
| 1476 | |
| 1477 | let mut best_pos = 0; |
| 1478 | let mut best_len = 0; |
| 1479 | let mut idx = self.hstart[key]; |
| 1480 | while idx != NONEXT && idx + WINDOW_SIZE > self.pos { |
| 1481 | if idx < self.pos { |
| 1482 | let cur_len = check_match(&self.src[self.pos..], &self.src[idx..]); |
| 1483 | if cur_len > best_len { |
| 1484 | best_len = cur_len; |
| 1485 | best_pos = self.pos - idx; |
| 1486 | if best_len >= (MAX_MATCH_LEN as u16) { |
| 1487 | return (best_pos as u16, MAX_MATCH_LEN as u16); |
| 1488 | } |
| 1489 | } |
| 1490 | } |
| 1491 | idx = self.hnext[idx]; |
| 1492 | } |
| 1493 | (best_pos as u16, best_len) |
| 1494 | } |
| 1495 | fn find_all_matches(&mut self, dists: &mut [u16; MAX_MATCH_LEN + 1]) { |
| 1496 | if self.pos == 0 || self.pos + 3 > self.src.len() { |
| 1497 | return; |
| 1498 | } |
| 1499 | let key = Self::hash(&self.src[self.pos..]) as usize; |
| 1500 | let mut idx = self.hstart[key]; |
| 1501 | while idx != NONEXT && idx + WINDOW_SIZE > self.pos { |
| 1502 | if idx < self.pos { |
| 1503 | let cur_len = (check_match(&self.src[self.pos..], &self.src[idx..]) as usize).min(MAX_MATCH_LEN); |
| 1504 | if cur_len > 0 && dists[cur_len] == 0 { |
| 1505 | dists[cur_len] = (self.pos - idx) as u16; |
| 1506 | } |
| 1507 | } |
| 1508 | idx = self.hnext[idx]; |
| 1509 | } |
| 1510 | } |
| 1511 | fn advance(&mut self, num: usize) { |
| 1512 | self.pos += num; |
| 1513 | |
| 1514 | if self.pos >= NONEXT { |
| 1515 | let (_, tail) = self.src.split_at(self.pos - WINDOW_SIZE); |
| 1516 | self.src = tail; |
| 1517 | self.pos = WINDOW_SIZE; |
| 1518 | self.build_hash(); |
| 1519 | } else { |
| 1520 | for (start, end) in self.hstart.iter_mut().zip(self.hend.iter_mut()) { |
| 1521 | let mut idx = *start; |
| 1522 | while idx != NONEXT && idx + WINDOW_SIZE < self.pos { |
| 1523 | idx = self.hnext[idx]; |
| 1524 | *start = idx; |
| 1525 | } |
| 1526 | if idx == NONEXT { |
| 1527 | *end = NONEXT; |
| 1528 | } |
| 1529 | } |
| 1530 | } |
| 1531 | } |
| 1532 | fn get_sym(&self) -> u8 { self.src[self.pos] } |
| 1533 | fn is_end(&self) -> bool { self.pos >= self.src.len() } |
| 1534 | } |
| 1535 | |
| 1536 | struct GreedyParser {} |
| 1537 | impl LZParse for GreedyParser { |
| 1538 | fn parse(&mut self, src: &[u8], dst: &mut Vec<Token>) { |
| 1539 | dst.reserve(src.len()); |
| 1540 | |
| 1541 | let mut matcher = MatchFinder::new(src); |
| 1542 | while !matcher.is_end() { |
| 1543 | let (best_pos, best_len) = matcher.find_match(); |
| 1544 | |
| 1545 | if best_len >= 3 { |
| 1546 | dst.push(Token::from_match(best_pos, best_len)); |
| 1547 | matcher.advance(best_len as usize); |
| 1548 | } else { |
| 1549 | dst.push(Token::from_literal(matcher.get_sym())); |
| 1550 | matcher.advance(1); |
| 1551 | } |
| 1552 | } |
| 1553 | dst.push(TOKEN_EOB); |
| 1554 | } |
| 1555 | } |
| 1556 | |
| 1557 | struct LazyParser {} |
| 1558 | impl LZParse for LazyParser { |
| 1559 | fn parse(&mut self, src: &[u8], dst: &mut Vec<Token>) { |
| 1560 | dst.reserve(src.len()); |
| 1561 | |
| 1562 | let mut matcher = MatchFinder::new(src); |
| 1563 | while !matcher.is_end() { |
| 1564 | let (best_pos, best_len) = matcher.find_match(); |
| 1565 | if best_len >= 3 { |
| 1566 | let last_sym = matcher.get_sym(); |
| 1567 | matcher.advance(1); |
| 1568 | if !matcher.is_end() { |
| 1569 | let (best_pos1, best_len1) = matcher.find_match(); |
| 1570 | if best_len1 > best_len + 1 { |
| 1571 | dst.push(Token::from_literal(last_sym)); |
| 1572 | dst.push(Token::from_match(best_pos1, best_len1)); |
| 1573 | matcher.advance(best_len1 as usize); |
| 1574 | continue; |
| 1575 | } |
| 1576 | } |
| 1577 | dst.push(Token::from_match(best_pos, best_len)); |
| 1578 | matcher.advance((best_len - 1) as usize); |
| 1579 | } else { |
| 1580 | dst.push(Token::from_literal(matcher.get_sym())); |
| 1581 | matcher.advance(1); |
| 1582 | } |
| 1583 | } |
| 1584 | dst.push(TOKEN_EOB); |
| 1585 | } |
| 1586 | } |
| 1587 | |
// Trellis node used by the optimal parser.
#[derive(Clone,Copy)]
struct TNode {
    price: u32,   // cheapest known cost (estimated bits) of reaching this node
    dist: u16,    // distance of the best match starting at this position
    link: usize,  // previous node on the cheapest path (reversed later)
}

impl Default for TNode {
    fn default() -> Self {
        Self {
            // Unreached nodes start at "infinite" cost.
            price: std::u32::MAX,
            dist: 0,
            link: 0,
        }
    }
}
| 1604 | |
// Trellis-based parser picking the tokenization with the lowest estimated
// bit cost.
struct OptimalParser {
    trellis: Vec<TNode>, // one node per input position plus a terminal node
}
impl OptimalParser {
    fn new() -> Self { Self::default() }
    // Rough estimated cost of coding one literal, in bits.
    fn sym_price(_sym: u8) -> u32 { 9 }
    // Rough estimated cost of coding a match with the given distance;
    // the length contribution is folded into the 9-bit base estimate.
    fn match_price(dist: u16, _len: u16) -> u32 {
        if dist <= 4 {
            9 + 5
        } else {
            let bits = 16 - (dist - 1).leading_zeros();
            9 + 3 + bits
        }
    }
}
impl Default for OptimalParser {
    fn default() -> Self {
        Self {
            trellis: Vec::with_capacity(WINDOW_SIZE),
        }
    }
}
impl LZParse for OptimalParser {
    // Computes the cheapest tokenization of `src` over a trellis where node i
    // holds the best known cost of encoding src[..i], then walks the cheapest
    // path emitting tokens.
    fn parse(&mut self, src: &[u8], dst: &mut Vec<Token>) {
        if src.is_empty() {
            dst.push(TOKEN_EOB);
            return;
        }
        dst.reserve(src.len());

        // One node per input position plus a terminal node.
        self.trellis.truncate(0);
        self.trellis.reserve(src.len() + 1);
        for _ in 0..=src.len() {
            self.trellis.push(TNode::default());
        }
        self.trellis[0].price = 0;

        let mut matcher = MatchFinder::new(src);
        for i in 0..self.trellis.len() - 1 {
            let mut dists = [0; MAX_MATCH_LEN + 1];
            matcher.find_all_matches(&mut dists);

            // Literal step: advance one position.
            let sym = matcher.get_sym();
            let lprice = Self::sym_price(sym) + self.trellis[i].price;
            if self.trellis[i + 1].price > lprice {
                self.trellis[i + 1].price = lprice;
                self.trellis[i + 1].link = i;
            }
            // Match steps: one candidate per distinct match length. The
            // distance is stored on the source node and may be overwritten
            // by a later (longer) improvement; that stays valid because a
            // longer match at some distance also matches for every shorter
            // length taken from this node.
            for (len, &dist) in dists.iter().enumerate() {
                if dist != 0 {
                    let mprice = Self::match_price(dist, len as u16) + self.trellis[i].price;
                    if self.trellis[i + len].price > mprice {
                        self.trellis[i + len].price = mprice;
                        self.trellis[i + len].link = i;
                        self.trellis[i].dist = dist;
                    }
                }
            }

            matcher.advance(1);
        }
        // The links point backwards; reverse them in place so the cheapest
        // path can be walked from the start.
        let mut idx = self.trellis.len() - 1;
        let mut nidx = self.trellis[idx].link;
        while idx > 0 {
            let oidx = idx;
            idx = nidx;
            nidx = self.trellis[idx].link;
            self.trellis[idx].link = oidx;
        }

        // Emit tokens along the path: a 1-step hop is a literal, anything
        // longer is a match.
        let mut idx = 0;
        while idx < self.trellis.len() - 1 {
            let len = self.trellis[idx].link - idx;
            if len == 1 {
                dst.push(Token::from_literal(src[idx]));
            } else {
                dst.push(Token::from_match(self.trellis[idx].dist, len as u16));
            }
            idx = self.trellis[idx].link;
        }

        dst.push(TOKEN_EOB);
    }
}
| 1689 | |
/// Deflate compression mode.
#[derive(Clone,Copy,Debug,PartialEq)]
pub enum DeflateMode {
    /// No compression.
    NoCompr,
    /// Fast compression.
    Fast,
    /// Still fast but better compression.
    Better,
    /// Slow but the best compression.
    Best,
}
| 1702 | |
// Internal block coding mode matching the three deflate block types.
#[derive(Clone,Copy,Debug,PartialEq)]
enum Mode {
    Copy,     // stored (uncompressed) blocks
    Fixed,    // fixed Huffman codes
    Dynamic,  // dynamic Huffman codes
}

// Maximum amount of input buffered before a block is emitted; also the
// largest LEN value a stored block can carry.
const MAX_BLOCK_SIZE: usize = 65535;
| 1711 | |
/// Deflate stream compressor.
pub struct Deflate {
    mode: Mode,                    // block type to emit
    tokens: Vec<Token>,            // token buffer reused between blocks
    srcbuf: [u8; MAX_BLOCK_SIZE],  // input staging buffer
    ssize: usize,                  // number of valid bytes in `srcbuf`
    sum1: u32,                     // Adler-32 low word (used in zlib mode)
    sum2: u32,                     // Adler-32 high word (used in zlib mode)
    zlib_mode: bool,               // append zlib footer on compress_end()
    parser: Box<dyn LZParse>,      // LZ77 parsing strategy
}
| 1723 | |
impl Deflate {
    /// Creates a new instance of `Deflate`.
    pub fn new(mode: DeflateMode) -> Self {
        let (mode, parser) = match mode {
            DeflateMode::NoCompr => (Mode::Copy, Box::new(NoParser{}) as Box<dyn LZParse>),
            DeflateMode::Fast => (Mode::Fixed, Box::new(GreedyParser{}) as Box<dyn LZParse>),
            DeflateMode::Better => (Mode::Dynamic, Box::new(LazyParser{}) as Box<dyn LZParse>),
            DeflateMode::Best => (Mode::Dynamic, Box::new(OptimalParser::new()) as Box<dyn LZParse>),
        };
        Self {
            mode, parser,
            tokens: Vec::with_capacity(MAX_BLOCK_SIZE),
            srcbuf: [0; MAX_BLOCK_SIZE],
            ssize: 0,
            // Adler-32 state starts with the low word at 1 (RFC 1950).
            sum1: 1,
            sum2: 0,
            zlib_mode: false,
        }
    }
    /// Writes zlib stream header.
    pub fn write_zlib_header(&mut self, wr: &mut DeflateWriter) {
        // CMF byte: CM = 8 (deflate), CINFO = 7 (32K window) -> 0x78.
        wr.write(8, 4);
        wr.write(7, 4);
        // FLG byte with FLEVEL reflecting the strength and FCHECK chosen
        // so the CMF/FLG pair is a multiple of 31.
        let level = match self.mode {
            Mode::Copy => 0x01,
            Mode::Fixed => 0x5E,
            Mode::Dynamic => 0x9C,
            // 0xDA for the strongest one
        };
        wr.write(level, 8);
        self.zlib_mode = true;
    }
    // Writes the big-endian Adler-32 checksum that terminates a zlib stream.
    fn write_zlib_footer(&self, wr: &mut DeflateWriter) {
        wr.align();
        wr.write((self.sum2 >> 8) as u16, 8);
        wr.write((self.sum2 & 0xFF) as u16, 8);
        wr.write((self.sum1 >> 8) as u16, 8);
        wr.write((self.sum1 & 0xFF) as u16, 8);
    }
    /// Queues data for compression.
    ///
    /// The data might be not actually compressed until [`compress_end`] is called.
    ///
    /// [`compress_end`]: ./struct.Deflate.html#method.compress_end
    pub fn compress(&mut self, src: &[u8], wr: &mut DeflateWriter) {
        let mut src = src;
        while !src.is_empty() {
            // Stage input until a full block's worth is collected.
            let clen = src.len().min(MAX_BLOCK_SIZE - self.ssize);
            let (head, tail) = src.split_at(clen);
            src = tail;
            self.srcbuf[self.ssize..][..clen].copy_from_slice(head);
            self.ssize += clen;
            if self.ssize == MAX_BLOCK_SIZE {
                self.do_block(wr, false);
            }
        }
    }
    /// Tells the encoder to finish data compression.
    ///
    /// Complete data will be output after this call.
    pub fn compress_end(&mut self, wr: &mut DeflateWriter) {
        if self.ssize > 0 {
            self.do_block(wr, true);
        } else {
            // No pending data: emit an empty final fixed-code block.
            wr.write(1, 1);
            wr.write(1, 2);
            wr.write(0, 7); //static EOF sym
        }
        if self.zlib_mode {
            self.write_zlib_footer(wr);
        }
    }
    // Encodes the buffered input as one deflate block and resets the buffer.
    fn do_block(&mut self, wr: &mut DeflateWriter, final_block: bool) {
        // NOTE(review): despite the name this is the Adler-32 modulus used
        // by zlib checksums, not a CRC parameter.
        const CRC_BASE: u32 = 65521;
        // Update the running Adler-32 over the block; a single conditional
        // subtraction per step suffices since each addition can exceed the
        // modulus at most once.
        for &b in self.srcbuf[..self.ssize].iter() {
            self.sum1 += u32::from(b);
            if self.sum1 >= CRC_BASE {
                self.sum1 -= CRC_BASE;
            }
            self.sum2 += self.sum1;
            if self.sum2 >= CRC_BASE {
                self.sum2 -= CRC_BASE;
            }
        }
        match self.mode {
            Mode::Copy => {
                // Stored block: byte-align, then LEN, its complement NLEN,
                // and the raw bytes.
                wr.write(final_block as u16, 1);
                wr.write(0, 2);
                wr.align();
                wr.write(self.ssize as u16, 16);
                wr.write(!self.ssize as u16, 16);
                for &b in self.srcbuf[..self.ssize].iter() {
                    wr.write(u16::from(b), 8);
                }
            },
            Mode::Fixed => {
                // Block using the predefined fixed Huffman codes.
                wr.write(final_block as u16, 1);
                wr.write(1, 2);
                self.tokens.truncate(0);
                self.parser.parse(&self.srcbuf[..self.ssize], &mut self.tokens);
                let mut codes = CodeHuff::new(true);
                codes.make_codes(&self.tokens);
                let mut dists = DistHuff::new(true);
                dists.make_codes(&self.tokens);
                wr.write_tokens(&self.tokens, &codes, &dists);
            },
            Mode::Dynamic => {
                // Block with dynamic Huffman codes: HLIT/HDIST counts, the
                // code-length tables, then the token stream.
                wr.write(final_block as u16, 1);
                wr.write(2, 2);
                self.tokens.truncate(0);
                self.parser.parse(&self.srcbuf[..self.ssize], &mut self.tokens);
                let mut codes = CodeHuff::new(false);
                codes.make_codes(&self.tokens);
                let mut dists = DistHuff::new(false);
                dists.make_codes(&self.tokens);
                wr.write((codes.num_codes - 257) as u16, 5);
                wr.write((dists.num_codes - 1) as u16, 5);
                wr.write_codes(&codes, &dists);
                wr.write_tokens(&self.tokens, &codes, &dists);
            },
        }
        self.ssize = 0;
    }
}
| 1848 | |
| 1849 | #[cfg(test)] |
| 1850 | mod test { |
| 1851 | use super::*; |
| 1852 | |
| 1853 | #[test] |
| 1854 | fn test_inflate1() { |
| 1855 | const TEST_DATA: &[u8] = &[ |
| 1856 | 0xF3, 0x48, 0xCD, 0xC9, 0xC9, 0xD7, 0x51, 0x28, |
| 1857 | 0xCF, 0x2F, 0xCA, 0x49, 0x51, 0x04, 0x00 ]; |
| 1858 | const TEST_REF: &[u8] = b"Hello, world!"; |
| 1859 | let mut dst_buf = [0u8; 13]; |
| 1860 | let len = Inflate::uncompress(TEST_DATA, &mut dst_buf).unwrap(); |
| 1861 | assert_eq!(len, 13); |
| 1862 | for i in 0..len { |
| 1863 | assert_eq!(dst_buf[i], TEST_REF[i]); |
| 1864 | } |
| 1865 | } |
    #[test]
    fn test_inflate2() {
        // Decompresses a stream fed in 3-byte input chunks into a 7-byte
        // output buffer, exercising both the ShortData (need more input)
        // and OutputFull (flush and resume) paths of decompress_data().
        const TEST_DATA3: &[u8] = &[ 0x4B, 0x4C, 0x44, 0x80, 0x24, 0x54, 0x80, 0x2C, 0x06, 0x00 ];
        const TEST_REF3: &[u8] = b"aaaaaaaaaaaabbbbbbbbbbbbbbbaaaaabbbbbbb";
        let mut dst_buf = [0u8; 39];

        let mut inflate = Inflate::new();
        let mut output_chunk = [0u8; 7];
        let mut output_pos = 0;
        for input in TEST_DATA3.chunks(3) {
            let mut repeat = false;
            loop {
                let ret = inflate.decompress_data(input, &mut output_chunk, repeat);
                match ret {
                    Ok(len) => {
                        // Reached the end of the data; copy what was produced.
                        for i in 0..len {
                            dst_buf[output_pos + i] = output_chunk[i];
                        }
                        output_pos += len;
                        break;
                    },
                    Err(DecompressError::ShortData) => {
                        // Input chunk exhausted; feed the next one.
                        break;
                    },
                    Err(DecompressError::OutputFull) => {
                        // Output buffer filled; flush it and decode the same
                        // input chunk again with `repeat` set.
                        repeat = true;
                        for i in 0..output_chunk.len() {
                            dst_buf[output_pos + i] = output_chunk[i];
                        }
                        output_pos += output_chunk.len();
                    },
                    _ => {
                        panic!("decompress error {:?}", ret.err().unwrap());
                    },
                }
            }
        }

        assert_eq!(output_pos, dst_buf.len());
        for i in 0..output_pos {
            assert_eq!(dst_buf[i], TEST_REF3[i]);
        }
    }
| 1909 | #[test] |
| 1910 | fn test_inflate3() { |
| 1911 | const TEST_DATA: &[u8] = &[ |
| 1912 | 0x1F, 0x8B, 0x08, 0x08, 0xF6, 0x7B, 0x90, 0x5E, 0x02, 0x03, 0x31, 0x2E, 0x74, 0x78, 0x74, 0x00, |
| 1913 | 0xE5, 0x95, 0x4B, 0x4E, 0xC3, 0x30, 0x10, 0x40, 0xF7, 0x39, 0xC5, 0x1C, 0x00, 0x16, 0x70, 0x83, |
| 1914 | 0x0A, 0xB5, 0x3B, 0xE8, 0x82, 0x5E, 0x60, 0x1A, 0x4F, 0xE2, 0x11, 0xFE, 0x44, 0x1E, 0xA7, 0x69, |
| 1915 | 0x6E, 0xCF, 0x38, 0xDD, 0xB0, 0x40, 0xA2, 0x46, 0x2D, 0x20, 0x2A, 0xE5, 0xAB, 0xCC, 0xE7, 0xBD, |
| 1916 | 0x49, 0xAC, 0x6C, 0x03, 0x64, 0x4B, 0xD0, 0x71, 0x92, 0x0C, 0x06, 0x67, 0x88, 0x1D, 0x3C, 0xD9, |
| 1917 | 0xC4, 0x92, 0x3D, 0x4A, 0xF3, 0x3C, 0x43, 0x4E, 0x23, 0x81, 0x8B, 0x07, 0x82, 0x1E, 0xF5, 0x90, |
| 1918 | 0x23, 0x78, 0x6A, 0x56, 0x30, 0x60, 0xCA, 0x89, 0x4D, 0x4F, 0xC0, 0x01, 0x10, 0x06, 0xC2, 0xA4, |
| 1919 | 0xA1, 0x44, 0xCD, 0xF6, 0x54, 0x50, 0xA8, 0x8D, 0xC1, 0x9C, 0x5F, 0x71, 0x37, 0x45, 0xC8, 0x63, |
| 1920 | 0xCA, 0x8E, 0xC0, 0xE8, 0x23, 0x69, 0x56, 0x9A, 0x8D, 0x5F, 0xB6, 0xC9, 0x96, 0x53, 0x4D, 0x17, |
| 1921 | 0xAB, 0xB9, 0xB0, 0x49, 0x14, 0x5A, 0x0B, 0x96, 0x82, 0x7C, 0xB7, 0x6F, 0x17, 0x35, 0xC7, 0x9E, |
| 1922 | 0xDF, 0x78, 0xA3, 0xF1, 0xD0, 0xA2, 0x73, 0x1C, 0x7A, 0xD8, 0x2B, 0xB3, 0x5C, 0x90, 0x85, 0xBB, |
| 1923 | 0x2A, 0x14, 0x2E, 0xF7, 0xD1, 0x19, 0x48, 0x0A, 0x23, 0x57, 0x45, 0x13, 0x3E, 0xD6, 0xA0, 0xBD, |
| 1924 | 0xF2, 0x11, 0x7A, 0x22, 0x21, 0xAD, 0xE5, 0x70, 0x56, 0xA0, 0x9F, 0xA5, 0xA5, 0x03, 0x85, 0x2A, |
| 1925 | 0xDE, 0x92, 0x00, 0x32, 0x61, 0x10, 0xAD, 0x27, 0x13, 0x7B, 0x5F, 0x98, 0x7F, 0x59, 0x83, 0xB8, |
| 1926 | 0xB7, 0x35, 0x16, 0xEB, 0x12, 0x0F, 0x1E, 0xD9, 0x14, 0x0B, 0xCF, 0xEE, 0x6D, 0x91, 0xF8, 0x93, |
| 1927 | 0x6E, 0x81, 0x3F, 0x7F, 0x41, 0xA4, 0x22, 0x1F, 0xB7, 0xE6, 0x85, 0x83, 0x9A, 0xA2, 0x61, 0x12, |
| 1928 | 0x0D, 0x0F, 0x6D, 0x01, 0xBD, 0xB0, 0xE8, 0x1D, 0xEC, 0xD1, 0xA0, 0xBF, 0x1F, 0x4E, 0xFB, 0x55, |
| 1929 | 0xBD, 0x73, 0xDD, 0x87, 0xB9, 0x53, 0x23, 0x17, 0xD3, 0xE2, 0xE9, 0x08, 0x87, 0x42, 0xFF, 0xCF, |
| 1930 | 0x26, 0x42, 0xAE, 0x76, 0xB5, 0xAE, 0x97, 0x0C, 0x18, 0x78, 0xA0, 0x24, 0xE5, 0x54, 0x0C, 0x6E, |
| 1931 | 0x60, 0x52, 0x79, 0x22, 0x57, 0xF5, 0x87, 0x78, 0x78, 0x04, 0x93, 0x46, 0xEF, 0xCB, 0x98, 0x96, |
| 1932 | 0x8B, 0x65, 0x00, 0xB7, 0x36, 0xBD, 0x77, 0xA8, 0xBD, 0x5A, 0xAA, 0x1A, 0x09, 0x00, 0x00 |
| 1933 | ]; |
| 1934 | |
| 1935 | let mut mr = MemoryReader::new_read(TEST_DATA); |
| 1936 | let mut br = ByteReader::new(&mut mr); |
| 1937 | let _dst_buf = gzip_decode(&mut br, false).unwrap(); |
| 1938 | |
| 1939 | // println!("{}", String::from_utf8_lossy(_dst_buf.as_slice())); |
| 1940 | } |
| 1941 | #[test] |
| 1942 | fn test_deflate_crc() { |
| 1943 | let output = Vec::with_capacity(20); |
| 1944 | let mut writer = DeflateWriter::new(output); |
| 1945 | let mut compr = Deflate::new(DeflateMode::NoCompr); |
| 1946 | compr.write_zlib_header(&mut writer); |
| 1947 | compr.compress(b"Hello, world!", &mut writer); |
| 1948 | compr.compress_end(&mut writer); |
| 1949 | let output = writer.end(); |
| 1950 | assert_eq!(output.as_slice(), b"\x78\x01\x01\x0D\x00\xF2\xFFHello, world!\x20\x5E\x04\x8A"); |
| 1951 | } |
| 1952 | fn deflate_test(mode: DeflateMode) { |
| 1953 | const SRC: &[u8] = |
| 1954 | b"The first day of Christmas, |
| 1955 | My true love sent to me |
| 1956 | A partridge in a pear tree. |
| 1957 | |
| 1958 | The second day of Christmas, |
| 1959 | My true love sent to me |
| 1960 | Two turtle doves, and |
| 1961 | A partridge in a pear tree. |
| 1962 | |
| 1963 | The third day of Christmas, |
| 1964 | My true love sent to me |
| 1965 | Three French hens, |
| 1966 | Two turtle doves, and |
| 1967 | A partridge in a pear tree. |
| 1968 | |
| 1969 | The fourth day of Christmas, |
| 1970 | My true love sent to me |
| 1971 | Four colly birds, |
| 1972 | Three French hens, |
| 1973 | Two turtle doves, and |
| 1974 | A partridge in a pear tree. |
| 1975 | |
| 1976 | The fifth day of Christmas, |
| 1977 | My true love sent to me |
| 1978 | Five gold rings, |
| 1979 | Four colly birds, |
| 1980 | Three French hens, |
| 1981 | Two turtle doves, and |
| 1982 | A partridge in a pear tree."; |
| 1983 | let output = Vec::with_capacity(SRC.len() + 16); |
| 1984 | let mut writer = DeflateWriter::new(output); |
| 1985 | let mut compr = Deflate::new(mode); |
| 1986 | compr.write_zlib_header(&mut writer); |
| 1987 | compr.compress(SRC, &mut writer); |
| 1988 | compr.compress_end(&mut writer); |
| 1989 | let output = writer.end(); |
| 1990 | let mut uncompr = vec![0u8; SRC.len()]; |
| 1991 | Inflate::uncompress(&output, &mut uncompr).unwrap(); |
| 1992 | assert_eq!(SRC, uncompr.as_slice()); |
| 1993 | } |
| 1994 | #[test] |
| 1995 | fn test_deflate_fast() { |
| 1996 | deflate_test(DeflateMode::Fast); |
| 1997 | } |
| 1998 | #[test] |
| 1999 | fn test_deflate_better() { |
| 2000 | deflate_test(DeflateMode::Better); |
| 2001 | } |
| 2002 | #[test] |
| 2003 | fn test_deflate_best() { |
| 2004 | deflate_test(DeflateMode::Best); |
| 2005 | } |
| 2006 | } |