1 use nihav_core::codecs::*;
2 use nihav_core::io::byteio::*;
3 use nihav_codec_support::codecs::{MV, ZERO_MV};
4 use super::vpcommon::*;
6 use super::vp78data::*;
// Which reference frame an inter macroblock predicts from.
// Defaults to Intra (no inter reference).
// NOTE(review): this listing is decimated — the enum body itself is elided here.
10 #[derive(Clone,Copy,PartialEq,Debug)]
18 impl Default for VP8Ref {
19 fn default() -> Self { VP8Ref::Intra }
// Holder for the three VP8 reference frames: last, golden and altref.
// `add_*` store a new buffer, `get_*` hand out the stored one (bodies elided),
// `clear` drops the stored references.
23 pub struct VP8Shuffler {
24 lastframe: Option<NAVideoBufferRef<u8>>,
25 goldframe: Option<NAVideoBufferRef<u8>>,
26 altframe: Option<NAVideoBufferRef<u8>>,
30 pub fn new() -> Self { Self::default() }
31 pub fn clear(&mut self) {
32 self.lastframe = None;
33 self.goldframe = None;
// Store the just-decoded frame as the "last" reference.
36 pub fn add_frame(&mut self, buf: NAVideoBufferRef<u8>) {
37 self.lastframe = Some(buf);
39 pub fn add_golden_frame(&mut self, buf: NAVideoBufferRef<u8>) {
40 self.goldframe = Some(buf);
42 pub fn add_altref_frame(&mut self, buf: NAVideoBufferRef<u8>) {
43 self.altframe = Some(buf);
// Getters return a reference to the stored buffer, or None when absent
// (return paths elided in this listing).
45 pub fn get_last(&mut self) -> Option<NAVideoBufferRef<u8>> {
46 if let Some(ref frm) = self.lastframe {
52 pub fn get_golden(&mut self) -> Option<NAVideoBufferRef<u8>> {
53 if let Some(ref frm) = self.goldframe {
59 pub fn get_altref(&mut self) -> Option<NAVideoBufferRef<u8>> {
60 if let Some(ref frm) = self.altframe {
// True when at least the "last" reference is available; inter frames
// are refused without it (see the MissingReference check in decode()).
66 pub fn has_refs(&self) -> bool {
67 self.lastframe.is_some()
// Fragment of SBParams: per-subblock decoding parameters; coef_probs is the
// 4 (plane type) x 8 (coef band) x 3 (context) x 11 (node) probability table.
72 coef_probs: &'a [[[[u8; 11]; 3]; 8]; 4],
// DCT token tree WITHOUT the EOB leaf: used by decode_subblock right after a
// zero coefficient, where EOB cannot be coded (probabilities passed as
// &probs[1..] to skip the EOB node).
76 pub const COEF_NE_TREE: &[VPTreeDef<DCTToken>] = &[
77 VPTreeDef::Value(DCTToken::Zero), VPTreeDef::Index(2),
78 VPTreeDef::Value(DCTToken::One), VPTreeDef::Index(4),
79 VPTreeDef::Index(6), VPTreeDef::Index(10),
80 VPTreeDef::Value(DCTToken::Two), VPTreeDef::Index(8),
81 VPTreeDef::Value(DCTToken::Three), VPTreeDef::Value(DCTToken::Four),
82 VPTreeDef::Index(12), VPTreeDef::Index(14),
83 VPTreeDef::Value(DCTToken::Cat1), VPTreeDef::Value(DCTToken::Cat2),
84 VPTreeDef::Index(16), VPTreeDef::Index(18),
85 VPTreeDef::Value(DCTToken::Cat3), VPTreeDef::Value(DCTToken::Cat4),
86 VPTreeDef::Value(DCTToken::Cat5), VPTreeDef::Value(DCTToken::Cat6)
// Decodes one 4x4 coefficient subblock. `ctype` selects the plane-type
// probability set; `pctx` is the nonzero context from the neighbours.
// Returns 1 when any coefficient was decoded as nonzero, 0 otherwise
// (the `has_nz` accumulator itself is on an elided line).
89 fn decode_subblock(bc: &mut BoolCoder, coeffs: &mut [i16; 16], ctype: usize, pctx: u8, sbparams: &SBParams) -> u8 {
// Maps scan position to the coefficient-band index of the probability table.
90 const COEF_BANDS: [usize; 16] = [ 0, 1, 2, 3, 6, 4, 5, 6, 6, 6, 6, 6, 6, 6, 6, 7 ];
// ctype == 0 means the DC slot is carried in the Y2 block, so luma AC starts at 1.
93 let start = if ctype != 0 { 0 } else { 1 };
95 let mut cval = pctx as usize;
96 for idx in start..16 {
97 let probs = &sbparams.coef_probs[ctype][COEF_BANDS[idx]][cval];
// After a zero coefficient (cval == 0, not the first token) EOB is impossible,
// so the EOB-less tree is used with the first probability skipped.
98 let tok = if cval != 0 || idx == start {
99 bc.read_tree(COEF_TREE, probs)
101 bc.read_tree(COEF_NE_TREE, &probs[1..])
103 if tok == DCTToken::EOB { break; }
104 let level = expand_token(bc, tok);
// Dequantize while storing in natural (zigzag-resolved) order.
105 coeffs[DEFAULT_SCAN_ORDER[idx]] = level.wrapping_mul(sbparams.qmat[idx]);
// Next-token context: 0 / 1 / 2+ magnitude of the current level.
106 cval = level.abs().min(2) as usize;
109 if has_nz > 0 { 1 } else { 0 }
// Field fragments of two structs (both heavily elided in this listing):
// SavedProbs — snapshot of entropy tables kept when refresh_entropy_probs is
// unset, and DecoderState — all per-frame decoder state.
112 #[derive(Clone,Copy,Default)]
122 #[derive(Clone,Copy,Default)]
130 kf_ymode_prob: [u8; 4],
131 kf_uvmode_prob: [u8; 3],
133 coef_probs: [[[[u8; 11]; 3]; 8]; 4],
134 mv_probs: [[u8; 19]; 2],
136 segment_probs: [u8; 3],
140 struct DecoderState {
142 loop_filter_level: u8,
148 kf_ymode_prob: [u8; 4],
149 kf_uvmode_prob: [u8; 3],
// Reference sign bias flags for golden ([0]) and altref ([1]) frames.
154 sign_bias: [bool; 2],
156 coef_probs: [[[[u8; 11]; 3]; 8]; 4],
157 mv_probs: [[u8; 19]; 2],
160 update_seg_map: bool,
// Per-MB overrides set from the active segment (None when segmentation is off).
161 force_quant: Option<u8>,
162 force_loop_str: Option<u8>,
163 segment_probs: [u8; 3],
165 seg_feature_mode: bool,
168 lf_frame_delta: [i8; 4],
169 lf_mode_delta: [i8; 4],
// Intra prediction edge contexts for the three planes.
173 ipred_ctx_y: IPredContext,
174 ipred_ctx_u: IPredContext,
175 ipred_ctx_v: IPredContext,
// Resets entropy state to the spec defaults (done on every keyframe).
179 fn reset(&mut self) {
// Default MV component probabilities from the VP8 specification.
180 const VP8_DEFAULT_MV_PROBS: [[u8; 19]; 2] = [
181 [ 162, 128, 225, 146, 172, 147, 214, 39, 156, 128, 129, 132, 75, 145, 178, 206, 239, 254, 254 ],
182 [ 164, 128, 204, 170, 119, 235, 140, 230, 228, 128, 130, 130, 74, 148, 180, 203, 236, 254, 254 ]
185 self.kf_ymode_prob.copy_from_slice(Y_MODE_TREE_PROBS);
186 self.kf_uvmode_prob.copy_from_slice(UV_MODE_TREE_PROBS);
187 self.coef_probs.copy_from_slice(&DEFAULT_DCT_PROBS);
188 self.mv_probs.copy_from_slice(&VP8_DEFAULT_MV_PROBS);
189 self.segment_probs = [255; 3];
190 self.seg = [Segment::default(); 4];
// Restores a previously saved snapshot (used when the frame's entropy
// updates must not persist).
192 fn restore(&mut self, dst: &SavedProbs) {
193 self.kf_ymode_prob = dst.kf_ymode_prob;
194 self.kf_uvmode_prob = dst.kf_uvmode_prob;
195 self.coef_probs = dst.coef_probs;
196 self.mv_probs = dst.mv_probs;
197 self.segment_probs = dst.segment_probs;
// Saves current tables into `dst`.
199 fn save(&self, dst: &mut SavedProbs) {
200 dst.kf_ymode_prob = self.kf_ymode_prob;
201 dst.kf_uvmode_prob = self.kf_uvmode_prob;
202 dst.coef_probs = self.coef_probs;
203 dst.mv_probs = self.mv_probs;
// segment_probs deliberately not saved here: update_segmentation() snapshots
// them into tmp_probs itself just before overwriting (see file line 337).
204 // dst.segment_probs = self.segment_probs;
// Decodes one motion-vector component (x or y). Small magnitudes use a tree;
// large ones are read bit-by-bit in LONG_VECTOR_ORDER, then the sign is read
// last (probs[0] = short/long selector, probs[1] = sign, probs[2..9] = short
// tree, probs[9..] = long-vector raw bits).
208 fn decode_mv_component(bc: &mut BoolCoder, probs: &[u8; 19]) -> i16 {
// Bit positions read for the long form; bit 3 is skipped here and handled below.
209 const LONG_VECTOR_ORDER: [usize; 9] = [ 0, 1, 2, 9, 8, 7, 6, 5, 4 ];
211 let val = if !bc.read_prob(probs[0]) {
212 bc.read_tree(SMALL_MV_TREE, &probs[2..9])
214 let raw_probs = &probs[9..];
216 for ord in LONG_VECTOR_ORDER.iter() {
217 raw |= (bc.read_prob(raw_probs[*ord]) as i16) << *ord;
// Bit 3 is only coded when some higher bit is set (spec quirk).
219 if (raw & 0x3F0) != 0 {
220 raw |= (bc.read_prob(raw_probs[3]) as i16) << 3;
// Zero has no sign; otherwise probs[1] selects positive/negative
// (negation path elided in this listing).
226 if (val == 0) || !bc.read_prob(probs[1]) {
// Field fragments of VP8Decoder plus the visible parts of its constructor.
// Per-MB arrays are laid out in 4x4-subblock granularity (see set_dimensions).
234 info: NACodecInfoRef,
241 mb_info: Vec<MBInfo>,
245 ymodes: Vec<PredMode>,
247 uvmodes: Vec<PredMode>,
248 uvmode_stride: usize,
250 dstate: DecoderState,
252 tmp_probs: SavedProbs,
// 24 subblock coefficient sets + 1 Y2 (DC) block at index 24.
254 coeffs: [[i16; 16]; 25],
// Quant matrices: [0] frame-wide, [1..=4] per segment; inner [Y, UV, Y2].
255 qmat: [[[i16; 16]; 3]; 5],
257 mc_buf: NAVideoBufferRef<u8>,
// Scratch buffer for motion compensation; 128x128 covers a padded macroblock.
264 let vt = alloc_video_buffer(NAVideoInfo::new(128, 128, false, YUV420_FORMAT), 4).unwrap();
265 let mc_buf = vt.get_vbuf().unwrap();
267 info: NACodecInfoRef::default(),
269 shuf: VP8Shuffler::new(),
283 dstate: DecoderState::default(),
284 pcache: PredCache::new(),
285 tmp_probs: SavedProbs::default(),
287 coeffs: [[0; 16]; 25],
288 qmat: [[[0; 16]; 3]; 5],
// (Re)allocates all per-frame arrays for a new picture size; no-op when the
// size is unchanged. Strides are in 4x4-subblock units for luma modes/MVs
// and per-MB for chroma modes and the segment map.
295 fn set_dimensions(&mut self, width: usize, height: usize) {
296 if (width == self.width) && (height == self.height) {
300 self.height = height;
301 self.mb_w = (self.width + 15) >> 4;
302 self.mb_h = (self.height + 15) >> 4;
303 self.mb_info.resize(self.mb_w * self.mb_h, MBInfo::default());
304 self.mv_stride = self.mb_w * 4;
305 self.mvs.resize(self.mv_stride * self.mb_h * 4, ZERO_MV);
307 self.ymode_stride = self.mb_w * 4;
308 self.uvmode_stride = self.mb_w;
309 self.ymodes.resize(self.ymode_stride * self.mb_h * 4, PredMode::default());
310 self.uvmodes.resize(self.uvmode_stride * self.mb_h, PredMode::default());
312 self.pcache.resize(self.mb_w);
// The segment map must be fully reset, not just resized.
314 self.seg_map.clear();
315 self.seg_map.resize(self.mb_w * self.mb_h, 0);
// Parses the segmentation header: per-segment quantizer and loop-filter
// deltas (sign-magnitude coded) plus, optionally, new segment-tree
// probabilities for the per-MB segment map.
317 fn update_segmentation(&mut self, bc: &mut BoolCoder) -> DecoderResult<()> {
318 self.dstate.update_seg_map = bc.read_bool();
320 self.dstate.seg_feature_mode = bc.read_bool();
321 for seg in self.dstate.seg.iter_mut() {
// 7-bit magnitude followed by a sign flag.
323 let quant_upd_val = bc.read_bits(7) as i8;
324 let quant_upd_sign = bc.read_bool();
325 seg.quant = if !quant_upd_sign { quant_upd_val } else { -quant_upd_val };
328 for seg in self.dstate.seg.iter_mut() {
330 let lf_upd_val = bc.read_bits(6) as i8;
331 let lf_upd_sign = bc.read_bool();
332 seg.lf = if !lf_upd_sign { lf_upd_val } else { -lf_upd_val };
336 if self.dstate.update_seg_map {
// Snapshot the old probabilities before overwriting (save() skips them,
// relying on this copy).
337 self.tmp_probs.segment_probs = self.dstate.segment_probs;
338 for prob in self.dstate.segment_probs.iter_mut() {
340 *prob = bc.read_byte();
// Parses loop-filter delta adjustments: per-reference-frame and per-mode
// deltas, each sign-magnitude coded as 6 bits + sign.
346 fn mb_lf_adjustments(&mut self, bc: &mut BoolCoder) -> DecoderResult<()> {
347 self.dstate.lf_delta = bc.read_bool();
348 if self.dstate.lf_delta {
350 for frame_delta in self.dstate.lf_frame_delta.iter_mut() {
352 let delta_magn = bc.read_bits(6) as i8;
353 let delta_sign = bc.read_bool();
354 *frame_delta = if !delta_sign { delta_magn } else { -delta_magn };
357 for mode_delta in self.dstate.lf_mode_delta.iter_mut() {
359 let delta_magn = bc.read_bits(6) as i8;
360 let delta_sign = bc.read_bool();
361 *mode_delta = if !delta_sign { delta_magn } else { -delta_magn };
// Reads an optional 4-bit delta applied to the base AC quantizer index;
// result is clamped to the valid 0..=127 index range.
368 fn read_delta_quant(bc: &mut BoolCoder, y_ac_q: usize) -> DecoderResult<usize> {
370 let delta = bc.read_bits(4) as usize;
372 Ok(y_ac_q.saturating_sub(delta))
374 Ok((y_ac_q + delta).min(127))
// Parses the frame quantizer header: a 7-bit base Y AC index plus five
// optional deltas (Y DC, Y2 DC/AC, UV DC/AC), then rebuilds the quant tables.
380 fn quant_indices(&mut self, bc: &mut BoolCoder) -> DecoderResult<()> {
381 let y_ac_q = bc.read_bits(7) as usize;
382 let y_dc_q = Self::read_delta_quant(bc, y_ac_q)?;
383 let y2_dc_q = Self::read_delta_quant(bc, y_ac_q)?;
384 let y2_ac_q = Self::read_delta_quant(bc, y_ac_q)?;
385 let uv_dc_q = Self::read_delta_quant(bc, y_ac_q)?;
386 let uv_ac_q = Self::read_delta_quant(bc, y_ac_q)?;
387 self.set_qmat(y_dc_q, y_ac_q, y2_dc_q, y2_ac_q, uv_dc_q, uv_ac_q);
// Conditionally updates each DCT coefficient probability: an update flag is
// coded with DCT_UPDATE_PROBS, then the new probability is a raw byte
// (the i/j/k/l loop nest is elided in this listing).
391 fn read_dct_coef_prob_upd(&mut self, bc: &mut BoolCoder) -> DecoderResult<()> {
396 if bc.read_prob(DCT_UPDATE_PROBS[i][j][k][l]) {
397 self.dstate.coef_probs[i][j][k][l] = bc.read_byte();
// Conditionally updates each MV component probability, gated by the
// spec-defined MV_UPDATE_PROBS table (loop nest elided).
405 fn read_mv_prob_upd(&mut self, bc: &mut BoolCoder) -> DecoderResult<()> {
406 const MV_UPDATE_PROBS: [[u8; 19]; 2] = [
407 [ 237, 246, 253, 253, 254, 254, 254, 254, 254, 254, 254, 254, 254, 254, 250, 250, 252, 254, 254 ],
408 [ 231, 243, 245, 253, 254, 254, 254, 254, 254, 254, 254, 254, 254, 254, 251, 251, 254, 254, 254 ]
412 if bc.read_prob(MV_UPDATE_PROBS[comp][i]) {
// NOTE(review): uses read_probability() here while other tables use
// read_byte() — presumably the 7-bit-scaled MV prob encoding; confirm.
413 self.dstate.mv_probs[comp][i] = bc.read_probability();
// Decodes the per-MB segment id via the segment tree and records it in the map.
419 fn decode_mb_features(&mut self, bc: &mut BoolCoder, mb_idx: usize) -> DecoderResult<()> {
420 let segment_id = bc.read_tree(FEATURE_TREE, &self.dstate.segment_probs);
421 self.seg_map[mb_idx] = segment_id as u8;
// Activates the segment mapped to this MB: force_quant carries the segment id
// (decode_residue uses it as qmat index + 1) and force_loop_str the effective
// loop-filter strength (absolute in feature mode, delta otherwise).
425 fn set_cur_segment(&mut self, mb_idx: usize) {
426 self.dstate.force_quant = Some(self.seg_map[mb_idx]);
427 let seg_id = self.seg_map[mb_idx] as usize;
428 let segment = &self.dstate.seg[seg_id];
429 let loop_str = if self.dstate.seg_feature_mode {
// Delta mode: add the segment delta to the frame level, clamped to 0..=63.
432 (i16::from(self.dstate.loop_filter_level) + i16::from(segment.lf)).max(0).min(63) as u8
434 self.dstate.force_loop_str = Some(loop_str);
// Decodes the full residue of one macroblock: the optional Y2 (DC) block,
// 16 luma, 4 U and 4 V subblocks, maintaining the nonzero-context caches.
// Returns whether any coefficient was coded. Loop headers around the
// per-plane sections are elided in this listing.
436 fn decode_residue(&mut self, bc: &mut BoolCoder, mb_x: usize) -> bool {
// qmat[0] is frame-wide; segments use qmat[seg_id + 1].
437 let qmat_idx = if let Some(idx) = self.dstate.force_quant { (idx as usize) + 1 } else { 0 };
438 let mut sbparams = SBParams {
439 qmat: &self.qmat[qmat_idx][2],
440 coef_probs: &self.dstate.coef_probs,
442 let mut has_ac = [false; 25];
443 let mut coded = false;
// Y2 block (index 24): carries the DC of the 16 luma subblocks.
445 if self.dstate.has_y2 {
446 let pred = &self.pcache.y2_pred;
447 let pidx = pred.xpos + mb_x;
448 let pctx = self.pcache.y2_pred_left + pred.data[pidx - pred.stride];
450 let has_nz = decode_subblock(bc, &mut self.coeffs[24], 1, pctx, &sbparams);
451 self.pcache.y2_pred.data[pidx] = has_nz;
452 self.pcache.y2_pred_left = has_nz;
453 has_ac[24] = has_nz > 0;
454 coded |= has_ac[24] | (self.coeffs[24][0] != 0);
// No Y2 for this MB: propagate the above-row context downward unchanged.
458 let pred = &mut self.pcache.y2_pred;
459 let pidx = pred.xpos + mb_x;
460 pred.data[pidx] = pred.data[pidx - pred.stride];
// Luma subblocks.
464 sbparams.qmat = &self.qmat[qmat_idx][0];
468 let pred = &self.pcache.y_pred;
469 let pidx = pred.xpos + mb_x * 4 + bx + by * pred.stride;
470 let pctx = self.pcache.y_pred_left[by] + pred.data[pidx - pred.stride];
472 let has_nz = decode_subblock(bc, &mut self.coeffs[i], ytype, pctx, &sbparams);
473 self.pcache.y_pred.data[pidx] = has_nz;
474 self.pcache.y_pred_left[by] = has_nz;
475 has_ac[i] = has_nz > 0;
476 coded |= has_ac[i] | (self.coeffs[i][0] != 0);
// Chroma U subblocks (blocks 16..20).
478 sbparams.qmat = &self.qmat[qmat_idx][1];
481 let by = (i >> 1) & 1;
482 let pred = &self.pcache.u_pred;
483 let pidx = pred.xpos + mb_x * 2 + bx + by * pred.stride;
484 let pctx = self.pcache.u_pred_left[by] + pred.data[pidx - pred.stride];
486 let has_nz = decode_subblock(bc, &mut self.coeffs[i], 2, pctx, &sbparams);
487 self.pcache.u_pred.data[pidx] = has_nz;
488 self.pcache.u_pred_left[by] = has_nz;
489 has_ac[i] = has_nz > 0;
490 coded |= has_ac[i] | (self.coeffs[i][0] != 0);
// Chroma V subblocks (blocks 20..24).
494 let by = (i >> 1) & 1;
495 let pred = &self.pcache.v_pred;
496 let pidx = pred.xpos + mb_x * 2 + bx + by * pred.stride;
497 let pctx = self.pcache.v_pred_left[by] + pred.data[pidx - pred.stride];
499 let has_nz = decode_subblock(bc, &mut self.coeffs[i], 2, pctx, &sbparams);
500 self.pcache.v_pred.data[pidx] = has_nz;
501 self.pcache.v_pred_left[by] = has_nz;
502 has_ac[i] = has_nz > 0;
503 coded |= has_ac[i] | (self.coeffs[i][0] != 0);
// Inverse transforms: WHT on Y2, its outputs become luma DC values, then
// full or DC-only IDCT per subblock depending on what was coded.
506 if self.dstate.has_y2 {
507 let y2block = &mut self.coeffs[24];
510 } else if y2block[0] != 0 {
514 self.coeffs[i][0] = self.coeffs[24][i];
519 idct4x4(&mut self.coeffs[i]);
520 } else if self.coeffs[i][0] != 0 {
521 idct4x4_dc(&mut self.coeffs[i]);
// Fills one [Y, UV, Y2] quant-matrix set from the six quantizer indices.
// Slot 0 of each matrix is DC, the rest AC (fill loops elided).
528 fn set_single_qmat(qmat: &mut [[i16; 16]; 3], y_dc_q: usize, y_ac_q: usize, y2_dc_q: usize, y2_ac_q: usize, uv_dc_q: usize, uv_ac_q: usize) {
529 qmat[0][0] = DC_QUANTS[y_dc_q];
531 qmat[0][i] = AC_QUANTS[y_ac_q];
// Chroma DC quantizer is capped at 132 per the VP8 spec.
533 qmat[1][0] = DC_QUANTS[uv_dc_q].min(132);
535 qmat[1][i] = AC_QUANTS[uv_ac_q];
// Y2 DC is doubled; Y2 AC is scaled by 155/100 with a floor of 8.
537 qmat[2][0] = DC_QUANTS[y2_dc_q] * 2;
539 qmat[2][i] = (i32::from(AC_QUANTS[y2_ac_q]) * 155 / 100).max(8) as i16;
// Builds the frame-wide quant matrices (qmat[0]) and, when segmentation is
// active, one set per segment (qmat[1..=4]) with absolute or delta quantizers.
542 fn set_qmat(&mut self, y_dc_q: usize, y_ac_q: usize, y2_dc_q: usize, y2_ac_q: usize, uv_dc_q: usize, uv_ac_q: usize) {
543 Self::set_single_qmat(&mut self.qmat[0], y_dc_q, y_ac_q, y2_dc_q, y2_ac_q, uv_dc_q, uv_ac_q);
544 if self.dstate.segmentation {
545 for (qmat, seg) in self.qmat[1..].iter_mut().zip(self.dstate.seg.iter()) {
546 let q = if self.dstate.seg_feature_mode {
// Absolute mode: the segment value is the quantizer index itself.
547 seg.quant.max(0) as usize
// Delta mode: add to the frame's base AC index, clamped to 0..=127.
549 ((y_ac_q as i16) + i16::from(seg.quant)).max(0).min(127) as usize
// Per-segment sets use a single index for all six quantizers.
551 Self::set_single_qmat(qmat, q, q, q, q, q, q);
// Broadcast one prediction mode / motion vector over the 4x4 subblock grid
// of a macroblock (the inner x/y loops are elided in this listing).
555 fn fill_ymode(&mut self, mb_x: usize, mb_y: usize, ymode: PredMode) {
556 let mut iidx = mb_x * 4 + mb_y * 4 * self.ymode_stride;
559 self.ymodes[iidx + x] = ymode;
561 iidx += self.ymode_stride;
564 fn fill_mv(&mut self, mb_x: usize, mb_y: usize, mv: MV) {
565 let mut iidx = mb_x * 4 + mb_y * 4 * self.mv_stride;
568 self.mvs[iidx + x] = mv;
// mb_w * 4 equals mv_stride (see set_dimensions), so this advances one row.
570 iidx += self.mb_w * 4;
// Returns the reference sign bias for a frame type: golden and altref have
// explicit bias flags (Last/Intra arms elided in this listing).
573 fn get_frame_sign(&self, rframe: VP8Ref) -> bool {
575 VP8Ref::Golden => self.dstate.sign_bias[0],
576 VP8Ref::AltRef => self.dstate.sign_bias[1],
// Scans the left, top and top-left neighbour MBs to build the VP8 MV
// prediction: returns (mode probabilities, nearest MV, near MV, best MV),
// all clipped to the frame area. Many interior lines (counting, dedup,
// the `ct` context array) are elided in this listing.
580 fn find_mv_pred(&self, mb_x: usize, mb_y: usize, frm_sign: bool) -> ([u8; 4], MV, MV, MV) {
// Mode probabilities indexed by the neighbour-MV count context.
581 const VP8_MV_PROBS: [[u8; 4]; 6] = [
586 [ 159, 134, 128, 34 ],
587 [ 234, 188, 128, 28 ]
// (dx, dy, weight) of the three candidate neighbours: left, top, top-left.
590 const OFFS: [(u8, u8, u8); 3] = [(0, 1, 2), (1, 0, 2), (1, 1, 1)];
591 let mut mvs = [ZERO_MV; 3];
592 let mut mvc = [0; 3];
596 let mut nearest_mv = ZERO_MV;
597 let mut near_mv = ZERO_MV;
599 for &(x, y, weight) in OFFS.iter() {
// Only look at neighbours that exist inside the frame.
600 let mv = if (x == 0 || mb_x > 0) && (y == 0 || mb_y > 0) {
601 let x = usize::from(x);
602 let y = usize::from(y);
603 let mb_idx = mb_x - x + (mb_y - y) * self.mb_w;
604 if self.mb_info[mb_idx].mb_type.is_intra() {
607 if self.mb_info[mb_idx].mb_type == VPMBType::InterFourMV {
610 let rsign = self.get_frame_sign(self.mb_info[mb_idx].rframe);
// For split-MV neighbours pick the subblock nearest to this MB.
611 let mut mv_idx = mb_x * 4 + mb_y * 4 * self.mv_stride;
613 mv_idx += self.mv_stride * 3 - 1;
614 } else if x == 0 { // top
615 mv_idx -= self.mv_stride;
618 mv_idx -= self.mv_stride + 1;
// Mismatched sign bias flips the candidate (flip itself elided).
620 if rsign == frm_sign {
628 let mut found = false;
638 mvc[num_mv] = weight;
// Pick best/nearest/near from the weighted, deduplicated candidates.
667 let mut best_mv = mvs[0];
670 for (&mv, &count) in mvs[..num_mv].iter().zip(mvc.iter()) {
672 if nearest_mv == ZERO_MV {
685 for (&mv, &count) in mvs[..num_mv].iter().zip(mvc.iter()) {
692 let best_mv = self.clip_mv(best_mv, mb_x, mb_y);
694 let mvprobs = [VP8_MV_PROBS[ct[0] as usize][0],
695 VP8_MV_PROBS[ct[1] as usize][1],
696 VP8_MV_PROBS[ct[2] as usize][2],
697 VP8_MV_PROBS[ct[3] as usize][3]];
699 (mvprobs, self.clip_mv(nearest_mv, mb_x, mb_y), self.clip_mv(near_mv, mb_x, mb_y), best_mv)
// Clamps a motion vector so the referenced area stays within the frame plus
// a 16-pixel border. Positions are in quarter-pel units (hence the * 4).
701 fn clip_mv(&self, mv: MV, mb_x: usize, mb_y: usize) -> MV {
702 let pos_x = (mb_x as i32) * 16 * 4;
703 let pos_y = (mb_y as i32) * 16 * 4;
704 let mv_x = (pos_x + i32::from(mv.x)).max(-16 * 4).min((self.mb_w as i32) * 16 * 4);
705 let mv_y = (pos_y + i32::from(mv.y)).max(-16 * 4).min((self.mb_h as i32) * 16 * 4);
706 MV {x: (mv_x - pos_x) as i16, y: (mv_y - pos_y) as i16 }
// Decodes one sub-MV of a split macroblock: picks a context from the left
// and top neighbour sub-MVs, reads a sub-MV mode, and either reuses a
// neighbour MV, zero, or decodes a new delta on top of pred_mv.
708 fn get_split_mv(&self, bc: &mut BoolCoder, mb_x: usize, mb_y: usize, bx: usize, by: usize, pred_mv: MV) -> MV {
// Mode probabilities per left/top-MV agreement context.
709 const SUB_MV_REF_PROBS: [[u8; 3]; 5] = [
717 let mvidx = mb_x * 4 + bx + (mb_y * 4 + by) * self.mv_stride;
718 let left_mv = if (mb_x > 0) || (bx > 0) {
723 let top_mv = if (mb_y > 0) || (by > 0) {
724 self.mvs[mvidx - self.mv_stride]
// Context: which of left/top agree with each other and with zero.
729 let idx = if left_mv == top_mv {
730 if left_mv == ZERO_MV {
735 } else if top_mv == ZERO_MV {
737 } else if left_mv == ZERO_MV {
742 let mode = bc.read_tree(SUB_MV_REF_TREE, &SUB_MV_REF_PROBS[idx]);
744 SubMVRef::Left => left_mv,
745 SubMVRef::Above => top_mv,
746 SubMVRef::Zero => ZERO_MV,
// New MV: y component is decoded before x, as in the VP8 bitstream.
748 let dmy = decode_mv_component(bc, &self.dstate.mv_probs[0]);
749 let dmx = decode_mv_component(bc, &self.dstate.mv_probs[1]);
750 pred_mv + MV{ x: dmx, y: dmy }
// Decodes split-MV motion for one macroblock and fills the 4x4 sub-MV grid
// according to the split mode (2 horizontal halves, 2 vertical halves,
// 4 quarters, or 16 individual MVs). Some loop headers are elided here.
754 fn do_split_mv(&mut self, bc: &mut BoolCoder, mb_x: usize, mb_y: usize, pred_mv: MV) -> DecoderResult<()> {
755 let split_mode = bc.read_tree(MV_SPLIT_MODE_TREE, &MV_SPLIT_MODE_PROBS);
756 let mut mvidx = mb_x * 4 + mb_y * 4 * self.mv_stride;
758 MVSplitMode::TopBottom => {
759 let top_mv = self.get_split_mv(bc, mb_x, mb_y, 0, 0, pred_mv);
761 for x in 0..4 { self.mvs[mvidx + x] = top_mv; }
762 mvidx += self.mv_stride;
764 let bot_mv = self.get_split_mv(bc, mb_x, mb_y, 0, 2, pred_mv);
766 for x in 0..4 { self.mvs[mvidx + x] = bot_mv; }
767 mvidx += self.mv_stride;
770 MVSplitMode::LeftRight => {
771 let left_mv = self.get_split_mv(bc, mb_x, mb_y, 0, 0, pred_mv);
// NOTE(review): this store is repeated below at line 776 — possibly it seeds
// the left-context for decoding right_mv; elided lines make this unclear.
772 self.mvs[mvidx + 1] = left_mv;
773 let right_mv = self.get_split_mv(bc, mb_x, mb_y, 2, 0, pred_mv);
775 self.mvs[mvidx + 0] = left_mv;
776 self.mvs[mvidx + 1] = left_mv;
777 self.mvs[mvidx + 2] = right_mv;
778 self.mvs[mvidx + 3] = right_mv;
779 mvidx += self.mv_stride;
782 MVSplitMode::Quarters => {
783 for y in (0..4).step_by(2) {
784 for x in (0..4).step_by(2) {
785 self.mvs[mvidx + x] = self.get_split_mv(bc, mb_x, mb_y, x, y, pred_mv);
786 self.mvs[mvidx + x + 1] = self.mvs[mvidx + x];
789 self.mvs[mvidx + x + self.mv_stride] = self.mvs[mvidx + x];
791 mvidx += self.mv_stride * 2;
794 MVSplitMode::Sixteenths => {
797 self.mvs[mvidx + x] = self.get_split_mv(bc, mb_x, mb_y, x, y, pred_mv);
799 mvidx += self.mv_stride;
// Adds the decoded residue blocks onto the predicted macroblock pixels:
// 16 luma 4x4 blocks (when do_luma) plus the 4+4 chroma blocks.
806 fn add_residue(&self, dframe: &mut NASimpleVideoFrame<u8>, mb_x: usize, mb_y: usize, do_luma: bool) {
808 let ydst = &mut dframe.data[dframe.offset[0]..];
809 let ystride = dframe.stride[0];
810 let mut yoff = mb_x * 16 + mb_y * 16 * ystride;
813 add_coeffs4x4(ydst, yoff + x * 4, ystride, &self.coeffs[x + y * 4]);
// Chroma planes are addressed via absolute offsets into the shared buffer.
818 let dst = &mut dframe.data[0..];
819 let mut uoff = dframe.offset[1] + mb_x * 8 + mb_y * 8 * dframe.stride[1];
820 let ustride = dframe.stride[1];
821 let mut voff = dframe.offset[2] + mb_x * 8 + mb_y * 8 * dframe.stride[2];
822 let vstride = dframe.stride[2];
825 add_coeffs4x4(dst, uoff + x * 4, ustride, &self.coeffs[16 + x + y * 2]);
826 add_coeffs4x4(dst, voff + x * 4, vstride, &self.coeffs[20 + x + y * 2]);
// Reconstructs an intra macroblock: whole-16x16 or per-4x4 luma prediction
// (BPred), then 8x8 chroma prediction, then the residue. The 0x7F / 0x81
// constants are the VP8 edge-fill values for missing top / left samples.
// Several loop headers and match arms are elided in this listing.
832 fn recon_intra_mb(&mut self, dframe: &mut NASimpleVideoFrame<u8>, mb_x: usize, mb_y: usize, mb_coeff_skip: bool) -> DecoderResult<()> {
833 let mb_idx = mb_x + mb_y * self.mb_w;
834 let has_top = mb_y > 0;
835 let has_left = mb_x > 0;
836 let ydst = &mut dframe.data[dframe.offset[0]..];
837 let ystride = dframe.stride[0];
838 let mut yoff = mb_x * 16 + mb_y * 16 * ystride;
839 let ipred_ctx_y = &mut self.dstate.ipred_ctx_y;
840 ipred_ctx_y.has_top = has_top;
841 ipred_ctx_y.has_left = has_left;
842 let is_normal = self.mb_info[mb_idx].ymode != PredMode::BPred;
// 16x16 luma prediction path.
844 ipred_ctx_y.fill(ydst, yoff, ystride, 16, 16);
// Directional modes at frame edges degrade to a constant fill.
845 if !has_top && self.mb_info[mb_idx].ymode == PredMode::VPred {
846 IPred16x16::ipred_const(ydst, yoff, ystride, 0x7F)
847 } else if !has_left && self.mb_info[mb_idx].ymode == PredMode::HPred {
848 IPred16x16::ipred_const(ydst, yoff, ystride, 0x81)
850 match self.mb_info[mb_idx].ymode {
851 PredMode::DCPred => IPred16x16::ipred_dc(ydst, yoff, ystride, ipred_ctx_y),
852 PredMode::HPred => IPred16x16::ipred_h (ydst, yoff, ystride, ipred_ctx_y),
853 PredMode::VPred => IPred16x16::ipred_v (ydst, yoff, ystride, ipred_ctx_y),
854 PredMode::TMPred => IPred16x16::ipred_tm(ydst, yoff, ystride, ipred_ctx_y),
// BPred path: per-4x4 prediction using the stored subblock modes;
// tr_save carries top-right samples between subblocks of a row.
859 let mut iidx = mb_x * 4 + mb_y * 4 * self.ymode_stride;
860 let mut tr_save = [0x7Fu8; 16];
861 let tr_edge = if has_top { ydst[yoff - ystride + 15] } else { 0x7F };
864 ipred_ctx_y.has_left = has_left || x > 0;
865 let bmode = self.ymodes[iidx + x];
866 let cur_yoff = yoff + x * 4;
// NOTE(review): `mb_y < self.mb_w - 1` mixes a row index with the MB width —
// looks like it should be a horizontal bound (mb_x / frame width); confirm
// against the complete source before relying on this.
867 let has_tr = ipred_ctx_y.has_top && ((x < 3) || ((y == 0) && (mb_y < self.mb_w - 1)));
868 let has_dl = ipred_ctx_y.has_left && (x == 0) && (y < 3);
869 ipred_ctx_y.fill(ydst, cur_yoff, ystride,
870 if has_tr { 8 } else { 4 },
871 if has_dl { 8 } else { 4 });
// Edge subblocks replace missing neighbour samples with the fill constants.
873 if !has_top && y == 0 && (has_left || x > 0) && bmode != PredMode::TMPred {
874 ipred_ctx_y.top = [0x7F; 16];
875 ipred_ctx_y.tl = 0x7F;
877 if !has_left && x == 0 && (has_top || y > 0) && bmode != PredMode::TMPred {
878 ipred_ctx_y.left = [0x81; 16];
879 ipred_ctx_y.tl = 0x81;
881 if !has_left && !has_top && x == 0 && y == 0 && bmode != PredMode::DCPred {
882 ipred_ctx_y.top = [0x7F; 16];
883 ipred_ctx_y.left = [0x81; 16];
884 ipred_ctx_y.tl = 0x7F;
// Restore / stash the top-right samples around each subblock prediction.
889 ipred_ctx_y.top[i + 4] = tr_save[x * 4 + i];
893 tr_save[x * 4 + i] = ipred_ctx_y.top[i + 4];
896 if (mb_x == self.mb_w - 1) && has_top && (x == 3) {
898 ipred_ctx_y.top[i + 4] = tr_edge;
902 PredMode::DCPred => IPred4x4::ipred_dc(ydst, cur_yoff, ystride, ipred_ctx_y),
903 PredMode::TMPred => IPred4x4::ipred_tm(ydst, cur_yoff, ystride, ipred_ctx_y),
904 PredMode::HPred => IPred4x4::ipred_he(ydst, cur_yoff, ystride, ipred_ctx_y),
905 PredMode::VPred => IPred4x4::ipred_ve(ydst, cur_yoff, ystride, ipred_ctx_y),
906 PredMode::LDPred => IPred4x4::ipred_ld(ydst, cur_yoff, ystride, ipred_ctx_y),
907 PredMode::RDPred => IPred4x4::ipred_rd(ydst, cur_yoff, ystride, ipred_ctx_y),
908 PredMode::VRPred => IPred4x4::ipred_vr(ydst, cur_yoff, ystride, ipred_ctx_y),
909 PredMode::VLPred => IPred4x4::ipred_vl(ydst, cur_yoff, ystride, ipred_ctx_y),
910 PredMode::HDPred => IPred4x4::ipred_hd(ydst, cur_yoff, ystride, ipred_ctx_y),
911 PredMode::HUPred => IPred4x4::ipred_hu(ydst, cur_yoff, ystride, ipred_ctx_y),
// BPred adds each subblock's residue immediately after predicting it.
915 add_coeffs4x4(ydst, cur_yoff, ystride, &self.coeffs[x + y * 4]);
918 ipred_ctx_y.has_top = true;
920 iidx += self.ymode_stride;
// Chroma prediction: both planes share the MB's uvmode.
923 let dst = &mut dframe.data[0..];
924 let uoff = dframe.offset[1] + mb_x * 8 + mb_y * 8 * dframe.stride[1];
925 let ustride = dframe.stride[1];
926 let voff = dframe.offset[2] + mb_x * 8 + mb_y * 8 * dframe.stride[2];
927 let vstride = dframe.stride[2];
928 let ipred_ctx_u = &mut self.dstate.ipred_ctx_u;
929 let ipred_ctx_v = &mut self.dstate.ipred_ctx_v;
930 ipred_ctx_u.has_top = has_top;
931 ipred_ctx_v.has_top = has_top;
932 ipred_ctx_u.has_left = has_left;
933 ipred_ctx_v.has_left = has_left;
934 ipred_ctx_u.fill(dst, uoff, ustride, 8, 8);
935 ipred_ctx_v.fill(dst, voff, vstride, 8, 8);
937 if !has_top && self.mb_info[mb_idx].uvmode == PredMode::VPred {
938 IPred8x8::ipred_const(dst, uoff, ustride, 0x7F);
939 IPred8x8::ipred_const(dst, voff, vstride, 0x7F);
940 } else if !has_left && self.mb_info[mb_idx].uvmode == PredMode::HPred {
941 IPred8x8::ipred_const(dst, uoff, ustride, 0x81);
942 IPred8x8::ipred_const(dst, voff, vstride, 0x81);
944 match self.mb_info[mb_idx].uvmode {
945 PredMode::DCPred => {
946 IPred8x8::ipred_dc(dst, uoff, ustride, ipred_ctx_u);
947 IPred8x8::ipred_dc(dst, voff, vstride, ipred_ctx_v);
950 IPred8x8::ipred_h(dst, uoff, ustride, ipred_ctx_u);
951 IPred8x8::ipred_h(dst, voff, vstride, ipred_ctx_v);
954 IPred8x8::ipred_v(dst, uoff, ustride, ipred_ctx_u);
955 IPred8x8::ipred_v(dst, voff, vstride, ipred_ctx_v);
957 PredMode::TMPred => {
958 IPred8x8::ipred_tm(dst, uoff, ustride, ipred_ctx_u);
959 IPred8x8::ipred_tm(dst, voff, vstride, ipred_ctx_v);
// For BPred (is_normal == false) the luma residue was already added above.
965 self.add_residue(dframe, mb_x, mb_y, is_normal);
// Reconstructs an inter macroblock: motion compensation (whole-MB or per-4x4
// for split MVs) on luma, derived/averaged MVs on chroma, then residue.
// Bitstream version 0 uses the six-tap interpolators (mc_block*), later
// versions the bilinear ones. Some loop headers are elided in this listing.
969 fn recon_inter_mb(&mut self, dframe: &mut NASimpleVideoFrame<u8>, mb_x: usize, mb_y: usize, mb_coeff_skip: bool, rframe: VP8Ref) {
970 let refframe = match rframe {
971 VP8Ref::Last => self.shuf.get_last(),
972 VP8Ref::Golden => self.shuf.get_golden(),
973 VP8Ref::AltRef => self.shuf.get_altref(),
// Intra MBs never reach this function.
974 VP8Ref::Intra => unreachable!(),
976 let single_mv = self.mb_info[mb_x + mb_y * self.mb_w].mb_type != VPMBType::InterFourMV;
977 let mut iidx = mb_x * 4 + mb_y * 4 * self.mv_stride;
978 let mut mc_buf = self.mc_buf.get_data_mut().unwrap();
980 let dst = &mut dframe.data[0..];
981 let ystride = dframe.stride[0];
982 let mut yoff = dframe.offset[0] + mb_x * 16 + mb_y * 16 * ystride;
// Single-MV path: one 16x16 copy; MVs are doubled (quarter-pel to the
// interpolator's units).
984 if self.dstate.version == 0 {
985 mc_block16x16(dst, yoff, ystride, mb_x * 16, mb_y * 16,
986 self.mvs[iidx].x * 2, self.mvs[iidx].y * 2, refframe.clone(), 0, &mut mc_buf);
988 mc_block16x16_bilin(dst, yoff, ystride, mb_x * 16, mb_y * 16,
989 self.mvs[iidx].x * 2, self.mvs[iidx].y * 2, refframe.clone(), 0, &mut mc_buf);
// Split-MV path: one 4x4 copy per subblock.
994 if self.dstate.version == 0 {
995 mc_block4x4(dst, yoff + x * 4, ystride, mb_x * 16 + x * 4, mb_y * 16 + y * 4,
996 self.mvs[iidx + x].x * 2, self.mvs[iidx + x].y * 2, refframe.clone(), 0, &mut mc_buf);
998 mc_block4x4_bilin(dst, yoff + x * 4, ystride, mb_x * 16 + x * 4, mb_y * 16 + y * 4,
999 self.mvs[iidx + x].x * 2, self.mvs[iidx + x].y * 2, refframe.clone(), 0, &mut mc_buf);
1002 yoff += 4 * ystride;
1003 iidx += self.mv_stride;
// Chroma motion compensation.
1007 let mut iidx = mb_x * 4 + mb_y * 4 * self.mv_stride;
1008 let mut uoff = dframe.offset[1] + mb_x * 8 + mb_y * 8 * dframe.stride[1];
1009 let ustride = dframe.stride[1];
1010 let mut voff = dframe.offset[2] + mb_x * 8 + mb_y * 8 * dframe.stride[2];
1011 let vstride = dframe.stride[2];
1013 let mut chroma_mv = self.mvs[iidx];
1015 if self.dstate.version == 0 {
1016 mc_block8x8(dst, uoff, ustride, mb_x * 8, mb_y * 8, chroma_mv.x, chroma_mv.y, refframe.clone(), 1, &mut mc_buf);
1017 mc_block8x8(dst, voff, vstride, mb_x * 8, mb_y * 8, chroma_mv.x, chroma_mv.y, refframe, 2, &mut mc_buf);
// Version 3 truncates chroma MVs to full-pel (masking elided).
1019 if self.dstate.version == 3 {
1023 mc_block8x8_bilin(dst, uoff, ustride, mb_x * 8, mb_y * 8, chroma_mv.x, chroma_mv.y, refframe.clone(), 1, &mut mc_buf);
1024 mc_block8x8_bilin(dst, voff, vstride, mb_x * 8, mb_y * 8, chroma_mv.x, chroma_mv.y, refframe, 2, &mut mc_buf);
// Split MVs: chroma MV is the rounded average of the four covering luma MVs.
1029 let mut chroma_mv = self.mvs[iidx + x * 2] + self.mvs[iidx + x * 2 + 1]
1030 + self.mvs[iidx + x * 2 + self.mv_stride]
1031 + self.mvs[iidx + x * 2 + self.mv_stride + 1];
1032 if chroma_mv.x < 0 {
1037 if chroma_mv.y < 0 {
1045 if self.dstate.version == 3 {
1050 if self.dstate.version == 0 {
1051 mc_block4x4(dst, uoff + x * 4, ustride, mb_x * 8 + x * 4, mb_y * 8 + y * 4,
1052 chroma_mv.x, chroma_mv.y, refframe.clone(), 1, &mut mc_buf);
1053 mc_block4x4(dst, voff + x * 4, vstride, mb_x * 8 + x * 4, mb_y * 8 + y * 4,
1054 chroma_mv.x, chroma_mv.y, refframe.clone(), 2, &mut mc_buf);
1056 mc_block4x4_bilin(dst, uoff + x * 4, ustride, mb_x * 8 + x * 4, mb_y * 8 + y * 4,
1057 chroma_mv.x, chroma_mv.y, refframe.clone(), 1, &mut mc_buf);
1058 mc_block4x4_bilin(dst, voff + x * 4, vstride, mb_x * 8 + x * 4, mb_y * 8 + y * 4,
1059 chroma_mv.x, chroma_mv.y, refframe.clone(), 2, &mut mc_buf);
1062 uoff += ustride * 4;
1063 voff += vstride * 4;
1064 iidx += 2 * self.mv_stride;
// Residue is added unconditionally for luma and chroma on this path.
1068 self.add_residue(dframe, mb_x, mb_y, true);
// Applies the VP8 loop filter to one macroblock: MB-edge filtering on the
// left/top borders plus optional inner 4x4-edge filtering. The simple filter
// touches luma only; the normal filter also handles chroma. Some loop
// headers and edge conditions are elided in this listing.
1071 fn loop_filter_mb(&mut self, dframe: &mut NASimpleVideoFrame<u8>, mb_x: usize, mb_y: usize, loop_str: u8, filter_inner: bool) {
// High-edge-variance thresholds, indexed by [is_intra][loop strength].
1072 const HIGH_EDGE_VAR_THR: [[u8; 64]; 2] = [
1074 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1,
1075 1, 1, 1, 1, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
1076 2, 2, 2, 2, 2, 2, 2, 2, 3, 3, 3, 3, 3, 3, 3, 3,
1077 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3
1079 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1,
1080 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
1081 1, 1, 1, 1, 1, 1, 1, 1, 2, 2, 2, 2, 2, 2, 2, 2,
1082 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2
// Derive the interior / block / edge limits from strength and sharpness.
1085 let inner_thr = if self.dstate.loop_sharpness == 0 {
1088 let bound1 = i16::from(9 - self.dstate.loop_sharpness);
1089 let shift = (self.dstate.loop_sharpness + 3) >> 2;
1090 (i16::from(loop_str) >> shift).min(bound1).max(1)
1092 let blk_thr = i16::from(loop_str) * 2 + inner_thr;
1093 let edge_thr = blk_thr + 4;
1094 let hev_thr = i16::from(HIGH_EDGE_VAR_THR[if self.dstate.is_intra { 1 } else { 0 }][loop_str as usize]);
1096 let ystride = dframe.stride[0];
1097 let ustride = dframe.stride[1];
1098 let vstride = dframe.stride[2];
1099 let ypos = dframe.offset[0] + mb_x * 16 + mb_y * 16 * ystride;
1100 let upos = dframe.offset[1] + mb_x * 8 + mb_y * 8 * ustride;
1101 let vpos = dframe.offset[2] + mb_x * 8 + mb_y * 8 * vstride;
1103 let (loop_edge, loop_inner) = if self.dstate.lf_simple {
1104 (simple_loop_filter as LoopFilterFunc, simple_loop_filter as LoopFilterFunc)
1106 (normal_loop_filter_edge as LoopFilterFunc, normal_loop_filter_inner as LoopFilterFunc)
// Vertical edges (step 1 = horizontal filtering direction).
1110 loop_edge(dframe.data, ypos, 1, ystride, 16, edge_thr, inner_thr, hev_thr);
1111 if !self.dstate.lf_simple {
1112 loop_edge(dframe.data, upos, 1, ustride, 8, edge_thr, inner_thr, hev_thr);
1113 loop_edge(dframe.data, vpos, 1, vstride, 8, edge_thr, inner_thr, hev_thr);
1118 loop_inner(dframe.data, ypos + x * 4, 1, ystride, 16, blk_thr, inner_thr, hev_thr);
1120 if !self.dstate.lf_simple {
1121 loop_inner(dframe.data, upos + 4, 1, ustride, 8, blk_thr, inner_thr, hev_thr);
1122 loop_inner(dframe.data, vpos + 4, 1, vstride, 8, blk_thr, inner_thr, hev_thr);
// Horizontal edges (step = stride, vertical filtering direction).
1127 loop_edge(dframe.data, ypos, ystride, 1, 16, edge_thr, inner_thr, hev_thr);
1128 if !self.dstate.lf_simple {
1129 loop_edge(dframe.data, upos, ustride, 1, 8, edge_thr, inner_thr, hev_thr);
1130 loop_edge(dframe.data, vpos, vstride, 1, 8, edge_thr, inner_thr, hev_thr);
1135 loop_inner(dframe.data, ypos + y * 4 * ystride, ystride, 1, 16, blk_thr, inner_thr, hev_thr);
1137 if !self.dstate.lf_simple {
1138 loop_inner(dframe.data, upos + 4 * ustride, ustride, 1, 8, blk_thr, inner_thr, hev_thr);
1139 loop_inner(dframe.data, vpos + 4 * vstride, vstride, 1, 8, blk_thr, inner_thr, hev_thr);
1145 impl NADecoder for VP8Decoder {
// Decoder initialization: forces YUV420 output, preallocates the frame pool
// (5 buffers: 3 references + in-flight frames) and sizes internal arrays.
1146 fn init(&mut self, supp: &mut NADecoderSupport, info: NACodecInfoRef) -> DecoderResult<()> {
1147 if let NACodecTypeInfo::Video(vinfo) = info.get_properties() {
1148 let fmt = YUV420_FORMAT;
1149 let myvinfo = NAVideoInfo::new(vinfo.get_width(), vinfo.get_height(), false, fmt);
1150 let myinfo = NACodecTypeInfo::Video(myvinfo);
1151 self.info = NACodecInfo::new_ref(info.get_name(), myinfo, info.get_extradata()).into_ref();
1153 supp.pool_u8.set_dec_bufs(5);
// NOTE(review): preallocation uses vinfo.get_format() while myvinfo was built
// with YUV420_FORMAT — possibly intentional, confirm they always agree.
1154 supp.pool_u8.prealloc_video(NAVideoInfo::new(myvinfo.get_width(), myvinfo.get_height(), false, vinfo.get_format()), 4)?;
1155 self.set_dimensions(myvinfo.get_width(), myvinfo.get_height());
// Non-video codec info is rejected outright.
1158 Err(DecoderError::InvalidData)
// NB: many interior lines of this function are elided in this listing; the
// comments below describe only what the visible code establishes.
/// Decodes one VP8 frame from `pkt` into a freshly pooled YUV 4:2:0 buffer,
/// updates the last/golden/altref reference shuffler, and returns the frame.
1161 #[allow(clippy::cognitive_complexity)]
1162 fn decode(&mut self, supp: &mut NADecoderSupport, pkt: &NAPacket) -> DecoderResult<NAFrameRef> {
1163 let src = pkt.get_buffer();
1164 validate!(src.len() > 4);
// Uncompressed frame tag (3 bytes LE): bit 0 = keyframe flag (0 means intra),
// bits 1..=3 = bitstream version (must be <= 3), bit 4 = show_frame,
// bits 5.. = size of the first compressed partition.
1166 let frame_tag = read_u24le(src.as_slice())?;
1167 self.dstate.is_intra = (frame_tag & 1) == 0;
1168 self.dstate.version = ((frame_tag >> 1) & 7) as u8;
1169 validate!(self.dstate.version <= 3);
1170 let _show_frame = ((frame_tag >> 4) & 1) != 0;
// Keyframes have a 10-byte uncompressed header (tag + marker + dimensions),
// inter frames only the 3-byte tag.
1171 let part1_off = if self.dstate.is_intra { 10 } else { 3 };
1172 let part2_off = (frame_tag >> 5) as usize;
1173 validate!(src.len() >= part2_off && part2_off > part1_off);
// Keyframe-only header: start-code marker 0x9D012A followed by 14-bit
// width/height (the top 2 bits are scaling hints, ignored here).
1175 if self.dstate.is_intra {
1176 validate!(src.len() > 10);
1177 let marker = read_u24be(&src[3..6])?;
1178 validate!(marker == 0x9D012A);
1179 let width_ = read_u16le(&src[6..8])?;
1180 let height_ = read_u16le(&src[8..10])?;
// Round up to even macroblock-friendly dimensions while masking off the
// 2-bit scale fields.
1181 let width = ((width_ + 1) & 0x3FFE) as usize;
1182 let height = ((height_ + 1) & 0x3FFE) as usize;
1183 // let hscale = width_ >> 14;
1184 // let vscale = height_ >> 14;
1186 validate!((width > 0) && (height > 0));
1187 self.set_dimensions(width, height);
1189 self.dstate.reset();
// Inter frames need at least one previously decoded reference frame.
1191 if !self.shuf.has_refs() {
1192 return Err(DecoderError::MissingReference);
// Boolean coder over the first partition: frame header, modes and MVs.
1196 let mut bc = BoolCoder::new(&src[part1_off..][..part2_off])?;
1198 if self.dstate.is_intra {
1199 let _color_space = bc.read_bool();
1200 let _clamping_type = bc.read_bool();
// Segmentation: optional per-segment quantizer / loop-strength overrides.
1203 self.dstate.segmentation = bc.read_bool();
1204 if self.dstate.segmentation {
1205 self.update_segmentation(&mut bc)?;
1207 self.dstate.update_seg_map = false;
1208 self.dstate.force_quant = None;
1209 self.dstate.force_loop_str = None;
// Loop filter parameters (level 0 effectively disables filtering below).
1212 self.dstate.lf_simple = bc.read_bool();
1213 self.dstate.loop_filter_level = bc.read_bits(6) as u8;
1214 self.dstate.loop_sharpness = bc.read_bits(3) as u8;
1216 self.mb_lf_adjustments(&mut bc)?;
// 1, 2, 4 or 8 DCT coefficient partitions.
1218 let num_partitions = 1 << bc.read_bits(2);
1220 self.quant_indices(&mut bc)?;
// Reference refresh bookkeeping: keyframes refresh everything (4 appears to
// mean "copy the current frame" -- see the shuffler updates near the end);
// inter frames signal per-reference refresh/copy modes (3 is invalid).
1222 let (keep_probs, update_last, update_gf, update_ar) = if self.dstate.is_intra {
1223 let refresh_entropy_probs = bc.read_bool();
1224 (refresh_entropy_probs, true, 4, 4)
1226 let refresh_golden_frame = bc.read_bool();
1227 let refresh_alternate_frame = bc.read_bool();
1228 let copy_to_golden = if !refresh_golden_frame {
1231 validate!(copy_to_golden != 3);
1232 let copy_to_altref = if !refresh_alternate_frame {
1235 validate!(copy_to_altref != 3);
1236 self.dstate.sign_bias[0] = bc.read_bool();
1237 self.dstate.sign_bias[1] = bc.read_bool();
1238 let refresh_entropy_probs = bc.read_bool();
1239 let refresh_last = bc.read_bool();
1240 (refresh_entropy_probs, refresh_last, copy_to_golden, copy_to_altref)
// Snapshot entropy probabilities so they can be restored after the frame
// when the header asked not to persist this frame's updates.
1244 self.dstate.save(&mut self.tmp_probs);
1247 self.read_dct_coef_prob_upd(&mut bc)?;
// Per-MB "no coefficients" skip flag and its probability.
1249 let mb_no_coeff_skip = bc.read_bool();
1250 let prob_skip_false = bc.read_byte();
// Inter frames additionally code intra/last/golden prediction probabilities
// and optional Y/UV mode probability updates.
1252 if !self.dstate.is_intra {
1253 self.dstate.prob_intra_pred = bc.read_byte();
1254 self.dstate.prob_last_pred = bc.read_byte();
1255 self.dstate.prob_gold_pred = bc.read_byte();
1258 self.dstate.kf_ymode_prob[i] = bc.read_byte();
1263 self.dstate.kf_uvmode_prob[i] = bc.read_byte();
1266 self.read_mv_prob_upd(&mut bc)?;
// Locate the coefficient partitions: (num_partitions - 1) 24-bit LE sizes
// immediately follow the first partition, then the partition payloads.
1269 let mut data_start = part1_off + part2_off + (num_partitions - 1) * 3;
1270 let mut part_offs = [0; 8];
1271 validate!(data_start <= src.len());
1272 let mut size = src.len() - data_start;
1273 for i in 0..num_partitions - 1 {
1274 let len = read_u24le(&src[part1_off + part2_off + i * 3..][..3])? as usize;
1275 validate!(size >= len);
1276 part_offs[i] = data_start;
1280 part_offs[num_partitions - 1] = data_start;
// Fill the unused slots so every array element holds a valid offset.
1281 for start in part_offs[num_partitions..].iter_mut() {
1282 *start = data_start;
// NOTE(review): materializing [BoolCoder; 8] via
// MaybeUninit::uninit().assume_init() is undefined behavior even though every
// slot is overwritten by std::ptr::write right below; this should be
// restructured around [MaybeUninit<BoolCoder>; 8] (or std::array::from_fn).
1284 let mut bc_src = unsafe {
1285 let mut arr: [BoolCoder; 8] = std::mem::MaybeUninit::uninit().assume_init();
1286 for (bc, &off) in arr.iter_mut().zip(part_offs.iter()) {
1287 std::ptr::write(bc, BoolCoder::new(&src[off..]).unwrap());
// Obtain an output buffer from the pool; if the negotiated video info no
// longer matches (e.g. mid-stream size change) the pool is rebuilt.
1292 let vinfo = NAVideoInfo::new(self.width, self.height, false, YUV420_FORMAT);
1293 let ret = supp.pool_u8.get_free();
1295 return Err(DecoderError::AllocError);
1297 let mut buf = ret.unwrap();
1298 if buf.get_info() != vinfo {
1300 supp.pool_u8.reset();
1301 supp.pool_u8.prealloc_video(vinfo, 4)?;
1302 let ret = supp.pool_u8.get_free();
1304 return Err(DecoderError::AllocError);
1308 let mut dframe = NASimpleVideoFrame::from_video_buf(&mut buf).unwrap();
// Main macroblock loop: modes/MVs come from the header partition (bc),
// residue comes from the per-row coefficient partition (bc_main).
// Version 3 streams never run the loop filter.
1311 self.pcache.reset();
1312 let mut rframe = VP8Ref::Last;
1313 let loop_filter = self.dstate.version != 3 && self.dstate.loop_filter_level > 0;
1314 for mb_y in 0..self.mb_h {
// Coefficient partitions are assigned to rows round-robin.
1315 let bc_main = &mut bc_src[mb_y & (num_partitions - 1)];
1316 for mb_x in 0..self.mb_w {
// (mb_idx bookkeeping is elided in this listing.)
1317 if self.dstate.update_seg_map {
1318 self.decode_mb_features(&mut bc, mb_idx)?;
1320 if self.dstate.segmentation {
1321 self.set_cur_segment(mb_idx);
1323 let mb_coeff_skip = if mb_no_coeff_skip {
1324 bc.read_prob(prob_skip_false)
1326 self.dstate.has_y2 = true;
// Keyframe macroblocks: intra modes only, keyframe-specific trees;
// BPred codes a 4x4 sub-mode per block using top/left context.
1327 if self.dstate.is_intra {
1328 let ymode = bc.read_tree(KF_Y_MODE_TREE, KF_Y_MODE_TREE_PROBS);
1329 if ymode == PredMode::BPred {
1330 self.dstate.has_y2 = false;
1331 let mut iidx = mb_x * 4 + mb_y * 4 * self.ymode_stride;
1334 let top_mode = if (y > 0) || (mb_y > 0) {
1335 self.ymodes[iidx + x - self.ymode_stride]
1339 let left_mode = if (x > 0) || (mb_x > 0) {
1340 self.ymodes[iidx + x - 1]
1344 let top_idx = top_mode.to_b_index();
1345 let left_idx = left_mode.to_b_index();
1346 let bmode = bc.read_tree(B_MODE_TREE, &KF_B_MODE_TREE_PROBS[top_idx][left_idx]);
1347 self.ymodes[iidx + x] = bmode;
1349 iidx += self.ymode_stride;
1352 self.fill_ymode(mb_x, mb_y, ymode);
1354 let uvmode = bc.read_tree(UV_MODE_TREE, KF_UV_MODE_TREE_PROBS);
1355 self.mb_info[mb_idx].mb_type = VPMBType::Intra;
1356 self.mb_info[mb_idx].ymode = ymode;
1357 self.mb_info[mb_idx].uvmode = uvmode;
1358 self.mb_info[mb_idx].rframe = VP8Ref::Intra;
// Inter frame, intra-coded macroblock (context-free B-mode tree here).
1359 } else if !bc.read_prob(self.dstate.prob_intra_pred) {
1360 let ymode = bc.read_tree(Y_MODE_TREE, &self.dstate.kf_ymode_prob);
1361 if ymode == PredMode::BPred {
1362 self.dstate.has_y2 = false;
1363 let mut iidx = mb_x * 4 + mb_y * 4 * self.ymode_stride;
1366 let bmode = bc.read_tree(B_MODE_TREE, B_MODE_TREE_PROBS);
1367 self.ymodes[iidx + x] = bmode;
1369 iidx += self.ymode_stride;
1372 self.fill_ymode(mb_x, mb_y, PredMode::Inter);
1374 let uvmode = bc.read_tree(UV_MODE_TREE, &self.dstate.kf_uvmode_prob);
1375 self.mb_info[mb_idx].mb_type = VPMBType::Intra;
1376 self.mb_info[mb_idx].ymode = ymode;
1377 self.mb_info[mb_idx].uvmode = uvmode;
1378 self.mb_info[mb_idx].rframe = VP8Ref::Intra;
1379 self.fill_mv(mb_x, mb_y, ZERO_MV);
// Inter macroblock: choose reference (last/golden/altref), then predict
// MVs from neighbours and read the MV mode.
1381 rframe = if !bc.read_prob(self.dstate.prob_last_pred) {
1383 } else if !bc.read_prob(self.dstate.prob_gold_pred) {
1389 let frm_sign = self.get_frame_sign(rframe);
1390 let (mvprobs, nearest_mv, near_mv, pred_mv) = self.find_mv_pred(mb_x, mb_y, frm_sign);
1391 let mbtype = bc.read_tree(MV_REF_TREE, &mvprobs);
1394 VPMBType::InterNearest => {
1395 self.fill_mv(mb_x, mb_y, nearest_mv);
1397 VPMBType::InterNear => {
1398 self.fill_mv(mb_x, mb_y, near_mv);
1400 VPMBType::InterNoMV => {
1401 self.fill_mv(mb_x, mb_y, ZERO_MV);
1403 VPMBType::InterMV => {
// Note the bitstream order: Y component first, then X.
1404 let dmy = decode_mv_component(&mut bc, &self.dstate.mv_probs[0]);
1405 let dmx = decode_mv_component(&mut bc, &self.dstate.mv_probs[1]);
1406 let new_mv = pred_mv + MV{ x: dmx, y: dmy };
1407 self.fill_mv(mb_x, mb_y, new_mv);
1409 VPMBType::InterFourMV => {
// Split MVs disable the separate Y2 (DC) block.
1410 self.dstate.has_y2 = false;
1411 self.do_split_mv(&mut bc, mb_x, mb_y, pred_mv)?;
1413 _ => unreachable!(),
1416 self.fill_ymode(mb_x, mb_y, PredMode::Inter);
1417 self.mb_info[mb_idx].mb_type = mbtype;
1418 self.mb_info[mb_idx].ymode = PredMode::Inter;
1419 self.mb_info[mb_idx].uvmode = PredMode::Inter;
1420 self.mb_info[mb_idx].rframe = rframe;
1422 let has_coeffs = if !mb_coeff_skip {
1423 self.decode_residue(bc_main, mb_x)
// Preserve the Y2 prediction context across macroblocks that have no Y2
// block, so the next MB with Y2 sees the correct left/top neighbours.
1425 let y2_left = self.pcache.y2_pred_left;
1426 let y2_top = self.pcache.y2_pred.data[self.pcache.y2_pred.xpos + mb_x - self.pcache.y2_pred.stride];
1427 self.pcache.reset_left();
1428 if !self.dstate.has_y2 {
1429 self.pcache.y2_pred_left = y2_left;
1430 self.pcache.y2_pred.data[self.pcache.y2_pred.xpos + mb_x] = y2_top;
// Reconstruct the macroblock (intra prediction or motion compensation).
1434 match self.mb_info[mb_idx].mb_type {
1435 VPMBType::Intra => {
1436 self.recon_intra_mb(&mut dframe, mb_x, mb_y, mb_coeff_skip)?;
1439 self.recon_inter_mb(&mut dframe, mb_x, mb_y, mb_coeff_skip, rframe);
// Per-MB loop filter strength: segment override if present, otherwise the
// frame-level level, then optional reference- and mode-based deltas,
// clamped to the valid 0..=63 range.
1443 if let Some(loop_str) = self.dstate.force_loop_str {
1444 self.mb_info[mb_idx].loop_str = loop_str;
1446 self.mb_info[mb_idx].loop_str = self.dstate.loop_filter_level;
1448 if self.dstate.lf_delta {
1449 let mut loop_str = self.mb_info[mb_idx].loop_str as i8;
1450 let idx = match self.mb_info[mb_idx].rframe {
1453 VP8Ref::Golden => 2,
1454 VP8Ref::AltRef => 3,
1456 loop_str += self.dstate.lf_frame_delta[idx];
1457 let idx = match self.mb_info[mb_idx].mb_type {
1458 VPMBType::Intra => 0,
1459 VPMBType::InterNoMV => 1,
1460 VPMBType::InterFourMV => 3,
1463 if self.mb_info[mb_idx].mb_type != VPMBType::Intra || self.mb_info[mb_idx].ymode == PredMode::BPred {
1464 loop_str += self.dstate.lf_mode_delta[idx];
1466 self.mb_info[mb_idx].loop_str = loop_str.max(0).min(63) as u8;
// Inner-edge filtering is needed when the MB has coefficients or uses
// per-block modes/MVs.
1468 self.mb_info[mb_idx].inner_filt = has_coeffs || (self.mb_info[mb_idx].ymode == PredMode::BPred) || (self.mb_info[mb_idx].mb_type == VPMBType::InterFourMV);
1472 self.pcache.update_row();
1473 self.pcache.reset_left();
// Second pass: in-loop deblocking over all macroblocks (the guard using
// `loop_filter` computed above is elided in this listing -- confirm).
1477 for mb_y in 0..self.mb_h {
1478 for mb_x in 0..self.mb_w {
1479 let loop_str = self.mb_info[mb_idx].loop_str;
1481 self.loop_filter_mb(&mut dframe, mb_x, mb_y, loop_str, self.mb_info[mb_idx].inner_filt);
// Roll back entropy probabilities when the frame asked not to persist its
// updates (presumably guarded by `keep_probs`; condition elided here).
1489 self.dstate.restore(&self.tmp_probs);
// Reference shuffling per the update_gf/update_ar/update_last flags:
// copy last/golden/altref between slots or install the current frame
// (the surrounding match/if arms are elided in this listing).
1494 let last = self.shuf.get_last().unwrap();
1495 self.shuf.add_altref_frame(last);
1498 let golden = self.shuf.get_golden().unwrap();
1499 self.shuf.add_altref_frame(golden);
1504 4 => self.shuf.add_golden_frame(buf.clone()),
1506 let last = self.shuf.get_last().unwrap();
1507 self.shuf.add_golden_frame(last);
1510 let altref = self.shuf.get_altref().unwrap();
1511 self.shuf.add_golden_frame(altref);
1516 self.shuf.add_altref_frame(buf.clone());
1519 self.shuf.add_frame(buf.clone());
// Wrap the decoded buffer into an output frame with keyframe/type flags.
1522 let mut frm = NAFrame::new_from_pkt(pkt, self.info.clone(), NABufferType::Video(buf));
1523 frm.set_keyframe(self.dstate.is_intra);
1524 frm.set_frame_type(if self.dstate.is_intra { FrameType::I } else { FrameType::P });
// Flush: body elided in this listing (presumably clears the reference-frame
// shuffler via VP8Shuffler::clear -- TODO confirm against the full source).
1527 fn flush(&mut self) {
// This decoder exposes no user-configurable options, so all three option
// hooks are no-ops.
1532 impl NAOptionHandler for VP8Decoder {
1533 fn get_supported_options(&self) -> &[NAOptionDefinition] { &[] }
1534 fn set_options(&mut self, _options: &[NAOption]) { }
1535 fn query_option_value(&self, _name: &str) -> Option<NAValue> { None }
/// Creates a boxed VP8 decoder instance for registration with the codec list.
1538 pub fn get_decoder() -> Box<dyn NADecoder + Send> {
1539 Box::new(VP8Decoder::new())
1544 use nihav_core::codecs::RegisteredDecoders;
1545 use nihav_core::demuxers::RegisteredDemuxers;
1546 use nihav_codec_support::test::dec_video::*;
1547 use crate::duck_register_all_decoders;
1548 use crate::duck_register_all_demuxers;
// Common test driver: registers the duck demuxers/decoders, decodes the given
// IVF sample with the "vp8" decoder and compares against the expected MD5.
1550 fn test_vp8_core(name: &str, hash: [u32; 4]) {
1551 let mut dmx_reg = RegisteredDemuxers::new();
1552 duck_register_all_demuxers(&mut dmx_reg);
1553 let mut dec_reg = RegisteredDecoders::new();
1554 duck_register_all_decoders(&mut dec_reg);
1556 test_decoding("ivf", "vp8", name, None, &dmx_reg,
1557 &dec_reg, ExpectedTestResult::MD5(hash));
// Per-sample regression tests over the vp80-00-comprehensive conformance
// suite; the #[test] fn wrappers are elided in this listing. Each expected
// value is the MD5 of the fully decoded output.
1562 test_vp8_core("assets/Duck/VP8/vp80-00-comprehensive-001.ivf",
1563 [0xfad12607, 0x4e1bd536, 0x3d43b9d1, 0xcadddb71]);
1567 test_vp8_core("assets/Duck/VP8/vp80-00-comprehensive-002.ivf",
1568 [0x182f03dd, 0x264ebac0, 0x4e24c7c9, 0x499d7cdb]);
1572 test_vp8_core("assets/Duck/VP8/vp80-00-comprehensive-003.ivf",
1573 [0xe5fe668b, 0x03390002, 0x2c3eb0ba, 0x76a44bd1]);
1577 test_vp8_core("assets/Duck/VP8/vp80-00-comprehensive-004.ivf",
1578 [0x95097ce9, 0x808c1d47, 0xe03f99c4, 0x8ad111ec]);
1582 test_vp8_core("assets/Duck/VP8/vp80-00-comprehensive-005.ivf",
1583 [0x0f469e4f, 0xd1dea533, 0xe5580688, 0xb2d242ff]);
1587 test_vp8_core("assets/Duck/VP8/vp80-00-comprehensive-006.ivf",
1592 test_vp8_core("assets/Duck/VP8/vp80-00-comprehensive-007.ivf",
1593 [0x92526913, 0xd89b6a9b, 0x00f2d602, 0xdef08bce]);
1597 test_vp8_core("assets/Duck/VP8/vp80-00-comprehensive-008.ivf",
1598 [0x1676d1eb, 0x19bd175e, 0xc5bb10f5, 0xd49f24f1]);
1602 test_vp8_core("assets/Duck/VP8/vp80-00-comprehensive-009.ivf",
1603 [0x19201a2d, 0x535bd82f, 0x41c1a565, 0x8def5379]);
1607 test_vp8_core("assets/Duck/VP8/vp80-00-comprehensive-010.ivf",
1608 [0x61d05919, 0xa9883d9f, 0x215eb3f2, 0xdb63eb13]);
1612 test_vp8_core("assets/Duck/VP8/vp80-00-comprehensive-011.ivf",
1613 [0x1a0afe5e, 0x70512a03, 0x323a8f11, 0x76bcf022]);
1617 test_vp8_core("assets/Duck/VP8/vp80-00-comprehensive-012.ivf",
1618 [0x4ea997c8, 0x0dc2087e, 0x6deec81f, 0x1ecf6668]);
1622 test_vp8_core("assets/Duck/VP8/vp80-00-comprehensive-013.ivf",
1623 [0x93169305, 0xd3054327, 0xbe3cc074, 0xf0773a75]);
1627 test_vp8_core("assets/Duck/VP8/vp80-00-comprehensive-014.ivf",
1632 test_vp8_core("assets/Duck/VP8/vp80-00-comprehensive-015.ivf",
1633 [0x23b9cc58, 0x2e344726, 0xe76cda09, 0x2b416bcf]);
1637 test_vp8_core("assets/Duck/VP8/vp80-00-comprehensive-016.ivf",
1638 [0x55e889d2, 0x2f99718c, 0xf6936d55, 0xf8ade12b]);
1642 test_vp8_core("assets/Duck/VP8/vp80-00-comprehensive-017.ivf",
1643 [0x95a68ffb, 0x228d1d8c, 0x6ee54f16, 0xa10fb9eb]);
// DC quantizer lookup table, indexed by quantizer index 0..=127. Values are
// monotonically non-decreasing: a larger index means coarser quantization.
1647 const DC_QUANTS: [i16; 128] = [
1648 4, 5, 6, 7, 8, 9, 10, 10,
1649 11, 12, 13, 14, 15, 16, 17, 17,
1650 18, 19, 20, 20, 21, 21, 22, 22,
1651 23, 23, 24, 25, 25, 26, 27, 28,
1652 29, 30, 31, 32, 33, 34, 35, 36,
1653 37, 37, 38, 39, 40, 41, 42, 43,
1654 44, 45, 46, 46, 47, 48, 49, 50,
1655 51, 52, 53, 54, 55, 56, 57, 58,
1656 59, 60, 61, 62, 63, 64, 65, 66,
1657 67, 68, 69, 70, 71, 72, 73, 74,
1658 75, 76, 76, 77, 78, 79, 80, 81,
1659 82, 83, 84, 85, 86, 87, 88, 89,
1660 91, 93, 95, 96, 98, 100, 101, 102,
1661 104, 106, 108, 110, 112, 114, 116, 118,
1662 122, 124, 126, 128, 130, 132, 134, 136,
1663 138, 140, 143, 145, 148, 151, 154, 157
// AC quantizer lookup table, indexed by quantizer index 0..=127; companion to
// DC_QUANTS (linear up to index ~60, then progressively larger steps).
1666 const AC_QUANTS: [i16; 128] = [
1667 4, 5, 6, 7, 8, 9, 10, 11,
1668 12, 13, 14, 15, 16, 17, 18, 19,
1669 20, 21, 22, 23, 24, 25, 26, 27,
1670 28, 29, 30, 31, 32, 33, 34, 35,
1671 36, 37, 38, 39, 40, 41, 42, 43,
1672 44, 45, 46, 47, 48, 49, 50, 51,
1673 52, 53, 54, 55, 56, 57, 58, 60,
1674 62, 64, 66, 68, 70, 72, 74, 76,
1675 78, 80, 82, 84, 86, 88, 90, 92,
1676 94, 96, 98, 100, 102, 104, 106, 108,
1677 110, 112, 114, 116, 119, 122, 125, 128,
1678 131, 134, 137, 140, 143, 146, 149, 152,
1679 155, 158, 161, 164, 167, 170, 173, 177,
1680 181, 185, 189, 193, 197, 201, 205, 209,
1681 213, 217, 221, 225, 229, 234, 239, 245,
1682 249, 254, 259, 264, 269, 274, 279, 284