1 use nihav_core::codecs::*;
2 use nihav_core::io::byteio::*;
3 use nihav_codec_support::codecs::{MV, ZERO_MV};
4 use super::vpcommon::*;
6 use super::vp78data::*;
// NOTE(review): this derive attribute sits several elided lines before the
// struct below — it presumably belongs to an unseen type; confirm in full file.
10 #[derive(Clone,Copy,PartialEq,Debug,Default)]
// Frame shuffler: holds the three VP8 reference frames
// (last decoded / golden / altref) used for inter prediction.
20 pub struct VP8Shuffler {
// most recently decoded frame
21 lastframe: Option<NAVideoBufferRef<u8>>,
// golden reference frame
22 goldframe: Option<NAVideoBufferRef<u8>>,
// alternate reference frame
23 altframe: Option<NAVideoBufferRef<u8>>,
// Creates an empty shuffler with no stored references.
27 pub fn new() -> Self { Self::default() }
// Drops the stored references (altframe reset is on an elided line — confirm).
28 pub fn clear(&mut self) {
29 self.lastframe = None;
30 self.goldframe = None;
// Stores `buf` as the "last frame" reference.
33 pub fn add_frame(&mut self, buf: NAVideoBufferRef<u8>) {
34 self.lastframe = Some(buf);
// Stores `buf` as the golden-frame reference.
36 pub fn add_golden_frame(&mut self, buf: NAVideoBufferRef<u8>) {
37 self.goldframe = Some(buf);
// Stores `buf` as the altref-frame reference.
39 pub fn add_altref_frame(&mut self, buf: NAVideoBufferRef<u8>) {
40 self.altframe = Some(buf);
// Returns a new handle to the last frame, if present (cheap buffer-ref clone).
42 pub fn get_last(&mut self) -> Option<NAVideoBufferRef<u8>> {
43 self.lastframe.as_ref().cloned()
// Returns a new handle to the golden frame, if present.
45 pub fn get_golden(&mut self) -> Option<NAVideoBufferRef<u8>> {
46 self.goldframe.as_ref().cloned()
// Returns a new handle to the altref frame, if present.
48 pub fn get_altref(&mut self) -> Option<NAVideoBufferRef<u8>> {
49 self.altframe.as_ref().cloned()
// True when at least the last frame is available; inter frames cannot be
// decoded without it (checked before decoding a non-keyframe).
51 pub fn has_refs(&self) -> bool {
52 self.lastframe.is_some()
// Fragment of `SBParams` (header elided): per-subblock decoding parameters.
// Indexing is [coeff type][coeff band][context] -> 11 token probabilities.
57 coef_probs: &'a [[[[u8; 11]; 3]; 8]; 4],
// DCT token tree without the EOB leaf: used right after a zero coefficient,
// where EOB cannot occur (probabilities are then taken from `probs[1..]`).
61 pub const COEF_NE_TREE: &[VPTreeDef<DCTToken>] = &[
62 VPTreeDef::Value(DCTToken::Zero), VPTreeDef::Index(2),
63 VPTreeDef::Value(DCTToken::One), VPTreeDef::Index(4),
64 VPTreeDef::Index(6), VPTreeDef::Index(10),
65 VPTreeDef::Value(DCTToken::Two), VPTreeDef::Index(8),
66 VPTreeDef::Value(DCTToken::Three), VPTreeDef::Value(DCTToken::Four),
67 VPTreeDef::Index(12), VPTreeDef::Index(14),
68 VPTreeDef::Value(DCTToken::Cat1), VPTreeDef::Value(DCTToken::Cat2),
69 VPTreeDef::Index(16), VPTreeDef::Index(18),
70 VPTreeDef::Value(DCTToken::Cat3), VPTreeDef::Value(DCTToken::Cat4),
71 VPTreeDef::Value(DCTToken::Cat5), VPTreeDef::Value(DCTToken::Cat6)
// Decodes one 4x4 coefficient subblock from the boolean coder.
// `ctype` selects the coefficient-type plane; `pctx` is the nonzero context
// from the left/top neighbours. Returns 1 if any coefficient was coded
// (the value then feeds the prediction cache), 0 otherwise.
74 fn decode_subblock(bc: &mut BoolCoder, coeffs: &mut [i16; 16], ctype: usize, pctx: u8, sbparams: &SBParams) -> u8 {
// Maps scan position -> probability band.
75 const COEF_BANDS: [usize; 16] = [ 0, 1, 2, 3, 6, 4, 5, 6, 6, 6, 6, 6, 6, 6, 6, 7 ];
// ctype == 0 means the DC coefficient is coded separately (Y2 block),
// so token decoding starts at position 1.
78 let start = if ctype != 0 { 0 } else { 1 };
80 let mut cval = pctx as usize;
81 for idx in start..16 {
82 let probs = &sbparams.coef_probs[ctype][COEF_BANDS[idx]][cval];
// After a zero coefficient (cval == 0, not the first token) EOB cannot
// follow, so the EOB-less tree with probs[1..] is used instead.
83 let tok = if cval != 0 || idx == start {
84 bc.read_tree(COEF_TREE, probs)
86 bc.read_tree(COEF_NE_TREE, &probs[1..])
88 if tok == DCTToken::EOB { break; }
89 let level = expand_token(bc, tok);
// Dequantize and place in natural order via the zigzag scan table.
90 coeffs[DEFAULT_SCAN_ORDER[idx]] = level.wrapping_mul(sbparams.qmat[idx]);
// Context for the next token: 0 / 1 / "2 or more".
91 cval = level.abs().min(2) as usize;
// NOTE(review): `has_nz` is computed on elided lines 92-93 — confirm it
// accumulates nonzero AC coefficients.
94 if has_nz > 0 { 1 } else { 0 }
// Derive for a struct whose definition is elided here (likely MBInfo) — confirm.
97 #[derive(Clone,Copy,Default)]
// Snapshot of entropy probabilities, kept so a frame with
// refresh_entropy_probs == 0 can restore the previous state afterwards.
107 #[derive(Clone,Copy,Default)]
115 kf_ymode_prob: [u8; 4],
116 kf_uvmode_prob: [u8; 3],
118 coef_probs: [[[[u8; 11]; 3]; 8]; 4],
119 mv_probs: [[u8; 19]; 2],
121 segment_probs: [u8; 3],
// Per-frame decoder state: header flags, probability tables, segmentation
// and loop-filter parameters (several fields elided in this view).
125 struct DecoderState {
127 loop_filter_level: u8,
133 kf_ymode_prob: [u8; 4],
134 kf_uvmode_prob: [u8; 3],
// Reference-frame sign bias for golden ([0]) and altref ([1]).
139 sign_bias: [bool; 2],
141 coef_probs: [[[[u8; 11]; 3]; 8]; 4],
142 mv_probs: [[u8; 19]; 2],
145 update_seg_map: bool,
// Per-segment quantizer / loop-filter strength overrides, set while
// segmentation is active (see set_cur_segment()).
146 force_quant: Option<u8>,
147 force_loop_str: Option<u8>,
148 segment_probs: [u8; 3],
// true = absolute segment values, false = deltas (see set_qmat()).
150 seg_feature_mode: bool,
153 lf_frame_delta: [i8; 4],
154 lf_mode_delta: [i8; 4],
// Intra-prediction edge contexts for the three planes.
158 ipred_ctx_y: IPredContext,
159 ipred_ctx_u: IPredContext,
160 ipred_ctx_v: IPredContext,
// Resets all probability tables to the VP8 defaults (done on keyframes).
164 fn reset(&mut self) {
// Default MV component probabilities: [0] = vertical, [1] = horizontal —
// TODO confirm component order against decode_mv_component() callers.
165 const VP8_DEFAULT_MV_PROBS: [[u8; 19]; 2] = [
166 [ 162, 128, 225, 146, 172, 147, 214, 39, 156, 128, 129, 132, 75, 145, 178, 206, 239, 254, 254 ],
167 [ 164, 128, 204, 170, 119, 235, 140, 230, 228, 128, 130, 130, 74, 148, 180, 203, 236, 254, 254 ]
170 self.kf_ymode_prob.copy_from_slice(Y_MODE_TREE_PROBS);
171 self.kf_uvmode_prob.copy_from_slice(UV_MODE_TREE_PROBS);
172 self.coef_probs.copy_from_slice(&DEFAULT_DCT_PROBS);
173 self.mv_probs.copy_from_slice(&VP8_DEFAULT_MV_PROBS);
// 255 => every macroblock decodes segment id 0 by default.
174 self.segment_probs = [255; 3];
175 self.seg = [Segment::default(); 4];
// Restores the probability tables from a snapshot (counterpart of save()).
// NOTE(review): parameter is named `dst` but acts as the *source* here.
177 fn restore(&mut self, dst: &SavedProbs) {
178 self.kf_ymode_prob = dst.kf_ymode_prob;
179 self.kf_uvmode_prob = dst.kf_uvmode_prob;
180 self.coef_probs = dst.coef_probs;
181 self.mv_probs = dst.mv_probs;
182 self.segment_probs = dst.segment_probs;
// Snapshots the probability tables before in-frame updates are applied.
184 fn save(&self, dst: &mut SavedProbs) {
185 dst.kf_ymode_prob = self.kf_ymode_prob;
186 dst.kf_uvmode_prob = self.kf_uvmode_prob;
187 dst.coef_probs = self.coef_probs;
188 dst.mv_probs = self.mv_probs;
// Deliberately not saved here: segment_probs is snapshotted at update time
// instead (see update_segmentation(), `self.tmp_probs.segment_probs = ...`).
189 // dst.segment_probs = self.segment_probs;
// Decodes one signed motion-vector component. Small magnitudes use a tree;
// large ones are read bit-by-bit in LONG_VECTOR_ORDER (the sign and scaling
// are handled on elided lines below — confirm in full file).
193 fn decode_mv_component(bc: &mut BoolCoder, probs: &[u8; 19]) -> i16 {
// Bit positions read for the long-vector form; bit 3 is deferred (see below).
194 const LONG_VECTOR_ORDER: [usize; 9] = [ 0, 1, 2, 9, 8, 7, 6, 5, 4 ];
// probs[0]: short-vs-long selector; probs[2..9]: short tree; probs[9..]: raw bits.
196 let val = if !bc.read_prob(probs[0]) {
197 bc.read_tree(SMALL_MV_TREE, &probs[2..9])
199 let raw_probs = &probs[9..];
201 for ord in LONG_VECTOR_ORDER.iter() {
202 raw |= (bc.read_prob(raw_probs[*ord]) as i16) << *ord;
// Bit 3 is only coded when some higher bit is set.
204 if (raw & 0x3F0) != 0 {
205 raw |= (bc.read_prob(raw_probs[3]) as i16) << 3;
// probs[1] is the sign probability; zero values carry no sign bit.
211 if (val == 0) || !bc.read_prob(probs[1]) {
// Fragment of the VP8Decoder struct (header and several fields elided).
219 info: NACodecInfoRef,
// Per-macroblock decoding info (mb_w * mb_h entries).
226 mb_info: Vec<MBInfo>,
// Per-4x4-block luma prediction modes, stride = mb_w * 4.
230 ymodes: Vec<PredMode>,
// Per-macroblock chroma prediction modes, stride = mb_w.
232 uvmodes: Vec<PredMode>,
233 uvmode_stride: usize,
235 dstate: DecoderState,
// Probability snapshot used when refresh_entropy_probs == 0.
237 tmp_probs: SavedProbs,
// 24 luma/chroma 4x4 blocks + the Y2 (DC) block at index 24.
239 coeffs: [[i16; 16]; 25],
// Quantizer matrices: [0] = frame default, [1..] = per-segment.
240 qmat: [[[i16; 16]; 3]; 5],
// Scratch buffer for motion compensation with edge emulation.
242 mc_buf: NAVideoBufferRef<u8>,
// Constructor fragment (fn header elided): allocates the 128x128 YUV420
// scratch buffer for motion compensation and zero-initializes the state.
249 let vt = alloc_video_buffer(NAVideoInfo::new(128, 128, false, YUV420_FORMAT), 4).unwrap();
250 let mc_buf = vt.get_vbuf().unwrap();
252 info: NACodecInfoRef::default(),
254 shuf: VP8Shuffler::new(),
268 dstate: DecoderState::default(),
269 pcache: PredCache::new(),
270 tmp_probs: SavedProbs::default(),
272 coeffs: [[0; 16]; 25],
273 qmat: [[[0; 16]; 3]; 5],
// (Re)allocates all per-frame arrays for the given dimensions; no-op when
// the size is unchanged. Strides are in 4x4-block units for luma/MVs.
280 fn set_dimensions(&mut self, width: usize, height: usize) {
281 if (width == self.width) && (height == self.height) {
285 self.height = height;
// Macroblock counts, rounded up to 16-pixel units.
286 self.mb_w = (self.width + 15) >> 4;
287 self.mb_h = (self.height + 15) >> 4;
288 self.mb_info.resize(self.mb_w * self.mb_h, MBInfo::default());
289 self.mv_stride = self.mb_w * 4;
290 self.mvs.resize(self.mv_stride * self.mb_h * 4, ZERO_MV);
292 self.ymode_stride = self.mb_w * 4;
293 self.uvmode_stride = self.mb_w;
294 self.ymodes.resize(self.ymode_stride * self.mb_h * 4, PredMode::default());
295 self.uvmodes.resize(self.uvmode_stride * self.mb_h, PredMode::default());
297 self.pcache.resize(self.mb_w);
// Segment map persists between frames, so it is explicitly cleared to 0.
299 self.seg_map.clear();
300 self.seg_map.resize(self.mb_w * self.mb_h, 0);
// Parses the segmentation header: optional per-segment quantizer and
// loop-filter values (sign-magnitude coded) and segment-map tree probs.
302 fn update_segmentation(&mut self, bc: &mut BoolCoder) -> DecoderResult<()> {
303 self.dstate.update_seg_map = bc.read_bool();
305 self.dstate.seg_feature_mode = bc.read_bool();
306 for seg in self.dstate.seg.iter_mut() {
// 7-bit magnitude followed by a sign flag.
308 let quant_upd_val = bc.read_bits(7) as i8;
309 let quant_upd_sign = bc.read_bool();
310 seg.quant = if !quant_upd_sign { quant_upd_val } else { -quant_upd_val };
313 for seg in self.dstate.seg.iter_mut() {
// 6-bit magnitude followed by a sign flag.
315 let lf_upd_val = bc.read_bits(6) as i8;
316 let lf_upd_sign = bc.read_bool();
317 seg.lf = if !lf_upd_sign { lf_upd_val } else { -lf_upd_val };
321 if self.dstate.update_seg_map {
// Snapshot the old tree probs before overwriting (save() skips them).
322 self.tmp_probs.segment_probs = self.dstate.segment_probs;
323 for prob in self.dstate.segment_probs.iter_mut() {
325 *prob = bc.read_byte();
// Parses loop-filter delta adjustments: per-reference-frame and per-mode
// signed deltas, each 6-bit magnitude plus sign flag.
331 fn mb_lf_adjustments(&mut self, bc: &mut BoolCoder) -> DecoderResult<()> {
332 self.dstate.lf_delta = bc.read_bool();
333 if self.dstate.lf_delta {
335 for frame_delta in self.dstate.lf_frame_delta.iter_mut() {
337 let delta_magn = bc.read_bits(6) as i8;
338 let delta_sign = bc.read_bool();
339 *frame_delta = if !delta_sign { delta_magn } else { -delta_magn };
342 for mode_delta in self.dstate.lf_mode_delta.iter_mut() {
344 let delta_magn = bc.read_bits(6) as i8;
345 let delta_sign = bc.read_bool();
346 *mode_delta = if !delta_sign { delta_magn } else { -delta_magn };
// Reads an optional 4-bit signed quantizer delta relative to y_ac_q and
// returns the clamped index (0..=127); the presence flag and sign are on
// elided lines.
353 fn read_delta_quant(bc: &mut BoolCoder, y_ac_q: usize) -> DecoderResult<usize> {
355 let delta = bc.read_bits(4) as usize;
357 Ok(y_ac_q.saturating_sub(delta))
359 Ok((y_ac_q + delta).min(127))
// Parses the quantizer section: a 7-bit base Y AC index plus five optional
// deltas for the other coefficient classes, then builds the dequant tables.
365 fn quant_indices(&mut self, bc: &mut BoolCoder) -> DecoderResult<()> {
366 let y_ac_q = bc.read_bits(7) as usize;
367 let y_dc_q = Self::read_delta_quant(bc, y_ac_q)?;
368 let y2_dc_q = Self::read_delta_quant(bc, y_ac_q)?;
369 let y2_ac_q = Self::read_delta_quant(bc, y_ac_q)?;
370 let uv_dc_q = Self::read_delta_quant(bc, y_ac_q)?;
371 let uv_ac_q = Self::read_delta_quant(bc, y_ac_q)?;
372 self.set_qmat(y_dc_q, y_ac_q, y2_dc_q, y2_ac_q, uv_dc_q, uv_ac_q);
// Conditionally updates each DCT coefficient probability: an update flag is
// coded with DCT_UPDATE_PROBS, the new value as a raw byte. The four nested
// i/j/k/l loops are on elided lines.
376 fn read_dct_coef_prob_upd(&mut self, bc: &mut BoolCoder) -> DecoderResult<()> {
381 if bc.read_prob(DCT_UPDATE_PROBS[i][j][k][l]) {
382 self.dstate.coef_probs[i][j][k][l] = bc.read_byte();
// Conditionally updates the motion-vector probabilities for both components.
// NOTE(review): new values come via read_probability() (7-bit, scaled) here,
// unlike the raw read_byte() used for DCT probs — confirm against the spec.
390 fn read_mv_prob_upd(&mut self, bc: &mut BoolCoder) -> DecoderResult<()> {
391 const MV_UPDATE_PROBS: [[u8; 19]; 2] = [
392 [ 237, 246, 253, 253, 254, 254, 254, 254, 254, 254, 254, 254, 254, 254, 250, 250, 252, 254, 254 ],
393 [ 231, 243, 245, 253, 254, 254, 254, 254, 254, 254, 254, 254, 254, 254, 251, 251, 254, 254, 254 ]
397 if bc.read_prob(MV_UPDATE_PROBS[comp][i]) {
398 self.dstate.mv_probs[comp][i] = bc.read_probability();
// Decodes the segment id for one macroblock from the segment-map tree and
// stores it in the per-frame segment map.
404 fn decode_mb_features(&mut self, bc: &mut BoolCoder, mb_idx: usize) -> DecoderResult<()> {
405 let segment_id = bc.read_tree(FEATURE_TREE, &self.dstate.segment_probs);
406 self.seg_map[mb_idx] = segment_id as u8;
// Activates the segment of the given macroblock: selects the per-segment
// quantizer matrix and computes its loop-filter strength.
410 fn set_cur_segment(&mut self, mb_idx: usize) {
// NOTE(review): force_quant stores the segment *id* (used as qmat index + 1
// in decode_residue) rather than a quantizer value — the name is misleading.
411 self.dstate.force_quant = Some(self.seg_map[mb_idx]);
412 let seg_id = self.seg_map[mb_idx] as usize;
413 let segment = &self.dstate.seg[seg_id];
// Absolute mode uses segment.lf directly (elided branch); delta mode adds
// it to the frame loop-filter level, clamped to 0..=63.
414 let loop_str = if self.dstate.seg_feature_mode {
417 (i16::from(self.dstate.loop_filter_level) + i16::from(segment.lf)).max(0).min(63) as u8
419 self.dstate.force_loop_str = Some(loop_str);
// Decodes all coefficient blocks of one macroblock (optional Y2 DC block,
// 16 luma, 4+4 chroma), updates the nonzero-prediction cache, and applies
// the inverse DCT. Returns true when any coefficient was coded.
421 fn decode_residue(&mut self, bc: &mut BoolCoder, mb_x: usize) -> bool {
// qmat[0] = frame default, qmat[seg_id + 1] = per-segment matrices.
422 let qmat_idx = if let Some(idx) = self.dstate.force_quant { (idx as usize) + 1 } else { 0 };
423 let mut sbparams = SBParams {
// Start with the Y2 matrix (index 2); switched per plane below.
424 qmat: &self.qmat[qmat_idx][2],
425 coef_probs: &self.dstate.coef_probs,
427 let mut has_ac = [false; 25];
428 let mut coded = false;
// Y2 block (index 24): carries the luma DC coefficients for non-split modes.
430 if self.dstate.has_y2 {
431 let pred = &self.pcache.y2_pred;
432 let pidx = pred.xpos + mb_x;
// Context = left + top nonzero flags.
433 let pctx = self.pcache.y2_pred_left + pred.data[pidx - pred.stride];
435 let has_nz = decode_subblock(bc, &mut self.coeffs[24], 1, pctx, &sbparams);
436 self.pcache.y2_pred.data[pidx] = has_nz;
437 self.pcache.y2_pred_left = has_nz;
438 has_ac[24] = has_nz > 0;
439 coded |= has_ac[24] | (self.coeffs[24][0] != 0);
// No Y2 for this MB: propagate the top context unchanged (elided else-branch).
443 let pred = &mut self.pcache.y2_pred;
444 let pidx = pred.xpos + mb_x;
445 pred.data[pidx] = pred.data[pidx - pred.stride];
// Luma blocks use qmat[..][0]; loop header over the 16 blocks is elided.
449 sbparams.qmat = &self.qmat[qmat_idx][0];
453 let pred = &self.pcache.y_pred;
454 let pidx = pred.xpos + mb_x * 4 + bx + by * pred.stride;
455 let pctx = self.pcache.y_pred_left[by] + pred.data[pidx - pred.stride];
457 let has_nz = decode_subblock(bc, &mut self.coeffs[i], ytype, pctx, &sbparams);
458 self.pcache.y_pred.data[pidx] = has_nz;
459 self.pcache.y_pred_left[by] = has_nz;
460 has_ac[i] = has_nz > 0;
461 coded |= has_ac[i] | (self.coeffs[i][0] != 0);
// Chroma blocks (U: 16..20, V: 20..24) use qmat[..][1].
463 sbparams.qmat = &self.qmat[qmat_idx][1];
466 let by = (i >> 1) & 1;
467 let pred = &self.pcache.u_pred;
468 let pidx = pred.xpos + mb_x * 2 + bx + by * pred.stride;
469 let pctx = self.pcache.u_pred_left[by] + pred.data[pidx - pred.stride];
471 let has_nz = decode_subblock(bc, &mut self.coeffs[i], 2, pctx, &sbparams);
472 self.pcache.u_pred.data[pidx] = has_nz;
473 self.pcache.u_pred_left[by] = has_nz;
474 has_ac[i] = has_nz > 0;
475 coded |= has_ac[i] | (self.coeffs[i][0] != 0);
479 let by = (i >> 1) & 1;
480 let pred = &self.pcache.v_pred;
481 let pidx = pred.xpos + mb_x * 2 + bx + by * pred.stride;
482 let pctx = self.pcache.v_pred_left[by] + pred.data[pidx - pred.stride];
484 let has_nz = decode_subblock(bc, &mut self.coeffs[i], 2, pctx, &sbparams);
485 self.pcache.v_pred.data[pidx] = has_nz;
486 self.pcache.v_pred_left[by] = has_nz;
487 has_ac[i] = has_nz > 0;
488 coded |= has_ac[i] | (self.coeffs[i][0] != 0);
// Y2 present: run the Walsh-Hadamard transform on block 24 (elided) and
// scatter its outputs into the DC slot of each luma block.
491 if self.dstate.has_y2 {
492 let y2block = &mut self.coeffs[24];
495 } else if y2block[0] != 0 {
499 self.coeffs[i][0] = self.coeffs[24][i];
// Full IDCT only where AC coefficients exist, DC-only IDCT otherwise.
504 idct4x4(&mut self.coeffs[i]);
505 } else if self.coeffs[i][0] != 0 {
506 idct4x4_dc(&mut self.coeffs[i]);
// Fills one quantizer-matrix triple ([0]=luma, [1]=chroma, [2]=Y2) from the
// quantizer indices; index 0 of each 4x4 matrix is the DC quantizer.
513 fn set_single_qmat(qmat: &mut [[i16; 16]; 3], y_dc_q: usize, y_ac_q: usize, y2_dc_q: usize, y2_ac_q: usize, uv_dc_q: usize, uv_ac_q: usize) {
514 qmat[0][0] = DC_QUANTS[y_dc_q];
516 qmat[0][i] = AC_QUANTS[y_ac_q];
// Chroma DC is capped at 132 per the VP8 spec.
518 qmat[1][0] = DC_QUANTS[uv_dc_q].min(132);
520 qmat[1][i] = AC_QUANTS[uv_ac_q];
// Y2 quantizers are scaled: DC doubled, AC * 155/100 with a floor of 8.
522 qmat[2][0] = DC_QUANTS[y2_dc_q] * 2;
524 qmat[2][i] = (i32::from(AC_QUANTS[y2_ac_q]) * 155 / 100).max(8) as i16;
// Builds the frame-default quantizer matrices and, when segmentation is on,
// one matrix set per segment (absolute or delta quantizer per seg_feature_mode).
527 fn set_qmat(&mut self, y_dc_q: usize, y_ac_q: usize, y2_dc_q: usize, y2_ac_q: usize, uv_dc_q: usize, uv_ac_q: usize) {
528 Self::set_single_qmat(&mut self.qmat[0], y_dc_q, y_ac_q, y2_dc_q, y2_ac_q, uv_dc_q, uv_ac_q);
529 if self.dstate.segmentation {
530 for (qmat, seg) in self.qmat[1..].iter_mut().zip(self.dstate.seg.iter()) {
531 let q = if self.dstate.seg_feature_mode {
// Absolute mode: the segment value is the index itself.
532 seg.quant.max(0) as usize
// Delta mode: offset from the frame Y AC index, clamped to 0..=127.
534 ((y_ac_q as i16) + i16::from(seg.quant)).max(0).min(127) as usize
// NOTE(review): the same index is used for all six classes here,
// dropping the per-class deltas for segments — confirm intended.
536 Self::set_single_qmat(qmat, q, q, q, q, q, q);
// Fills the 4x4 sub-block luma-mode grid of one macroblock with `ymode`
// (loop over the four rows is partly elided).
540 fn fill_ymode(&mut self, mb_x: usize, mb_y: usize, ymode: PredMode) {
541 let mut iidx = mb_x * 4 + mb_y * 4 * self.ymode_stride;
544 self.ymodes[iidx + x] = ymode;
546 iidx += self.ymode_stride;
// Fills the 4x4 sub-block MV grid of one macroblock with `mv`.
549 fn fill_mv(&mut self, mb_x: usize, mb_y: usize, mv: MV) {
550 let mut iidx = mb_x * 4 + mb_y * 4 * self.mv_stride;
553 self.mvs[iidx + x] = mv;
// mb_w * 4 equals mv_stride (see set_dimensions); using self.mv_stride here
// would be clearer and consistent with fill_ymode.
555 iidx += self.mb_w * 4;
// Returns the sign bias of the given reference frame; golden and altref have
// per-frame coded biases, the other arms are elided.
558 fn get_frame_sign(&self, rframe: VP8Ref) -> bool {
560 VP8Ref::Golden => self.dstate.sign_bias[0],
561 VP8Ref::AltRef => self.dstate.sign_bias[1],
// Surveys the left / top / top-left neighbour macroblocks to build the VP8
// MV prediction: returns (mode probabilities, nearest MV, near MV, best MV),
// all clipped to the frame. Several counting/selection lines are elided.
565 fn find_mv_pred(&self, mb_x: usize, mb_y: usize, frm_sign: bool) -> ([u8; 4], MV, MV, MV) {
// Mode probabilities indexed by neighbour-match count (rows partly elided).
566 const VP8_MV_PROBS: [[u8; 4]; 6] = [
571 [ 159, 134, 128, 34 ],
572 [ 234, 188, 128, 28 ]
// (x-offset, y-offset, weight) for left, top and top-left neighbours.
575 const OFFS: [(u8, u8, u8); 3] = [(0, 1, 2), (1, 0, 2), (1, 1, 1)];
576 let mut mvs = [ZERO_MV; 3];
577 let mut mvc = [0; 3];
581 let mut nearest_mv = ZERO_MV;
582 let mut near_mv = ZERO_MV;
584 for &(x, y, weight) in OFFS.iter() {
// Only use neighbours that exist inside the frame.
585 let mv = if (x == 0 || mb_x > 0) && (y == 0 || mb_y > 0) {
586 let x = usize::from(x);
587 let y = usize::from(y);
588 let mb_idx = mb_x - x + (mb_y - y) * self.mb_w;
589 if self.mb_info[mb_idx].mb_type.is_intra() {
592 if self.mb_info[mb_idx].mb_type == VPMBType::InterFourMV {
// Pick the sub-MV adjacent to the current MB from a 4MV neighbour.
595 let rsign = self.get_frame_sign(self.mb_info[mb_idx].rframe);
596 let mut mv_idx = mb_x * 4 + mb_y * 4 * self.mv_stride;
598 mv_idx += self.mv_stride * 3 - 1;
599 } else if x == 0 { // top
600 mv_idx -= self.mv_stride;
603 mv_idx -= self.mv_stride + 1;
// MVs from a reference with the opposite sign bias get negated (elided).
605 if rsign == frm_sign {
613 let mut found = false;
623 mvc[num_mv] = weight;
652 let mut best_mv = mvs[0];
655 for (&mv, &count) in mvs[..num_mv].iter().zip(mvc.iter()) {
657 if nearest_mv == ZERO_MV {
670 for (&mv, &count) in mvs[..num_mv].iter().zip(mvc.iter()) {
677 let best_mv = self.clip_mv(best_mv, mb_x, mb_y);
// ct[] holds neighbour counts (computed on elided lines) used to pick probs.
679 let mvprobs = [VP8_MV_PROBS[ct[0] as usize][0],
680 VP8_MV_PROBS[ct[1] as usize][1],
681 VP8_MV_PROBS[ct[2] as usize][2],
682 VP8_MV_PROBS[ct[3] as usize][3]];
684 (mvprobs, self.clip_mv(nearest_mv, mb_x, mb_y), self.clip_mv(near_mv, mb_x, mb_y), best_mv)
686 fn clip_mv(&self, mv: MV, mb_x: usize, mb_y: usize) -> MV {
687 let pos_x = (mb_x as i32) * 16 * 4;
688 let pos_y = (mb_y as i32) * 16 * 4;
689 let mv_x = (pos_x + i32::from(mv.x)).max(-16 * 4).min((self.mb_w as i32) * 16 * 4);
690 let mv_y = (pos_y + i32::from(mv.y)).max(-16 * 4).min((self.mb_h as i32) * 16 * 4);
691 MV {x: (mv_x - pos_x) as i16, y: (mv_y - pos_y) as i16 }
// Decodes the motion vector of one 4x4 sub-block in split-MV mode: the
// left/above neighbour MVs select a probability set, then the sub-MV mode
// (Left / Above / Zero / New) is tree-coded; New adds a coded delta to pred_mv.
693 fn get_split_mv(&self, bc: &mut BoolCoder, mb_x: usize, mb_y: usize, bx: usize, by: usize, pred_mv: MV) -> MV {
// Probability sets indexed by the neighbour-MV relation computed below.
694 const SUB_MV_REF_PROBS: [[u8; 3]; 5] = [
702 let mvidx = mb_x * 4 + bx + (mb_y * 4 + by) * self.mv_stride;
// Left neighbour MV (falls back on an elided branch at the frame edge).
703 let left_mv = if (mb_x > 0) || (bx > 0) {
708 let top_mv = if (mb_y > 0) || (by > 0) {
709 self.mvs[mvidx - self.mv_stride]
// Map the (left, top) MV relation onto a probability-set index.
714 let idx = if left_mv == top_mv {
715 if left_mv == ZERO_MV {
720 } else if top_mv == ZERO_MV {
722 } else if left_mv == ZERO_MV {
727 let mode = bc.read_tree(SUB_MV_REF_TREE, &SUB_MV_REF_PROBS[idx]);
729 SubMVRef::Left => left_mv,
730 SubMVRef::Above => top_mv,
731 SubMVRef::Zero => ZERO_MV,
// New MV: vertical component is coded first, then horizontal.
733 let dmy = decode_mv_component(bc, &self.dstate.mv_probs[0]);
734 let dmx = decode_mv_component(bc, &self.dstate.mv_probs[1]);
735 pred_mv + MV{ x: dmx, y: dmy }
// Decodes split-MV mode for one macroblock: reads the partition layout
// (16x8 / 8x16 / 8x8 / 4x4) and fills the 4x4 MV grid accordingly.
739 fn do_split_mv(&mut self, bc: &mut BoolCoder, mb_x: usize, mb_y: usize, pred_mv: MV) -> DecoderResult<()> {
740 let split_mode = bc.read_tree(MV_SPLIT_MODE_TREE, &MV_SPLIT_MODE_PROBS);
741 let mut mvidx = mb_x * 4 + mb_y * 4 * self.mv_stride;
743 MVSplitMode::TopBottom => {
744 let top_mv = self.get_split_mv(bc, mb_x, mb_y, 0, 0, pred_mv);
746 for x in 0..4 { self.mvs[mvidx + x] = top_mv; }
747 mvidx += self.mv_stride;
749 let bot_mv = self.get_split_mv(bc, mb_x, mb_y, 0, 2, pred_mv);
751 for x in 0..4 { self.mvs[mvidx + x] = bot_mv; }
752 mvidx += self.mv_stride;
755 MVSplitMode::LeftRight => {
756 let left_mv = self.get_split_mv(bc, mb_x, mb_y, 0, 0, pred_mv);
// Write left_mv before decoding the right half: get_split_mv for the
// right sub-block reads its left neighbour from self.mvs — presumably
// intentional ordering; confirm against the reference decoder.
757 self.mvs[mvidx + 1] = left_mv;
758 let right_mv = self.get_split_mv(bc, mb_x, mb_y, 2, 0, pred_mv);
760 self.mvs[mvidx + 0] = left_mv;
761 self.mvs[mvidx + 1] = left_mv;
762 self.mvs[mvidx + 2] = right_mv;
763 self.mvs[mvidx + 3] = right_mv;
764 mvidx += self.mv_stride;
767 MVSplitMode::Quarters => {
// One MV per 8x8 quadrant, replicated into its four 4x4 cells.
768 for y in (0..4).step_by(2) {
769 for x in (0..4).step_by(2) {
770 self.mvs[mvidx + x] = self.get_split_mv(bc, mb_x, mb_y, x, y, pred_mv);
771 self.mvs[mvidx + x + 1] = self.mvs[mvidx + x];
774 self.mvs[mvidx + x + self.mv_stride] = self.mvs[mvidx + x];
776 mvidx += self.mv_stride * 2;
779 MVSplitMode::Sixteenths => {
// One MV per 4x4 sub-block (loops partly elided).
782 self.mvs[mvidx + x] = self.get_split_mv(bc, mb_x, mb_y, x, y, pred_mv);
784 mvidx += self.mv_stride;
// Adds the decoded IDCT residue to the prediction in the frame buffer:
// 16 luma blocks (when do_luma) plus the 4+4 chroma blocks.
791 fn add_residue(&self, dframe: &mut NASimpleVideoFrame<u8>, mb_x: usize, mb_y: usize, do_luma: bool) {
793 let ydst = &mut dframe.data[dframe.offset[0]..];
794 let ystride = dframe.stride[0];
795 let mut yoff = mb_x * 16 + mb_y * 16 * ystride;
798 add_coeffs4x4(ydst, yoff + x * 4, ystride, &self.coeffs[x + y * 4]);
// Chroma planes are addressed via absolute offsets into the whole buffer.
803 let dst = &mut dframe.data[0..];
804 let mut uoff = dframe.offset[1] + mb_x * 8 + mb_y * 8 * dframe.stride[1];
805 let ustride = dframe.stride[1];
806 let mut voff = dframe.offset[2] + mb_x * 8 + mb_y * 8 * dframe.stride[2];
807 let vstride = dframe.stride[2];
// coeffs[16..20] = U blocks, coeffs[20..24] = V blocks.
810 add_coeffs4x4(dst, uoff + x * 4, ustride, &self.coeffs[16 + x + y * 2]);
811 add_coeffs4x4(dst, voff + x * 4, vstride, &self.coeffs[20 + x + y * 2]);
// Reconstructs one intra macroblock: 16x16 (or per-4x4 B_PRED) luma
// prediction, 8x8 chroma prediction, then residue addition.
// Edge cases at frame borders use the VP8 constants 0x7F (top) / 0x81 (left).
817 fn recon_intra_mb(&mut self, dframe: &mut NASimpleVideoFrame<u8>, mb_x: usize, mb_y: usize, mb_coeff_skip: bool) -> DecoderResult<()> {
818 let mb_idx = mb_x + mb_y * self.mb_w;
819 let has_top = mb_y > 0;
820 let has_left = mb_x > 0;
821 let ydst = &mut dframe.data[dframe.offset[0]..];
822 let ystride = dframe.stride[0];
823 let mut yoff = mb_x * 16 + mb_y * 16 * ystride;
824 let ipred_ctx_y = &mut self.dstate.ipred_ctx_y;
825 ipred_ctx_y.has_top = has_top;
826 ipred_ctx_y.has_left = has_left;
// Whole-block 16x16 prediction unless the MB uses per-4x4 modes (BPred).
827 let is_normal = self.mb_info[mb_idx].ymode != PredMode::BPred;
829 ipred_ctx_y.fill(ydst, yoff, ystride, 16, 16);
// Border fallbacks: V-pred with no top row / H-pred with no left column
// degenerate to constant prediction.
830 if !has_top && self.mb_info[mb_idx].ymode == PredMode::VPred {
831 IPred16x16::ipred_const(ydst, yoff, ystride, 0x7F)
832 } else if !has_left && self.mb_info[mb_idx].ymode == PredMode::HPred {
833 IPred16x16::ipred_const(ydst, yoff, ystride, 0x81)
835 match self.mb_info[mb_idx].ymode {
836 PredMode::DCPred => IPred16x16::ipred_dc(ydst, yoff, ystride, ipred_ctx_y),
837 PredMode::HPred => IPred16x16::ipred_h (ydst, yoff, ystride, ipred_ctx_y),
838 PredMode::VPred => IPred16x16::ipred_v (ydst, yoff, ystride, ipred_ctx_y),
839 PredMode::TMPred => IPred16x16::ipred_tm(ydst, yoff, ystride, ipred_ctx_y),
// BPred path: each 4x4 block has its own mode from self.ymodes.
844 let mut iidx = mb_x * 4 + mb_y * 4 * self.ymode_stride;
// tr_save caches top-right pixels across block rows so later rows see
// the pre-reconstruction values, as the spec requires.
845 let mut tr_save = [0x7Fu8; 16];
846 let tr_edge = if has_top { ydst[yoff - ystride + 15] } else { 0x7F };
849 ipred_ctx_y.has_left = has_left || x > 0;
850 let bmode = self.ymodes[iidx + x];
851 let cur_yoff = yoff + x * 4;
// NOTE(review): `mb_y < self.mb_w - 1` mixes a row index with the MB
// *width* — looks like it should be `mb_x`; confirm against reference.
852 let has_tr = ipred_ctx_y.has_top && ((x < 3) || ((y == 0) && (mb_y < self.mb_w - 1)));
853 let has_dl = ipred_ctx_y.has_left && (x == 0) && (y < 3);
854 ipred_ctx_y.fill(ydst, cur_yoff, ystride,
855 if has_tr { 8 } else { 4 },
856 if has_dl { 8 } else { 4 });
// Frame-border substitutions for the edge contexts.
858 if !has_top && y == 0 && (has_left || x > 0) && bmode != PredMode::TMPred {
859 ipred_ctx_y.top = [0x7F; 16];
860 ipred_ctx_y.tl = 0x7F;
862 if !has_left && x == 0 && (has_top || y > 0) && bmode != PredMode::TMPred {
863 ipred_ctx_y.left = [0x81; 16];
864 ipred_ctx_y.tl = 0x81;
866 if !has_left && !has_top && x == 0 && y == 0 && bmode != PredMode::DCPred {
867 ipred_ctx_y.top = [0x7F; 16];
868 ipred_ctx_y.left = [0x81; 16];
869 ipred_ctx_y.tl = 0x7F;
// Restore / stash the saved top-right pixels (conditions elided).
874 ipred_ctx_y.top[i + 4] = tr_save[x * 4 + i];
878 tr_save[x * 4 + i] = ipred_ctx_y.top[i + 4];
// Rightmost MB: replicate the top-right edge pixel.
881 if (mb_x == self.mb_w - 1) && has_top && (x == 3) {
883 ipred_ctx_y.top[i + 4] = tr_edge;
887 PredMode::DCPred => IPred4x4::ipred_dc(ydst, cur_yoff, ystride, ipred_ctx_y),
888 PredMode::TMPred => IPred4x4::ipred_tm(ydst, cur_yoff, ystride, ipred_ctx_y),
889 PredMode::HPred => IPred4x4::ipred_he(ydst, cur_yoff, ystride, ipred_ctx_y),
890 PredMode::VPred => IPred4x4::ipred_ve(ydst, cur_yoff, ystride, ipred_ctx_y),
891 PredMode::LDPred => IPred4x4::ipred_ld(ydst, cur_yoff, ystride, ipred_ctx_y),
892 PredMode::RDPred => IPred4x4::ipred_rd(ydst, cur_yoff, ystride, ipred_ctx_y),
893 PredMode::VRPred => IPred4x4::ipred_vr(ydst, cur_yoff, ystride, ipred_ctx_y),
894 PredMode::VLPred => IPred4x4::ipred_vl(ydst, cur_yoff, ystride, ipred_ctx_y),
895 PredMode::HDPred => IPred4x4::ipred_hd(ydst, cur_yoff, ystride, ipred_ctx_y),
896 PredMode::HUPred => IPred4x4::ipred_hu(ydst, cur_yoff, ystride, ipred_ctx_y),
// In BPred mode residue is added per 4x4 block, inside the loop.
900 add_coeffs4x4(ydst, cur_yoff, ystride, &self.coeffs[x + y * 4]);
903 ipred_ctx_y.has_top = true;
905 iidx += self.ymode_stride;
// Chroma prediction: one mode for both U and V planes.
908 let dst = &mut dframe.data[0..];
909 let uoff = dframe.offset[1] + mb_x * 8 + mb_y * 8 * dframe.stride[1];
910 let ustride = dframe.stride[1];
911 let voff = dframe.offset[2] + mb_x * 8 + mb_y * 8 * dframe.stride[2];
912 let vstride = dframe.stride[2];
913 let ipred_ctx_u = &mut self.dstate.ipred_ctx_u;
914 let ipred_ctx_v = &mut self.dstate.ipred_ctx_v;
915 ipred_ctx_u.has_top = has_top;
916 ipred_ctx_v.has_top = has_top;
917 ipred_ctx_u.has_left = has_left;
918 ipred_ctx_v.has_left = has_left;
919 ipred_ctx_u.fill(dst, uoff, ustride, 8, 8);
920 ipred_ctx_v.fill(dst, voff, vstride, 8, 8);
922 if !has_top && self.mb_info[mb_idx].uvmode == PredMode::VPred {
923 IPred8x8::ipred_const(dst, uoff, ustride, 0x7F);
924 IPred8x8::ipred_const(dst, voff, vstride, 0x7F);
925 } else if !has_left && self.mb_info[mb_idx].uvmode == PredMode::HPred {
926 IPred8x8::ipred_const(dst, uoff, ustride, 0x81);
927 IPred8x8::ipred_const(dst, voff, vstride, 0x81);
929 match self.mb_info[mb_idx].uvmode {
930 PredMode::DCPred => {
931 IPred8x8::ipred_dc(dst, uoff, ustride, ipred_ctx_u);
932 IPred8x8::ipred_dc(dst, voff, vstride, ipred_ctx_v);
935 IPred8x8::ipred_h(dst, uoff, ustride, ipred_ctx_u);
936 IPred8x8::ipred_h(dst, voff, vstride, ipred_ctx_v);
939 IPred8x8::ipred_v(dst, uoff, ustride, ipred_ctx_u);
940 IPred8x8::ipred_v(dst, voff, vstride, ipred_ctx_v);
942 PredMode::TMPred => {
943 IPred8x8::ipred_tm(dst, uoff, ustride, ipred_ctx_u);
944 IPred8x8::ipred_tm(dst, voff, vstride, ipred_ctx_v);
// Non-BPred MBs add the luma residue here; BPred already did it per block.
950 self.add_residue(dframe, mb_x, mb_y, is_normal);
// Reconstructs one inter macroblock: luma MC (whole 16x16 or per-4x4 for
// split-MV), derived chroma MVs, then residue addition. Bitstream version 0
// uses the six-tap filters, versions 1-3 the bilinear ones.
954 fn recon_inter_mb(&mut self, dframe: &mut NASimpleVideoFrame<u8>, mb_x: usize, mb_y: usize, mb_coeff_skip: bool, rframe: VP8Ref) {
955 let refframe = match rframe {
956 VP8Ref::Last => self.shuf.get_last(),
957 VP8Ref::Golden => self.shuf.get_golden(),
958 VP8Ref::AltRef => self.shuf.get_altref(),
// Intra MBs never reach this function.
959 VP8Ref::Intra => unreachable!(),
961 let single_mv = self.mb_info[mb_x + mb_y * self.mb_w].mb_type != VPMBType::InterFourMV;
962 let mut iidx = mb_x * 4 + mb_y * 4 * self.mv_stride;
963 let mc_buf = self.mc_buf.get_data_mut().unwrap();
965 let dst = &mut dframe.data[0..];
966 let ystride = dframe.stride[0];
967 let mut yoff = dframe.offset[0] + mb_x * 16 + mb_y * 16 * ystride;
969 if self.dstate.version == 0 {
// Luma MVs are scaled by 2 before MC — presumably converting the MV
// unit to the MC functions' sub-pel precision; confirm in vpcommon.
970 mc_block16x16(dst, yoff, ystride, mb_x * 16, mb_y * 16,
971 self.mvs[iidx].x * 2, self.mvs[iidx].y * 2, refframe.clone(), 0, mc_buf);
973 mc_block16x16_bilin(dst, yoff, ystride, mb_x * 16, mb_y * 16,
974 self.mvs[iidx].x * 2, self.mvs[iidx].y * 2, refframe.clone(), 0, mc_buf);
// Split-MV path: sixteen 4x4 MC calls (loop headers elided).
979 if self.dstate.version == 0 {
980 mc_block4x4(dst, yoff + x * 4, ystride, mb_x * 16 + x * 4, mb_y * 16 + y * 4,
981 self.mvs[iidx + x].x * 2, self.mvs[iidx + x].y * 2, refframe.clone(), 0, mc_buf);
983 mc_block4x4_bilin(dst, yoff + x * 4, ystride, mb_x * 16 + x * 4, mb_y * 16 + y * 4,
984 self.mvs[iidx + x].x * 2, self.mvs[iidx + x].y * 2, refframe.clone(), 0, mc_buf);
988 iidx += self.mv_stride;
992 let mut iidx = mb_x * 4 + mb_y * 4 * self.mv_stride;
993 let mut uoff = dframe.offset[1] + mb_x * 8 + mb_y * 8 * dframe.stride[1];
994 let ustride = dframe.stride[1];
995 let mut voff = dframe.offset[2] + mb_x * 8 + mb_y * 8 * dframe.stride[2];
996 let vstride = dframe.stride[2];
// Single-MV chroma: reuse the luma MV (rounding handled below / elided).
998 let mut chroma_mv = self.mvs[iidx];
1000 if self.dstate.version == 0 {
1001 mc_block8x8(dst, uoff, ustride, mb_x * 8, mb_y * 8, chroma_mv.x, chroma_mv.y, refframe.clone(), 1, mc_buf);
1002 mc_block8x8(dst, voff, vstride, mb_x * 8, mb_y * 8, chroma_mv.x, chroma_mv.y, refframe, 2, mc_buf);
// Version 3 disables fractional chroma MVs (full-pel only, masking elided).
1004 if self.dstate.version == 3 {
1008 mc_block8x8_bilin(dst, uoff, ustride, mb_x * 8, mb_y * 8, chroma_mv.x, chroma_mv.y, refframe.clone(), 1, mc_buf);
1009 mc_block8x8_bilin(dst, voff, vstride, mb_x * 8, mb_y * 8, chroma_mv.x, chroma_mv.y, refframe, 2, mc_buf);
// Split-MV chroma: average the four covering luma MVs with rounding
// away from zero (the +4 >> 3 style rounding lines are elided).
1014 let mut chroma_mv = self.mvs[iidx + x * 2] + self.mvs[iidx + x * 2 + 1]
1015 + self.mvs[iidx + x * 2 + self.mv_stride]
1016 + self.mvs[iidx + x * 2 + self.mv_stride + 1];
1017 if chroma_mv.x < 0 {
1022 if chroma_mv.y < 0 {
1030 if self.dstate.version == 3 {
1035 if self.dstate.version == 0 {
1036 mc_block4x4(dst, uoff + x * 4, ustride, mb_x * 8 + x * 4, mb_y * 8 + y * 4,
1037 chroma_mv.x, chroma_mv.y, refframe.clone(), 1, mc_buf);
1038 mc_block4x4(dst, voff + x * 4, vstride, mb_x * 8 + x * 4, mb_y * 8 + y * 4,
1039 chroma_mv.x, chroma_mv.y, refframe.clone(), 2, mc_buf);
1041 mc_block4x4_bilin(dst, uoff + x * 4, ustride, mb_x * 8 + x * 4, mb_y * 8 + y * 4,
1042 chroma_mv.x, chroma_mv.y, refframe.clone(), 1, mc_buf);
1043 mc_block4x4_bilin(dst, voff + x * 4, vstride, mb_x * 8 + x * 4, mb_y * 8 + y * 4,
1044 chroma_mv.x, chroma_mv.y, refframe.clone(), 2, mc_buf);
1047 uoff += ustride * 4;
1048 voff += vstride * 4;
1049 iidx += 2 * self.mv_stride;
1053 self.add_residue(dframe, mb_x, mb_y, true);
// Runs the in-loop deblocking filter for one macroblock: vertical then
// horizontal passes, MB edges with the edge filter and interior 4x4 edges
// with the inner filter. Simple mode filters luma only.
1056 fn loop_filter_mb(&mut self, dframe: &mut NASimpleVideoFrame<u8>, mb_x: usize, mb_y: usize, loop_str: u8, filter_inner: bool) {
// High-edge-variance thresholds, [1] = intra frames, [0] = inter frames,
// indexed by filter strength.
1057 const HIGH_EDGE_VAR_THR: [[u8; 64]; 2] = [
1059 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1,
1060 1, 1, 1, 1, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
1061 2, 2, 2, 2, 2, 2, 2, 2, 3, 3, 3, 3, 3, 3, 3, 3,
1062 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3
1064 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1,
1065 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
1066 1, 1, 1, 1, 1, 1, 1, 1, 2, 2, 2, 2, 2, 2, 2, 2,
1067 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2
// Inner-edge limit, reduced by the sharpness setting.
1070 let inner_thr = if self.dstate.loop_sharpness == 0 {
1073 let bound1 = i16::from(9 - self.dstate.loop_sharpness);
1074 let shift = (self.dstate.loop_sharpness + 3) >> 2;
1075 (i16::from(loop_str) >> shift).min(bound1).max(1)
1077 let blk_thr = i16::from(loop_str) * 2 + inner_thr;
1078 let edge_thr = blk_thr + 4;
1079 let hev_thr = i16::from(HIGH_EDGE_VAR_THR[if self.dstate.is_intra { 1 } else { 0 }][loop_str as usize]);
1081 let ystride = dframe.stride[0];
1082 let ustride = dframe.stride[1];
1083 let vstride = dframe.stride[2];
1084 let ypos = dframe.offset[0] + mb_x * 16 + mb_y * 16 * ystride;
1085 let upos = dframe.offset[1] + mb_x * 8 + mb_y * 8 * ustride;
1086 let vpos = dframe.offset[2] + mb_x * 8 + mb_y * 8 * vstride;
// Simple mode uses the same (simple) filter for both edge kinds.
1088 let (loop_edge, loop_inner) = if self.dstate.lf_simple {
1089 (simple_loop_filter as LoopFilterFunc, simple_loop_filter as LoopFilterFunc)
1091 (normal_loop_filter_edge as LoopFilterFunc, normal_loop_filter_inner as LoopFilterFunc)
// Vertical edges (step 1 across, stride down): left MB edge (guard elided),
// then interior columns; chroma only in normal mode.
1095 loop_edge(dframe.data, ypos, 1, ystride, 16, edge_thr, inner_thr, hev_thr);
1096 if !self.dstate.lf_simple {
1097 loop_edge(dframe.data, upos, 1, ustride, 8, edge_thr, inner_thr, hev_thr);
1098 loop_edge(dframe.data, vpos, 1, vstride, 8, edge_thr, inner_thr, hev_thr);
1103 loop_inner(dframe.data, ypos + x * 4, 1, ystride, 16, blk_thr, inner_thr, hev_thr);
1105 if !self.dstate.lf_simple {
1106 loop_inner(dframe.data, upos + 4, 1, ustride, 8, blk_thr, inner_thr, hev_thr);
1107 loop_inner(dframe.data, vpos + 4, 1, vstride, 8, blk_thr, inner_thr, hev_thr);
// Horizontal edges (step stride, 1 across): top MB edge then interior rows.
1112 loop_edge(dframe.data, ypos, ystride, 1, 16, edge_thr, inner_thr, hev_thr);
1113 if !self.dstate.lf_simple {
1114 loop_edge(dframe.data, upos, ustride, 1, 8, edge_thr, inner_thr, hev_thr);
1115 loop_edge(dframe.data, vpos, vstride, 1, 8, edge_thr, inner_thr, hev_thr);
1120 loop_inner(dframe.data, ypos + y * 4 * ystride, ystride, 1, 16, blk_thr, inner_thr, hev_thr);
1122 if !self.dstate.lf_simple {
1123 loop_inner(dframe.data, upos + 4 * ustride, ustride, 1, 8, blk_thr, inner_thr, hev_thr);
1124 loop_inner(dframe.data, vpos + 4 * vstride, vstride, 1, 8, blk_thr, inner_thr, hev_thr);
1130 impl NADecoder for VP8Decoder {
// Initializes the decoder: forces YUV420 output, preallocates the frame
// pool (5 buffers: 3 references + work frames) and sizes the MB arrays.
1131 fn init(&mut self, supp: &mut NADecoderSupport, info: NACodecInfoRef) -> DecoderResult<()> {
1132 if let NACodecTypeInfo::Video(vinfo) = info.get_properties() {
1133 let fmt = YUV420_FORMAT;
1134 let myvinfo = NAVideoInfo::new(vinfo.get_width(), vinfo.get_height(), false, fmt);
1135 let myinfo = NACodecTypeInfo::Video(myvinfo);
1136 self.info = NACodecInfo::new_ref(info.get_name(), myinfo, info.get_extradata()).into_ref();
1138 supp.pool_u8.set_dec_bufs(5);
// NOTE(review): preallocation uses vinfo.get_format() while the reported
// output format is the forced YUV420 `fmt` — confirm this mismatch is OK.
1139 supp.pool_u8.prealloc_video(NAVideoInfo::new(myvinfo.get_width(), myvinfo.get_height(), false, vinfo.get_format()), 4)?;
1140 self.set_dimensions(myvinfo.get_width(), myvinfo.get_height());
// Non-video input is rejected.
1143 Err(DecoderError::InvalidData)
// NOTE(review): this extract is partially elided — the leading number on each
// line is an original-file line number left over from extraction, and several
// interior lines (closing braces, `else` arms, `match` headers) are not
// visible here. Only comments are added below; every code line is untouched.
//
// Decodes a single VP8 frame from `pkt`:
//  1. parse the 3-byte uncompressed frame tag (and, for keyframes, the
//     start-code + dimensions block);
//  2. read the per-frame headers from the first bool-coded partition;
//  3. locate the DCT-coefficient partitions and decode macroblocks row by
//     row (modes/MVs from the header partition, residue from the per-row
//     coefficient partition);
//  4. run the in-loop deblocking filter when enabled;
//  5. update the last/golden/altref reference slots and emit the frame.
1146 #[allow(clippy::cognitive_complexity)]
1147 fn decode(&mut self, supp: &mut NADecoderSupport, pkt: &NAPacket) -> DecoderResult<NAFrameRef> {
1148 let src = pkt.get_buffer();
1149 validate!(src.len() > 4);
// Frame tag: bit 0 is clear on keyframes, bits 1..=3 hold the version
// (profile), bit 4 is show_frame, and the remaining bits give the size of
// the first (header) partition.
1151 let frame_tag = read_u24le(src.as_slice())?;
1152 self.dstate.is_intra = (frame_tag & 1) == 0;
1153 self.dstate.version = ((frame_tag >> 1) & 7) as u8;
1154 validate!(self.dstate.version <= 3);
1155 let _show_frame = ((frame_tag >> 4) & 1) != 0;
// Keyframes carry an extra 7 bytes (start code + dimensions) before the
// header partition. Despite its name, `part2_off` is used below as the
// *length* of the header partition.
1156 let part1_off = if self.dstate.is_intra { 10 } else { 3 };
1157 let part2_off = (frame_tag >> 5) as usize;
1158 validate!(src.len() >= part2_off && part2_off > part1_off);
// Keyframe-only block: check the 0x9D012A start code and (re)read the coded
// dimensions. The top two bits of each raw value are scaling hints and are
// deliberately ignored (see the commented-out hscale/vscale).
1160 if self.dstate.is_intra {
1161 validate!(src.len() > 10);
1162 let marker = read_u24be(&src[3..6])?;
1163 validate!(marker == 0x9D012A);
1164 let width_ = read_u16le(&src[6..8])?;
1165 let height_ = read_u16le(&src[8..10])?;
1166 let width = ((width_ + 1) & 0x3FFE) as usize;
1167 let height = ((height_ + 1) & 0x3FFE) as usize;
1168 // let hscale = width_ >> 14;
1169 // let vscale = height_ >> 14;
1171 validate!((width > 0) && (height > 0));
1172 self.set_dimensions(width, height);
1174 self.dstate.reset();
// Inter frames need at least one previously decoded reference frame.
1176 if !self.shuf.has_refs() {
1177 return Err(DecoderError::MissingReference);
// Bool-coder over the header partition only.
1181 let mut bc = BoolCoder::new(&src[part1_off..][..part2_off])?;
1183 if self.dstate.is_intra {
1184 let _color_space = bc.read_bool();
1185 let _clamping_type = bc.read_bool();
// Segmentation: when absent, the per-segment quant/loop-strength overrides
// are cleared so stale values from a previous frame cannot leak through.
1188 self.dstate.segmentation = bc.read_bool();
1189 if self.dstate.segmentation {
1190 self.update_segmentation(&mut bc)?;
1192 self.dstate.update_seg_map = false;
1193 self.dstate.force_quant = None;
1194 self.dstate.force_loop_str = None;
// Loop-filter parameters plus per-reference/per-mode filter adjustments.
1197 self.dstate.lf_simple = bc.read_bool();
1198 self.dstate.loop_filter_level = bc.read_bits(6) as u8;
1199 self.dstate.loop_sharpness = bc.read_bits(3) as u8;
1201 self.mb_lf_adjustments(&mut bc)?;
// 1, 2, 4 or 8 DCT-coefficient partitions.
1203 let num_partitions = 1 << bc.read_bits(2);
1205 self.quant_indices(&mut bc)?;
// Reference-management flags. For keyframes every reference is refreshed;
// the value 4 appears to be the "take the current frame" code consumed by
// the (elided) match near the end of this function — see the
// `4 => ...add_golden_frame(buf.clone())` arm below.
1207 let (keep_probs, update_last, update_gf, update_ar) = if self.dstate.is_intra {
1208 let refresh_entropy_probs = bc.read_bool();
1209 (refresh_entropy_probs, true, 4, 4)
1211 let refresh_golden_frame = bc.read_bool();
1212 let refresh_alternate_frame = bc.read_bool();
1213 let copy_to_golden = if !refresh_golden_frame {
1216 validate!(copy_to_golden != 3);
1217 let copy_to_altref = if !refresh_alternate_frame {
1220 validate!(copy_to_altref != 3);
1221 self.dstate.sign_bias[0] = bc.read_bool();
1222 self.dstate.sign_bias[1] = bc.read_bool();
1223 let refresh_entropy_probs = bc.read_bool();
1224 let refresh_last = bc.read_bool();
1225 (refresh_entropy_probs, refresh_last, copy_to_golden, copy_to_altref)
// Snapshot the entropy probabilities so they can be restored after this
// frame when `keep_probs` is false (see `restore` near the end).
1229 self.dstate.save(&mut self.tmp_probs);
1232 self.read_dct_coef_prob_upd(&mut bc)?;
1234 let mb_no_coeff_skip = bc.read_bool();
1235 let prob_skip_false = bc.read_byte();
// Inter frames additionally signal intra-vs-inter and reference-selection
// probabilities plus updated Y/UV mode and MV probabilities.
1237 if !self.dstate.is_intra {
1238 self.dstate.prob_intra_pred = bc.read_byte();
1239 self.dstate.prob_last_pred = bc.read_byte();
1240 self.dstate.prob_gold_pred = bc.read_byte();
1243 self.dstate.kf_ymode_prob[i] = bc.read_byte();
1248 self.dstate.kf_uvmode_prob[i] = bc.read_byte();
1251 self.read_mv_prob_upd(&mut bc)?;
// Locate the coefficient partitions: sizes of all but the last are stored
// as 3-byte LE values right after the header partition; the last partition
// extends to the end of the packet. Unused slots of the fixed 8-entry
// table are pointed at the end of the data.
1254 let mut data_start = part1_off + part2_off + (num_partitions - 1) * 3;
1255 let mut part_offs = [0; 8];
1256 validate!(data_start <= src.len());
1257 let mut size = src.len() - data_start;
1258 for i in 0..num_partitions - 1 {
1259 let len = read_u24le(&src[part1_off + part2_off + i * 3..][..3])? as usize;
1260 validate!(size >= len);
1261 part_offs[i] = data_start;
1265 part_offs[num_partitions - 1] = data_start;
1266 for start in part_offs[num_partitions..].iter_mut() {
1267 *start = data_start;
// One bool-coder per partition slot; offsets were validated above, hence
// the unwrap.
1269 let mut bc_src = Vec::new();
1270 for &off in part_offs.iter() {
1271 bc_src.push(BoolCoder::new(&src[off..]).unwrap());
// Obtain an output buffer from the pool, re-preallocating the pool if the
// frame dimensions changed since it was set up.
1274 let vinfo = NAVideoInfo::new(self.width, self.height, false, YUV420_FORMAT);
1275 let ret = supp.pool_u8.get_free();
1277 return Err(DecoderError::AllocError);
1279 let mut buf = ret.unwrap();
1280 if buf.get_info() != vinfo {
1282 supp.pool_u8.reset();
1283 supp.pool_u8.prealloc_video(vinfo, 4)?;
1284 let ret = supp.pool_u8.get_free();
1286 return Err(DecoderError::AllocError);
1290 let mut dframe = NASimpleVideoFrame::from_video_buf(&mut buf).unwrap();
1293 self.pcache.reset();
1294 let mut rframe = VP8Ref::Last;
// Version 3 bitstreams never run the loop filter; otherwise it runs when
// the frame-level filter level is non-zero.
1295 let loop_filter = self.dstate.version != 3 && self.dstate.loop_filter_level > 0;
// Main macroblock loop. Modes, MVs and per-MB flags come from the header
// partition (`bc`); DCT residue comes from the per-row coefficient
// partition (`bc_main`, selected by mb_y masked by the partition count).
1296 for mb_y in 0..self.mb_h {
1297 let bc_main = &mut bc_src[mb_y & (num_partitions - 1)];
1298 for mb_x in 0..self.mb_w {
1299 if self.dstate.update_seg_map {
1300 self.decode_mb_features(&mut bc, mb_idx)?;
1302 if self.dstate.segmentation {
1303 self.set_cur_segment(mb_idx);
1305 let mb_coeff_skip = if mb_no_coeff_skip {
1306 bc.read_prob(prob_skip_false)
// `has_y2` stays true unless the MB uses per-block modes (BPred) or
// four-MV splitting, both of which clear it below.
1308 self.dstate.has_y2 = true;
1309 if self.dstate.is_intra {
// Keyframe intra coding: fixed probability trees for Y and UV modes; BPred
// additionally codes a 4x4 mode per sub-block with context taken from the
// modes above and to the left.
1310 let ymode = bc.read_tree(KF_Y_MODE_TREE, KF_Y_MODE_TREE_PROBS);
1311 if ymode == PredMode::BPred {
1312 self.dstate.has_y2 = false;
1313 let mut iidx = mb_x * 4 + mb_y * 4 * self.ymode_stride;
1316 let top_mode = if (y > 0) || (mb_y > 0) {
1317 self.ymodes[iidx + x - self.ymode_stride]
1321 let left_mode = if (x > 0) || (mb_x > 0) {
1322 self.ymodes[iidx + x - 1]
1326 let top_idx = top_mode.to_b_index();
1327 let left_idx = left_mode.to_b_index();
1328 let bmode = bc.read_tree(B_MODE_TREE, &KF_B_MODE_TREE_PROBS[top_idx][left_idx]);
1329 self.ymodes[iidx + x] = bmode;
1331 iidx += self.ymode_stride;
1334 self.fill_ymode(mb_x, mb_y, ymode);
1336 let uvmode = bc.read_tree(UV_MODE_TREE, KF_UV_MODE_TREE_PROBS);
1337 self.mb_info[mb_idx].mb_type = VPMBType::Intra;
1338 self.mb_info[mb_idx].ymode = ymode;
1339 self.mb_info[mb_idx].uvmode = uvmode;
1340 self.mb_info[mb_idx].rframe = VP8Ref::Intra;
// Inter-frame MB coded as intra: same structure but using the
// frame-adaptive probability tables and the context-free 4x4 mode tree.
1341 } else if !bc.read_prob(self.dstate.prob_intra_pred) {
1342 let ymode = bc.read_tree(Y_MODE_TREE, &self.dstate.kf_ymode_prob);
1343 if ymode == PredMode::BPred {
1344 self.dstate.has_y2 = false;
1345 let mut iidx = mb_x * 4 + mb_y * 4 * self.ymode_stride;
1348 let bmode = bc.read_tree(B_MODE_TREE, B_MODE_TREE_PROBS);
1349 self.ymodes[iidx + x] = bmode;
1351 iidx += self.ymode_stride;
1354 self.fill_ymode(mb_x, mb_y, PredMode::Inter);
1356 let uvmode = bc.read_tree(UV_MODE_TREE, &self.dstate.kf_uvmode_prob);
1357 self.mb_info[mb_idx].mb_type = VPMBType::Intra;
1358 self.mb_info[mb_idx].ymode = ymode;
1359 self.mb_info[mb_idx].uvmode = uvmode;
1360 self.mb_info[mb_idx].rframe = VP8Ref::Intra;
1361 self.fill_mv(mb_x, mb_y, ZERO_MV);
// Inter MB: select the reference frame (last/golden/altref; the arm bodies
// are elided in this extract), predict the MV from neighbours, then decode
// the MB type from the MV-reference tree.
1363 rframe = if !bc.read_prob(self.dstate.prob_last_pred) {
1365 } else if !bc.read_prob(self.dstate.prob_gold_pred) {
1371 let frm_sign = self.get_frame_sign(rframe);
1372 let (mvprobs, nearest_mv, near_mv, pred_mv) = self.find_mv_pred(mb_x, mb_y, frm_sign);
1373 let mbtype = bc.read_tree(MV_REF_TREE, &mvprobs);
1376 VPMBType::InterNearest => {
1377 self.fill_mv(mb_x, mb_y, nearest_mv);
1379 VPMBType::InterNear => {
1380 self.fill_mv(mb_x, mb_y, near_mv);
1382 VPMBType::InterNoMV => {
1383 self.fill_mv(mb_x, mb_y, ZERO_MV);
1385 VPMBType::InterMV => {
// Note the component order: the Y delta is decoded first, then X.
1386 let dmy = decode_mv_component(&mut bc, &self.dstate.mv_probs[0]);
1387 let dmx = decode_mv_component(&mut bc, &self.dstate.mv_probs[1]);
1388 let new_mv = pred_mv + MV{ x: dmx, y: dmy };
1389 self.fill_mv(mb_x, mb_y, new_mv);
1391 VPMBType::InterFourMV => {
1392 self.dstate.has_y2 = false;
1393 self.do_split_mv(&mut bc, mb_x, mb_y, pred_mv)?;
1395 _ => unreachable!(),
1398 self.fill_ymode(mb_x, mb_y, PredMode::Inter);
1399 self.mb_info[mb_idx].mb_type = mbtype;
1400 self.mb_info[mb_idx].ymode = PredMode::Inter;
1401 self.mb_info[mb_idx].uvmode = PredMode::Inter;
1402 self.mb_info[mb_idx].rframe = rframe;
1404 let has_coeffs = if !mb_coeff_skip {
1405 self.decode_residue(bc_main, mb_x)
// When the MB has no Y2 block, preserve the Y2 prediction context across
// the left-context reset so the next Y2-carrying MB sees correct state.
1407 let y2_left = self.pcache.y2_pred_left;
1408 let y2_top = self.pcache.y2_pred.data[self.pcache.y2_pred.xpos + mb_x - self.pcache.y2_pred.stride];
1409 self.pcache.reset_left();
1410 if !self.dstate.has_y2 {
1411 self.pcache.y2_pred_left = y2_left;
1412 self.pcache.y2_pred.data[self.pcache.y2_pred.xpos + mb_x] = y2_top;
// Reconstruct the macroblock pixels (intra prediction or motion
// compensation plus residue).
1416 match self.mb_info[mb_idx].mb_type {
1417 VPMBType::Intra => {
1418 self.recon_intra_mb(&mut dframe, mb_x, mb_y, mb_coeff_skip)?;
1421 self.recon_inter_mb(&mut dframe, mb_x, mb_y, mb_coeff_skip, rframe);
// Per-MB loop-filter strength: segment override if present, otherwise the
// frame-level value, then the optional per-reference and per-mode deltas,
// clamped to 0..=63.
1425 if let Some(loop_str) = self.dstate.force_loop_str {
1426 self.mb_info[mb_idx].loop_str = loop_str;
1428 self.mb_info[mb_idx].loop_str = self.dstate.loop_filter_level;
1430 if self.dstate.lf_delta {
1431 let mut loop_str = self.mb_info[mb_idx].loop_str as i8;
1432 let idx = match self.mb_info[mb_idx].rframe {
1435 VP8Ref::Golden => 2,
1436 VP8Ref::AltRef => 3,
1438 loop_str += self.dstate.lf_frame_delta[idx];
1439 let idx = match self.mb_info[mb_idx].mb_type {
1440 VPMBType::Intra => 0,
1441 VPMBType::InterNoMV => 1,
1442 VPMBType::InterFourMV => 3,
1445 if self.mb_info[mb_idx].mb_type != VPMBType::Intra || self.mb_info[mb_idx].ymode == PredMode::BPred {
1446 loop_str += self.dstate.lf_mode_delta[idx];
1448 self.mb_info[mb_idx].loop_str = loop_str.max(0).min(63) as u8;
// Inner (sub-block) edges are filtered only when the MB actually carries
// residue or uses sub-block modes/MVs.
1450 self.mb_info[mb_idx].inner_filt = has_coeffs || (self.mb_info[mb_idx].ymode == PredMode::BPred) || (self.mb_info[mb_idx].mb_type == VPMBType::InterFourMV);
1454 self.pcache.update_row();
1455 self.pcache.reset_left();
// Deblocking pass over the whole frame (the guard on `loop_filter` is
// elided in this extract — presumably it wraps this loop; TODO confirm).
1459 for mb_y in 0..self.mb_h {
1460 for mb_x in 0..self.mb_w {
1461 let loop_str = self.mb_info[mb_idx].loop_str;
1463 self.loop_filter_mb(&mut dframe, mb_x, mb_y, loop_str, self.mb_info[mb_idx].inner_filt);
// Roll back the entropy probabilities when the frame asked not to keep them.
1471 self.dstate.restore(&self.tmp_probs);
// Reference updates driven by `update_ar`/`update_gf`: altref/golden are
// either copied from last/each other or refreshed from the current frame
// (code 4); the surrounding match headers are elided in this extract.
1476 let last = self.shuf.get_last().unwrap();
1477 self.shuf.add_altref_frame(last);
1480 let golden = self.shuf.get_golden().unwrap();
1481 self.shuf.add_altref_frame(golden);
1486 4 => self.shuf.add_golden_frame(buf.clone()),
1488 let last = self.shuf.get_last().unwrap();
1489 self.shuf.add_golden_frame(last);
1492 let altref = self.shuf.get_altref().unwrap();
1493 self.shuf.add_golden_frame(altref);
1498 self.shuf.add_altref_frame(buf.clone());
1501 self.shuf.add_frame(buf.clone());
// Wrap the buffer into an output frame with proper keyframe/type flags.
1504 let mut frm = NAFrame::new_from_pkt(pkt, self.info.clone(), NABufferType::Video(buf));
1505 frm.set_keyframe(self.dstate.is_intra);
1506 frm.set_frame_type(if self.dstate.is_intra { FrameType::I } else { FrameType::P });
// Resets decoder state on a seek/discontinuity. The body is elided in this
// extract — presumably it clears the reference-frame shuffler (see
// `VP8Shuffler::clear`); TODO confirm against the full source.
1509 fn flush(&mut self) {
// VP8Decoder exposes no user-configurable options, so the option-handler
// trait is implemented with no-op stubs.
1514 impl NAOptionHandler for VP8Decoder {
1515 fn get_supported_options(&self) -> &[NAOptionDefinition] { &[] }
1516 fn set_options(&mut self, _options: &[NAOption]) { }
1517 fn query_option_value(&self, _name: &str) -> Option<NAValue> { None }
// Registration entry point: creates a boxed VP8 decoder instance behind the
// generic `NADecoder` trait object.
1520 pub fn get_decoder() -> Box<dyn NADecoder + Send> {
1521 Box::new(VP8Decoder::new())
1526 use nihav_core::codecs::RegisteredDecoders;
1527 use nihav_core::demuxers::RegisteredDemuxers;
1528 use nihav_codec_support::test::dec_video::*;
1529 use crate::duck_register_all_decoders;
1530 use crate::duck_register_all_demuxers;
1532 // all samples are from the official VP8 test bitstreams set
// Shared driver for the conformance tests below: registers the Duck-family
// demuxers and decoders, decodes `name` from an IVF container, and compares
// the decoded output's MD5 against the expected `hash`.
1533 fn test_vp8_core(name: &str, hash: [u32; 4]) {
1534 let mut dmx_reg = RegisteredDemuxers::new();
1535 duck_register_all_demuxers(&mut dmx_reg);
1536 let mut dec_reg = RegisteredDecoders::new();
1537 duck_register_all_decoders(&mut dec_reg);
1539 test_decoding("dkivf", "vp8", name, None, &dmx_reg,
1540 &dec_reg, ExpectedTestResult::MD5(hash));
// Conformance tests over samples vp80-00-comprehensive-001..017: each one
// calls `test_vp8_core` with a sample path and its expected MD5. The
// `#[test] fn ...` headers are elided in this extract, as are the hash
// lines for samples 006 and 014.
1545 test_vp8_core("assets/Duck/VP8/vp80-00-comprehensive-001.ivf",
1546 [0xfad12607, 0x4e1bd536, 0x3d43b9d1, 0xcadddb71]);
1550 test_vp8_core("assets/Duck/VP8/vp80-00-comprehensive-002.ivf",
1551 [0x182f03dd, 0x264ebac0, 0x4e24c7c9, 0x499d7cdb]);
1555 test_vp8_core("assets/Duck/VP8/vp80-00-comprehensive-003.ivf",
1556 [0xe5fe668b, 0x03390002, 0x2c3eb0ba, 0x76a44bd1]);
1560 test_vp8_core("assets/Duck/VP8/vp80-00-comprehensive-004.ivf",
1561 [0x95097ce9, 0x808c1d47, 0xe03f99c4, 0x8ad111ec]);
1565 test_vp8_core("assets/Duck/VP8/vp80-00-comprehensive-005.ivf",
1566 [0x0f469e4f, 0xd1dea533, 0xe5580688, 0xb2d242ff]);
1570 test_vp8_core("assets/Duck/VP8/vp80-00-comprehensive-006.ivf",
1575 test_vp8_core("assets/Duck/VP8/vp80-00-comprehensive-007.ivf",
1576 [0x92526913, 0xd89b6a9b, 0x00f2d602, 0xdef08bce]);
1580 test_vp8_core("assets/Duck/VP8/vp80-00-comprehensive-008.ivf",
1581 [0x1676d1eb, 0x19bd175e, 0xc5bb10f5, 0xd49f24f1]);
1585 test_vp8_core("assets/Duck/VP8/vp80-00-comprehensive-009.ivf",
1586 [0x19201a2d, 0x535bd82f, 0x41c1a565, 0x8def5379]);
1590 test_vp8_core("assets/Duck/VP8/vp80-00-comprehensive-010.ivf",
1591 [0x61d05919, 0xa9883d9f, 0x215eb3f2, 0xdb63eb13]);
1595 test_vp8_core("assets/Duck/VP8/vp80-00-comprehensive-011.ivf",
1596 [0x1a0afe5e, 0x70512a03, 0x323a8f11, 0x76bcf022]);
1600 test_vp8_core("assets/Duck/VP8/vp80-00-comprehensive-012.ivf",
1601 [0x4ea997c8, 0x0dc2087e, 0x6deec81f, 0x1ecf6668]);
1605 test_vp8_core("assets/Duck/VP8/vp80-00-comprehensive-013.ivf",
1606 [0x93169305, 0xd3054327, 0xbe3cc074, 0xf0773a75]);
1610 test_vp8_core("assets/Duck/VP8/vp80-00-comprehensive-014.ivf",
1615 test_vp8_core("assets/Duck/VP8/vp80-00-comprehensive-015.ivf",
1616 [0x23b9cc58, 0x2e344726, 0xe76cda09, 0x2b416bcf]);
1620 test_vp8_core("assets/Duck/VP8/vp80-00-comprehensive-016.ivf",
1621 [0x55e889d2, 0x2f99718c, 0xf6936d55, 0xf8ade12b]);
1625 test_vp8_core("assets/Duck/VP8/vp80-00-comprehensive-017.ivf",
1626 [0x95a68ffb, 0x228d1d8c, 0x6ee54f16, 0xa10fb9eb]);
// DC dequantization lookup table: 128 entries indexed by the frame/segment
// quantizer index. (The closing `];` is elided in this extract.)
1630 const DC_QUANTS: [i16; 128] = [
1631 4, 5, 6, 7, 8, 9, 10, 10,
1632 11, 12, 13, 14, 15, 16, 17, 17,
1633 18, 19, 20, 20, 21, 21, 22, 22,
1634 23, 23, 24, 25, 25, 26, 27, 28,
1635 29, 30, 31, 32, 33, 34, 35, 36,
1636 37, 37, 38, 39, 40, 41, 42, 43,
1637 44, 45, 46, 46, 47, 48, 49, 50,
1638 51, 52, 53, 54, 55, 56, 57, 58,
1639 59, 60, 61, 62, 63, 64, 65, 66,
1640 67, 68, 69, 70, 71, 72, 73, 74,
1641 75, 76, 76, 77, 78, 79, 80, 81,
1642 82, 83, 84, 85, 86, 87, 88, 89,
1643 91, 93, 95, 96, 98, 100, 101, 102,
1644 104, 106, 108, 110, 112, 114, 116, 118,
1645 122, 124, 126, 128, 130, 132, 134, 136,
1646 138, 140, 143, 145, 148, 151, 154, 157
// AC dequantization lookup table: 128 entries indexed by the frame/segment
// quantizer index; linear (index + 4) up to index 53, then a steeper ramp.
// (The closing `];` falls past the end of this extract.)
1649 const AC_QUANTS: [i16; 128] = [
1650 4, 5, 6, 7, 8, 9, 10, 11,
1651 12, 13, 14, 15, 16, 17, 18, 19,
1652 20, 21, 22, 23, 24, 25, 26, 27,
1653 28, 29, 30, 31, 32, 33, 34, 35,
1654 36, 37, 38, 39, 40, 41, 42, 43,
1655 44, 45, 46, 47, 48, 49, 50, 51,
1656 52, 53, 54, 55, 56, 57, 58, 60,
1657 62, 64, 66, 68, 70, 72, 74, 76,
1658 78, 80, 82, 84, 86, 88, 90, 92,
1659 94, 96, 98, 100, 102, 104, 106, 108,
1660 110, 112, 114, 116, 119, 122, 125, 128,
1661 131, 134, 137, 140, 143, 146, 149, 152,
1662 155, 158, 161, 164, 167, 170, 173, 177,
1663 181, 185, 189, 193, 197, 201, 205, 209,
1664 213, 217, 221, 225, 229, 234, 239, 245,
1665 249, 254, 259, 264, 269, 274, 279, 284