1 use nihav_core::codecs::*;
2 use nihav_core::io::byteio::*;
3 use nihav_codec_support::codecs::{MV, ZERO_MV};
4 use nihav_codec_support::data::GenericCache;
5 use super::vpcommon::*;
9 enum VPTreeDef<T: Copy> {
15 fn read_tree<T:Copy>(&mut self, tree_def: &[VPTreeDef<T>], tree_prob: &[u8]) -> T;
// Generic VP-style tree decoder: reads one bool-coded bit per internal node
// (probability from `tree_prob`) and follows the tree until a leaf Value is
// reached.
// NOTE(review): interior lines of this impl are missing from this excerpt;
// comments describe only the visible statements.
18 impl<'a> VPTreeReader for BoolCoder<'a> {
19 fn read_tree<T:Copy>(&mut self, tree_def: &[VPTreeDef<T>], tree_prob: &[u8]) -> T {
// One probability per node pair, hence the halved index.
23 let bit = self.read_prob(tree_prob[idx >> 1]);
24 match tree_def[idx + (bit as usize)] {
// Leaf: the decoded symbol.
25 VPTreeDef::Value(v) => return v,
// Internal node: continue from the referenced node pair.
26 VPTreeDef::Index(ix) => { idx = ix as usize; },
33 #[derive(Clone,Copy,PartialEq,Debug)]
41 //sub-block prediction modes
// DC prediction is the default intra prediction mode.
52 impl Default for PredMode {
53 fn default() -> Self { PredMode::DCPred }
// Converts a 16x16 luma mode into its 4x4 (B-mode) equivalent; only the
// DCPred branch is visible here — the remaining lines are missing from
// this excerpt.
57 fn to_b_mode(self) -> Self {
58 if self == PredMode::DCPred {
// Maps a 4x4 prediction mode to its context index in the
// KF_B_MODE_TREE_PROBS tables (used for neighbour-dependent B-mode
// decoding).  NOTE(review): arms for indices 2..3 are not visible in this
// excerpt.
64 fn to_b_index(self) -> usize {
66 PredMode::DCPred => 0,
67 PredMode::TMPred => 1,
70 PredMode::LDPred => 4,
71 PredMode::RDPred => 5,
72 PredMode::VRPred => 6,
73 PredMode::VLPred => 7,
74 PredMode::HDPred => 8,
75 PredMode::HUPred => 9,
// Per-MB pitch (row layout) modes signalled via the pitch feature; they
// select the row stride pattern used in add_residue/recon_* below.
// NOTE(review): exact semantics inferred from their use later in this file
// — confirm against VP7 feature documentation.
81 const PITCH_MODE_NORMAL: u8 = 0;
82 const PITCH_MODE_FOUR: u8 = 1;
83 const PITCH_MODE_X2: u8 = 2;
84 const PITCH_MODE_X4: u8 = 3;
86 #[derive(Clone,Copy,Default)]
93 #[derive(Clone,Copy,PartialEq)]
// Expands a decoded DCT token into a signed coefficient level.
// Magnitudes 0..4 are coded directly with just a sign bit; larger values
// select a VP5/6-style category whose extra magnitude bits are read with
// per-bit probabilities and added to the category base value.
// NOTE(review): declarations of `cat`/`add`/`raw` and the final sign
// application are missing from this excerpt.
109 fn expand_token(bc: &mut BoolCoder, token: DCTToken) -> i16 {
112 DCTToken::Zero => return 0,
113 DCTToken::One => return if bc.read_bool() { -1 } else { 1 },
114 DCTToken::Two => return if bc.read_bool() { -2 } else { 2 },
115 DCTToken::Three => return if bc.read_bool() { -3 } else { 3 },
116 DCTToken::Four => return if bc.read_bool() { -4 } else { 4 },
117 DCTToken::Cat1 => cat = 0,
118 DCTToken::Cat2 => cat = 1,
119 DCTToken::Cat3 => cat = 2,
120 DCTToken::Cat4 => cat = 3,
121 DCTToken::Cat5 => cat = 4,
122 DCTToken::Cat6 => cat = 5,
// Category extra bits are read MSB-first; probability 128 terminates the
// per-category list.
126 let add_probs = &VP56_COEF_ADD_PROBS[cat];
127 for prob in add_probs.iter() {
128 if *prob == 128 { break; }
129 add = (add << 1) | (bc.read_prob(*prob) as i16);
131 let sign = bc.read_bool();
132 let level = VP56_COEF_BASE[cat] + add;
140 struct SBParams<'a> {
141 coef_probs: &'a [[[[u8; 11]; 3]; 8]; 4],
142 scan: &'a [usize; 16],
// Decodes one 4x4 coefficient sub-block.
// `ctype` selects the coefficient-probability plane (callers below use
// 0 = luma with separate Y2, 1 = Y2, 2 = chroma); `pctx` is the left+top
// non-zero context.  Returns 1 if any coefficient was non-zero.
// NOTE(review): the `has_nz` accumulation line is missing from this
// excerpt.
146 fn decode_subblock<'a>(bc: &mut BoolCoder, coeffs: &mut [i16; 16], ctype: usize, pctx: u8, sbparams: &SBParams) -> u8 {
// Maps scan position to one of the 8 probability bands.
147 const COEF_BANDS: [usize; 16] = [ 0, 1, 2, 3, 6, 4, 5, 6, 6, 6, 6, 6, 6, 6, 6, 7 ];
// Luma blocks with a separate Y2 block skip the DC coefficient (index 0).
150 let start = if ctype != 0 { 0 } else { 1 };
152 let mut cval = pctx as usize;
153 for idx in start..16 {
154 let probs = &sbparams.coef_probs[ctype][COEF_BANDS[idx]][cval];
155 let tok = bc.read_tree(COEF_TREE, probs);
156 if tok == DCTToken::EOB { break; }
157 let level = expand_token(bc, tok);
// Dequantise in place; the coefficient lands at its (possibly custom)
// scan-order slot.
158 coeffs[sbparams.scan[idx]] = level.wrapping_mul(sbparams.qmat[idx]);
// Context for the next coefficient: magnitude clamped to 0, 1 or 2+.
159 cval = level.abs().min(2) as usize;
162 if has_nz > 0 { 1 } else { 0 }
165 #[derive(Clone,Copy,Default)]
175 struct DecoderState {
176 features: [Option<MBFeature>; 4],
183 loop_filter_level: u8,
189 kf_ymode_prob: [u8; 4],
190 kf_uvmode_prob: [u8; 3],
195 coef_probs: [[[[u8; 11]; 3]; 8]; 4],
196 mv_probs: [[u8; 17]; 2],
198 force_quant: Option<u8>,
199 force_loop_str: Option<u8>,
200 force_gf_update: bool,
201 force_pitch: Option<u8>,
204 pdc_pred_val: [i16; 2],
205 pdc_pred_count: [usize; 2],
207 ipred_ctx_y: IPredContext,
208 ipred_ctx_u: IPredContext,
209 ipred_ctx_v: IPredContext,
// Restores the adaptive probability tables to their spec defaults.
213 fn reset(&mut self) {
214 self.kf_ymode_prob.copy_from_slice(Y_MODE_TREE_PROBS);
215 self.kf_uvmode_prob.copy_from_slice(UV_MODE_TREE_PROBS);
216 self.coef_probs.copy_from_slice(&DEFAULT_DCT_PROBS);
217 self.mv_probs.copy_from_slice(&DEFAULT_MV_PROBS);
221 #[derive(Clone,Copy,Debug,PartialEq)]
229 #[derive(Clone,Copy,Debug,PartialEq)]
// Decodes one motion-vector component.
// probs[0] selects short (tree-coded via probs[2..9]) vs long form; the
// long form reads raw magnitude bits in LONG_VECTOR_ORDER using probs[9..];
// probs[1] is the sign probability checked at the end.
// NOTE(review): the `raw` declaration and the final sign/return lines are
// missing from this excerpt.
237 fn decode_mv_component(bc: &mut BoolCoder, probs: &[u8; 17]) -> i16 {
238 const LONG_VECTOR_ORDER: [usize; 7] = [ 0, 1, 2, 7, 6, 5, 4 ];
240 let val = if !bc.read_prob(probs[0]) {
241 bc.read_tree(SMALL_MV_TREE, &probs[2..9])
243 let raw_probs = &probs[9..];
245 for ord in LONG_VECTOR_ORDER.iter() {
246 raw |= (bc.read_prob(raw_probs[*ord]) as i16) << *ord;
// Bit 3 is only coded when a higher magnitude bit is set.
248 if (raw & 0xF0) != 0 {
249 raw |= (bc.read_prob(raw_probs[3]) as i16) << 3;
// A zero magnitude never carries a sign bit.
255 if (val == 0) || !bc.read_prob(probs[1]) {
263 y_pred: GenericCache<u8>,
264 u_pred: GenericCache<u8>,
265 v_pred: GenericCache<u8>,
266 y2_pred: GenericCache<u8>,
267 y_pred_left: [u8; 4],
268 u_pred_left: [u8; 2],
269 v_pred_left: [u8; 2],
276 y_pred: GenericCache::new(1, 1, 0),
277 u_pred: GenericCache::new(1, 1, 0),
278 v_pred: GenericCache::new(1, 1, 0),
279 y2_pred: GenericCache::new(1, 1, 0),
// Reallocates the per-row non-zero caches for a new macroblock width:
// 4 luma rows, 2 chroma rows and 1 Y2 row per MB row, each with one extra
// leading column for the left-edge context.
286 fn resize(&mut self, mb_w: usize) {
287 self.y_pred = GenericCache::new(4, mb_w * 4 + 1, 0);
288 self.u_pred = GenericCache::new(2, mb_w * 2 + 1, 0);
289 self.v_pred = GenericCache::new(2, mb_w * 2 + 1, 0);
290 self.y2_pred = GenericCache::new(1, mb_w + 1, 0);
// Clears all non-zero contexts at the start of a frame.
// NOTE(review): the y/u/v cache reset calls are missing from this excerpt.
292 fn reset(&mut self) {
296 self.y2_pred.reset();
297 self.y_pred_left = [0; 4];
298 self.u_pred_left = [0; 2];
299 self.v_pred_left = [0; 2];
300 self.y2_pred_left = 0;
// Advances every cache to the next macroblock row (the row just decoded
// becomes the top context for the next one).
302 fn update_row(&mut self) {
303 self.y_pred.update_row();
304 self.u_pred.update_row();
305 self.v_pred.update_row();
306 self.y2_pred.update_row();
311 info: NACodecInfoRef,
318 mb_info: Vec<MBInfo>,
322 ymodes: Vec<PredMode>,
324 uvmodes: Vec<PredMode>,
325 uvmode_stride: usize,
327 dstate: DecoderState,
330 coeffs: [[i16; 16]; 25],
332 qmat: [[[i16; 16]; 3]; 5],
334 mc_buf: NAVideoBufferRef<u8>,
336 tmp_scan: [usize; 16],
341 let vt = alloc_video_buffer(NAVideoInfo::new(128, 128, false, YUV420_FORMAT), 4).unwrap();
342 let mut scan = [0; 16];
343 scan.copy_from_slice(&DEFAULT_SCAN_ORDER);
344 let mc_buf = vt.get_vbuf().unwrap();
346 info: NACodecInfoRef::default(),
348 shuf: VPShuffler::new(),
362 dstate: DecoderState::default(),
363 pcache: PredCache::new(),
365 coeffs: [[0; 16]; 25],
368 qmat: [[[0; 16]; 3]; 5],
// Applies new frame dimensions: recomputes macroblock counts and resizes
// all per-MB / per-4x4-block state.  No-op when unchanged.
373 fn set_dimensions(&mut self, width: usize, height: usize) {
374 if (width == self.width) && (height == self.height) {
378 self.height = height;
// Macroblocks are 16x16, counts rounded up.
379 self.mb_w = (self.width + 15) >> 4;
380 self.mb_h = (self.height + 15) >> 4;
381 self.mb_info.resize(self.mb_w * self.mb_h, MBInfo::default());
// Motion vectors and luma modes are stored per 4x4 block.
382 self.mv_stride = self.mb_w * 4;
383 self.mvs.resize(self.mv_stride * self.mb_h * 4, ZERO_MV);
385 self.ymode_stride = self.mb_w * 4;
386 self.uvmode_stride = self.mb_w;
387 self.ymodes.resize(self.ymode_stride * self.mb_h * 4, PredMode::default());
388 self.uvmodes.resize(self.uvmode_stride * self.mb_h, PredMode::default());
390 self.pcache.resize(self.mb_w);
// Parses the per-frame macroblock feature definitions (quant override,
// loop-strength override, golden-frame update, pitch): presence
// probability, tree probabilities and per-index default values.
// NOTE(review): the flag reads guarding each field and the Ok(()) return
// are missing from this excerpt.
392 fn read_features(&mut self, bc: &mut BoolCoder) -> DecoderResult<()> {
393 for (i, feat) in self.dstate.features.iter_mut().enumerate() {
395 let mut feature = MBFeature::default();
396 feature.present_prob = bc.read_byte();
397 for tp in feature.tree_probs.iter_mut() {
399 *tp = bc.read_byte();
// Feature value width depends on the feature index and stream version.
405 let fbits = match i {
408 _ => if self.dstate.version == 0 { 8 } else { 5 },
410 for dval in feature.def_val.iter_mut() {
412 *dval = bc.read_bits(fbits) as u8;
418 *feat = Some(feature);
// Conditionally updates DCT coefficient probabilities: each entry is
// replaced only when a bool coded with its DCT_UPDATE_PROBS probability
// fires.  NOTE(review): the nested loop headers are missing from this
// excerpt.
425 fn read_dct_coef_prob_upd(&mut self, bc: &mut BoolCoder) -> DecoderResult<()> {
430 if bc.read_prob(DCT_UPDATE_PROBS[i][j][k][l]) {
431 self.dstate.coef_probs[i][j][k][l] = bc.read_byte();
// Same conditional-update scheme for the two MV component probability
// sets.  NOTE(review): the loop headers are missing from this excerpt.
439 fn read_mv_prob_upd(&mut self, bc: &mut BoolCoder) -> DecoderResult<()> {
442 if bc.read_prob(MV_UPDATE_PROBS[comp][i]) {
443 self.dstate.mv_probs[comp][i] = bc.read_probability();
// Reads the per-macroblock feature flags and stores the resulting
// overrides (quant index, loop strength, GF update, pitch) in dstate for
// the rest of this MB's decoding.
449 fn decode_mb_features(&mut self, bc: &mut BoolCoder, _mb_x: usize, _mb_y: usize) -> DecoderResult<()> {
450 self.dstate.force_quant = None;
451 self.dstate.force_loop_str = None;
452 self.dstate.force_gf_update = false;
453 self.dstate.force_pitch = None;
454 for (i, feat) in self.dstate.features.iter().enumerate() {
455 if let Some(feat) = feat {
456 let present = bc.read_prob(feat.present_prob);
458 let ftype_idx = bc.read_tree(FEATURE_TREE, &feat.tree_probs);
459 let val = feat.def_val[ftype_idx];
// Feature 0 stores the value *index* (it selects a qmat set in
// decode_residue); the others store the value itself.
461 0 => self.dstate.force_quant = Some(ftype_idx as u8),
462 1 => self.dstate.force_loop_str = Some(val),
463 2 => self.dstate.force_gf_update = true,
464 _ => self.dstate.force_pitch = Some(val),
// Decodes all residue sub-blocks of one macroblock (Y2 + 16 luma + 4 U +
// 4 V), maintaining the left/top non-zero contexts, then applies the
// inter-frame Y2 DC prediction and the inverse transforms.
// NOTE(review): many loop headers and intermediate lines are missing from
// this excerpt; comments describe only the visible statements.
471 fn decode_residue(&mut self, bc: &mut BoolCoder, mb_x: usize, mb_idx: usize, use_last: bool) {
// qmat[0] is the frame-level set; the quant feature selects qmat[idx + 1].
472 let qmat_idx = if let Some(idx) = self.dstate.force_quant { (idx as usize) + 1 } else { 0 };
473 let mut sbparams = SBParams {
474 scan: &DEFAULT_SCAN_ORDER,
475 qmat: &self.qmat[qmat_idx][2],
476 coef_probs: &self.dstate.coef_probs,
478 let mut has_ac = [false; 25];
// The Y2 (second-stage DC) block lives at coeffs[24].
480 if self.dstate.has_y2 {
481 let pred = &self.pcache.y2_pred;
482 let pidx = pred.xpos + mb_x;
483 let pctx = self.pcache.y2_pred_left + pred.data[pidx - pred.stride];
485 let has_nz = decode_subblock(bc, &mut self.coeffs[24], 1, pctx, &sbparams);
486 self.pcache.y2_pred.data[pidx] = has_nz;
487 self.pcache.y2_pred_left = has_nz;
488 has_ac[24] = has_nz > 0;
// No Y2 block: propagate the context from the row above unchanged.
492 let pred = &mut self.pcache.y2_pred;
493 let pidx = pred.xpos + mb_x;
494 pred.data[pidx] = pred.data[pidx - pred.stride];
// Luma blocks use the (possibly frame-customised) scan order.
498 sbparams.scan = &self.scan;
499 sbparams.qmat = &self.qmat[qmat_idx][0];
503 let pred = &self.pcache.y_pred;
504 let pidx = pred.xpos + mb_x * 4 + bx + by * pred.stride;
505 let pctx = self.pcache.y_pred_left[by] + pred.data[pidx - pred.stride];
507 let has_nz = decode_subblock(bc, &mut self.coeffs[i], ytype, pctx, &sbparams);
508 self.pcache.y_pred.data[pidx] = has_nz;
509 self.pcache.y_pred_left[by] = has_nz;
510 has_ac[i] = has_nz > 0;
// Both chroma planes share the same quantiser set.
512 sbparams.qmat = &self.qmat[qmat_idx][1];
515 let by = (i >> 1) & 1;
516 let pred = &self.pcache.u_pred;
517 let pidx = pred.xpos + mb_x * 2 + bx + by * pred.stride;
518 let pctx = self.pcache.u_pred_left[by] + pred.data[pidx - pred.stride];
520 let has_nz = decode_subblock(bc, &mut self.coeffs[i], 2, pctx, &sbparams);
521 self.pcache.u_pred.data[pidx] = has_nz;
522 self.pcache.u_pred_left[by] = has_nz;
523 has_ac[i] = has_nz > 0;
527 let by = (i >> 1) & 1;
528 let pred = &self.pcache.v_pred;
529 let pidx = pred.xpos + mb_x * 2 + bx + by * pred.stride;
530 let pctx = self.pcache.v_pred_left[by] + pred.data[pidx - pred.stride];
532 let has_nz = decode_subblock(bc, &mut self.coeffs[i], 2, pctx, &sbparams);
533 self.pcache.v_pred.data[pidx] = has_nz;
534 self.pcache.v_pred_left[by] = has_nz;
535 has_ac[i] = has_nz > 0;
538 if self.dstate.has_y2 {
539 let y2block = &mut self.coeffs[24];
// Inter MBs track a predicted Y2 DC separately per reference
// (index 0 = last frame, 1 = golden).
540 if self.mb_info[mb_idx].mb_type != VPMBType::Intra {
541 let mut dc = y2block[0];
542 let pdc_idx = if use_last { 0 } else { 1 };
543 let pval = self.dstate.pdc_pred_val[pdc_idx];
545 if self.dstate.pdc_pred_count[pdc_idx] > 3 {
// Zero or a sign change resets the prediction run; an equal DC extends it.
549 if (pval == 0) || (dc == 0) || ((pval ^ dc) < 0) {
550 self.dstate.pdc_pred_count[pdc_idx] = 0;
551 } else if dc == pval {
552 self.dstate.pdc_pred_count[pdc_idx] += 1;
554 self.dstate.pdc_pred_val[pdc_idx] = dc;
558 } else if y2block[0] != 0 {
// Scatter the transformed Y2 DCs into the 16 luma blocks' DC slots.
562 self.coeffs[i][0] = self.coeffs[24][i];
// Full IDCT only where AC coefficients exist, DC-only shortcut otherwise.
567 idct4x4(&mut self.coeffs[i]);
568 } else if self.coeffs[i][0] != 0 {
569 idct4x4_dc(&mut self.coeffs[i]);
// Builds the dequantisation matrices: slot 0 from the frame-header quant
// indices (separate DC index 0 / AC indices for Y, UV and Y2), slots 1..
// from the quant feature's per-index override values.
// NOTE(review): the AC-fill loop headers are missing from this excerpt.
574 fn set_qmat(&mut self, y_dc_q: usize, y_ac_q: usize, y2_dc_q: usize, y2_ac_q: usize, uv_dc_q: usize, uv_ac_q: usize) {
575 self.qmat[0][0][0] = Y_DC_QUANTS[y_dc_q];
577 self.qmat[0][0][i] = Y_AC_QUANTS[y_ac_q];
579 self.qmat[0][1][0] = UV_DC_QUANTS[uv_dc_q];
581 self.qmat[0][1][i] = UV_AC_QUANTS[uv_ac_q];
583 self.qmat[0][2][0] = Y2_DC_QUANTS[y2_dc_q];
585 self.qmat[0][2][i] = Y2_AC_QUANTS[y2_ac_q];
// Feature 0 (quant override) gets one full matrix set per default value.
587 if let Some(ref feat) = self.dstate.features[0] {
589 let q = feat.def_val[j] as usize;
590 self.qmat[j + 1][0][0] = Y_DC_QUANTS[q];
592 self.qmat[j + 1][0][i] = Y_AC_QUANTS[q];
594 self.qmat[j + 1][1][0] = UV_DC_QUANTS[q];
596 self.qmat[j + 1][1][i] = UV_AC_QUANTS[q];
598 self.qmat[j + 1][2][0] = Y2_DC_QUANTS[q];
600 self.qmat[j + 1][2][i] = Y2_AC_QUANTS[q];
// Fills the 4x4 luma-mode grid of one macroblock with a single mode.
605 fn fill_ymode(&mut self, mb_x: usize, mb_y: usize, ymode: PredMode) {
606 let mut iidx = mb_x * 4 + mb_y * 4 * self.ymode_stride;
609 self.ymodes[iidx + x] = ymode;
611 iidx += self.ymode_stride;
// Fills all 16 per-4x4-block MVs of one macroblock with the same vector.
// NOTE(review): the row advance uses `self.mb_w * 4`, which equals
// `self.mv_stride` as computed in set_dimensions — same value,
// inconsistent spelling.
614 fn fill_mv(&mut self, mb_x: usize, mb_y: usize, mv: MV) {
615 let mut iidx = mb_x * 4 + mb_y * 4 * self.mv_stride;
618 self.mvs[iidx + x] = mv;
620 iidx += self.mb_w * 4;
// Surveys up to 12 neighbouring 4x4 blocks (weighted by distance) to build
// the nearest/near MV candidates, their weighted counts `ct`, the
// predicted MV and the inter-mode probabilities for this macroblock.
// NOTE(review): the count-accumulation lines between the visible ones are
// missing from this excerpt.
623 fn find_mv_pred(&self, mb_x: usize, mb_y: usize) -> ([u8; 4], MV, MV, MV) {
// (y offset, x offset, weight, sub-block index) of each candidate.
624 const CAND_POS: [(i8, i8, u8, u8); 12] = [
625 (-1, 0, 8, 12), ( 0, -1, 8, 3),
626 (-1, -1, 2, 15), (-1, 1, 2, 12),
627 (-2, 0, 2, 12), ( 0, -2, 2, 3),
628 (-1, -2, 1, 15), (-2, -1, 1, 15),
629 (-2, 1, 1, 12), (-1, 2, 1, 12),
630 (-2, -2, 1, 15), (-2, 2, 1, 12)
633 let mut nearest_mv = ZERO_MV;
634 let mut near_mv = ZERO_MV;
636 let mut ct: [u8; 4] = [0; 4];
// Version 0 streams exclude linear position 0 from the candidate space.
638 let start = if self.dstate.version == 0 { 1 } else { 0 };
// Candidates are addressed in a linearised (mb_w + 1)-wide space so that
// out-of-frame positions can be rejected with a range/column check.
639 let mvwrap = (self.mb_w as isize) + 1;
640 for (yoff, xoff, weight, blk_no) in CAND_POS.iter() {
641 let cx = (mb_x as isize) + (*xoff as isize);
642 let cy = (mb_y as isize) + (*yoff as isize);
643 let mvpos = cx + cy * mvwrap;
644 if (mvpos < start) || ((mvpos % mvwrap) == (mvwrap - 1)) {
648 let cx = (mvpos % mvwrap) as usize;
649 let cy = (mvpos / mvwrap) as usize;
650 let bx = (*blk_no as usize) & 3;
651 let by = (*blk_no as usize) >> 2;
652 let blk_pos = cx * 4 + bx + (cy * 4 + by) * self.mv_stride;
653 let mv = self.mvs[blk_pos];
659 if (nearest_mv == ZERO_MV) || (nearest_mv == mv) {
662 } else if near_mv == ZERO_MV {
666 idx = if mv == near_mv { 2 } else { 3 };
// Prediction picks the more frequent of nearest/near unless zero dominates.
670 let pred_mv = if ct[1] > ct[2] {
671 if ct[1] >= ct[0] { nearest_mv } else { ZERO_MV }
673 if ct[2] >= ct[0] { near_mv } else { ZERO_MV }
676 let mvprobs = [INTER_MODE_PROBS[ct[0] as usize][0],
677 INTER_MODE_PROBS[ct[1] as usize][1],
678 INTER_MODE_PROBS[ct[2] as usize][2],
// NOTE(review): index 3 reuses ct[2] rather than ct[3]; verify against the
// VP7 reference decoder — may be intentional, but looks like a possible
// slip.
679 INTER_MODE_PROBS[ct[2] as usize][3]];
681 (mvprobs, nearest_mv, near_mv, pred_mv)
// Decodes the MV for one 4x4 sub-block in split-MV mode: the sub-mode
// chooses the left neighbour, the top neighbour, zero, or an explicitly
// coded delta (y component first) added to pred_mv.
// NOTE(review): the match arms between the visible lines are missing from
// this excerpt.
683 fn get_split_mv(&self, bc: &mut BoolCoder, mb_x: usize, mb_y: usize, bx: usize, by: usize, pred_mv: MV) -> MV {
684 let mode = bc.read_tree(SUB_MV_REF_TREE, &SUB_MV_REF_PROBS);
685 let mvidx = mb_x * 4 + bx + (mb_y * 4 + by) * self.mv_stride;
// Left neighbour exists unless we are at the very left frame edge.
688 if (mb_x > 0) || (bx > 0) {
// Top neighbour exists unless we are at the very top frame edge.
695 if (mb_y > 0) || (by > 0) {
696 self.mvs[mvidx - self.mv_stride]
701 SubMVRef::Zero => ZERO_MV,
703 let dmy = decode_mv_component(bc, &self.dstate.mv_probs[0]);
704 let dmx = decode_mv_component(bc, &self.dstate.mv_probs[1]);
705 pred_mv + MV{ x: dmx, y: dmy }
// Decodes split-MV data for a macroblock: the split pattern (top/bottom
// halves, left/right halves, four quarters or sixteen individual blocks)
// and the per-partition vectors, replicated into the 4x4 MV grid.
// NOTE(review): loop headers between the visible lines are missing from
// this excerpt, so the replication pattern is only partially visible.
709 fn do_split_mv(&mut self, bc: &mut BoolCoder, mb_x: usize, mb_y: usize, pred_mv: MV) -> DecoderResult<()> {
710 let split_mode = bc.read_tree(MV_SPLIT_MODE_TREE, &MV_SPLIT_MODE_PROBS);
711 let mut mvidx = mb_x * 4 + mb_y * 4 * self.mv_stride;
713 MVSplitMode::TopBottom => {
714 let top_mv = self.get_split_mv(bc, mb_x, mb_y, 0, 0, pred_mv);
716 for x in 0..4 { self.mvs[mvidx + x] = top_mv; }
717 mvidx += self.mv_stride;
719 let bot_mv = self.get_split_mv(bc, mb_x, mb_y, 0, 2, pred_mv);
721 for x in 0..4 { self.mvs[mvidx + x] = bot_mv; }
722 mvidx += self.mv_stride;
725 MVSplitMode::LeftRight => {
726 let left_mv = self.get_split_mv(bc, mb_x, mb_y, 0, 0, pred_mv);
// The left vector is stored before decoding the right one, since
// get_split_mv may reference the left neighbour through self.mvs.
727 self.mvs[mvidx + 1] = left_mv;
728 let right_mv = self.get_split_mv(bc, mb_x, mb_y, 2, 0, pred_mv);
730 self.mvs[mvidx + 0] = left_mv;
731 self.mvs[mvidx + 1] = left_mv;
732 self.mvs[mvidx + 2] = right_mv;
733 self.mvs[mvidx + 3] = right_mv;
734 mvidx += self.mv_stride;
737 MVSplitMode::Quarters => {
// One decoded MV per 2x2 quarter, duplicated to its three siblings.
738 for y in (0..4).step_by(2) {
739 for x in (0..4).step_by(2) {
740 self.mvs[mvidx + x] = self.get_split_mv(bc, mb_x, mb_y, x, y, pred_mv);
741 self.mvs[mvidx + x + 1] = self.mvs[mvidx + x];
744 self.mvs[mvidx + x + self.mv_stride] = self.mvs[mvidx + x];
746 mvidx += self.mv_stride * 2;
749 MVSplitMode::Sixteenths => {
752 self.mvs[mvidx + x] = self.get_split_mv(bc, mb_x, mb_y, x, y, pred_mv);
754 mvidx += self.mv_stride;
// Adds the decoded residue to the predicted pixels.  `pitch_mode` selects
// the row layout for luma (normal 4x4 grid, 16x1 rows, x2 or x4
// interleaved strides); chroma only distinguishes normal/four from the
// interlaced layouts.
// NOTE(review): the match arms' loop headers are largely missing from
// this excerpt.
761 fn add_residue(&self, dframe: &mut NASimpleVideoFrame<u8>, mb_x: usize, mb_y: usize, do_luma: bool, pitch_mode: u8) {
763 let ydst = &mut dframe.data[dframe.offset[0]..];
764 let ystride = dframe.stride[0];
765 let mut yoff = mb_x * 16 + mb_y * 16 * ystride;
767 PITCH_MODE_NORMAL => {
770 add_coeffs4x4(ydst, yoff + x * 4, ystride, &self.coeffs[x + y * 4]);
777 add_coeffs16x1(ydst, yoff, &self.coeffs[y]);
// x2 pitch: one field with doubled stride, then rewind for the other.
784 add_coeffs4x4(ydst, yoff + x * 4, ystride * 2, &self.coeffs[x + y * 4]);
788 yoff -= 15 * ystride;
791 add_coeffs4x4(ydst, yoff + x * 4, ystride * 2, &self.coeffs[x + y * 4]);
799 add_coeffs4x4(ydst, yoff + x * 4, ystride * 4, &self.coeffs[x + y * 4]);
// Chroma: U blocks live at coeffs[16..20], V blocks at coeffs[20..24].
807 let dst = &mut dframe.data[0..];
808 let mut uoff = dframe.offset[1] + mb_x * 8 + mb_y * 8 * dframe.stride[1];
809 let ustride = dframe.stride[1];
810 let mut voff = dframe.offset[2] + mb_x * 8 + mb_y * 8 * dframe.stride[2];
811 let vstride = dframe.stride[2];
812 if (pitch_mode == PITCH_MODE_NORMAL) || (pitch_mode == PITCH_MODE_FOUR) {
815 add_coeffs4x4(dst, uoff + x * 4, ustride, &self.coeffs[16 + x + y * 2]);
816 add_coeffs4x4(dst, voff + x * 4, vstride, &self.coeffs[20 + x + y * 2]);
824 add_coeffs4x4(dst, uoff + x * 4, ustride * 2, &self.coeffs[16 + x + y * 2]);
825 add_coeffs4x4(dst, voff + x * 4, vstride * 2, &self.coeffs[20 + x + y * 2]);
// Reconstructs one intra macroblock: 16x16 (or per-4x4 B-mode) luma
// prediction, 8x8 chroma prediction, then residue addition.
// NOTE(review): several loop headers and statements are missing from this
// excerpt; comments describe only the visible code.
832 fn recon_intra_mb(&mut self, dframe: &mut NASimpleVideoFrame<u8>, mb_x: usize, mb_y: usize) -> DecoderResult<()> {
833 let pitch = self.dstate.force_pitch.unwrap_or(0);
834 let pitch_mode = (pitch >> 3) & 3;
836 let mb_idx = mb_x + mb_y * self.mb_w;
837 let has_top = mb_y > 0;
838 let has_left = mb_x > 0;
839 let ydst = &mut dframe.data[dframe.offset[0]..];
840 let ystride = dframe.stride[0];
841 let mut yoff = mb_x * 16 + mb_y * 16 * ystride;
842 let ipred_ctx_y = &mut self.dstate.ipred_ctx_y;
843 ipred_ctx_y.has_top = has_top;
844 ipred_ctx_y.has_left = has_left;
845 let is_normal = self.mb_info[mb_idx].ymode != PredMode::BPred;
// Whole-block 16x16 luma prediction.
847 ipred_ctx_y.fill(ydst, yoff, ystride, 16, 16);
848 match self.mb_info[mb_idx].ymode {
849 PredMode::DCPred => IPred16x16::ipred_dc(ydst, yoff, ystride, ipred_ctx_y),
850 PredMode::HPred => IPred16x16::ipred_h (ydst, yoff, ystride, ipred_ctx_y),
851 PredMode::VPred => IPred16x16::ipred_v (ydst, yoff, ystride, ipred_ctx_y),
852 PredMode::TMPred => IPred16x16::ipred_tm(ydst, yoff, ystride, ipred_ctx_y),
// B-mode (per-4x4) luma prediction path.
856 validate!((pitch_mode == PITCH_MODE_NORMAL) || (pitch_mode == PITCH_MODE_X2));
857 let mut iidx = mb_x * 4 + mb_y * 4 * self.ymode_stride;
// Saved top-right pixels for blocks whose true top-right is not yet
// reconstructed.
858 let mut tr_save = [0x80u8; 16];
859 if pitch_mode == PITCH_MODE_X2 {
860 // reorganise coefficient data for interlaced case
861 for y in (0..4).step_by(2) {
863 let mut tmpblock = [0i16; 16 * 2];
864 let eidx = x + y * 4;
865 let oidx = x + y * 4 + 4;
868 tmpblock[i * 8 + 0 + j] = self.coeffs[eidx][i * 4 + j];
869 tmpblock[i * 8 + 4 + j] = self.coeffs[oidx][i * 4 + j];
872 self.coeffs[eidx].copy_from_slice(&tmpblock[0..16]);
873 self.coeffs[oidx].copy_from_slice(&tmpblock[16..32]);
877 let tr_edge = if has_top { ydst[yoff - ystride + 15] } else { 0x80 };
880 ipred_ctx_y.has_left = has_left || x > 0;
881 let bmode = self.ymodes[iidx + x];
882 let cur_yoff = yoff + x * 4;
// NOTE(review): the top-right availability test compares mb_y against
// mb_w; verify this should not be mb_x in the full source.
883 let has_tr = ipred_ctx_y.has_top && ((x < 3) || ((y == 0) && (mb_y < self.mb_w - 1)));
884 let has_dl = ipred_ctx_y.has_left && (x == 0) && (y < 3);
885 ipred_ctx_y.fill(ydst, cur_yoff, ystride,
886 if has_tr { 8 } else { 4 },
887 if has_dl { 8 } else { 4 });
890 ipred_ctx_y.top[i + 4] = tr_save[x * 4 + i];
894 tr_save[x * 4 + i] = ipred_ctx_y.top[i + 4];
// Rightmost MB column: use the saved frame-edge pixel as top-right.
897 if (mb_x == self.mb_w - 1) && has_top && (x == 3) {
899 ipred_ctx_y.top[i + 4] = tr_edge;
903 PredMode::DCPred => IPred4x4::ipred_dc(ydst, cur_yoff, ystride, ipred_ctx_y),
904 PredMode::TMPred => IPred4x4::ipred_tm(ydst, cur_yoff, ystride, ipred_ctx_y),
905 PredMode::HPred => IPred4x4::ipred_he(ydst, cur_yoff, ystride, ipred_ctx_y),
906 PredMode::VPred => IPred4x4::ipred_ve(ydst, cur_yoff, ystride, ipred_ctx_y),
907 PredMode::LDPred => IPred4x4::ipred_ld(ydst, cur_yoff, ystride, ipred_ctx_y),
908 PredMode::RDPred => IPred4x4::ipred_rd(ydst, cur_yoff, ystride, ipred_ctx_y),
909 PredMode::VRPred => IPred4x4::ipred_vr(ydst, cur_yoff, ystride, ipred_ctx_y),
910 PredMode::VLPred => IPred4x4::ipred_vl(ydst, cur_yoff, ystride, ipred_ctx_y),
911 PredMode::HDPred => IPred4x4::ipred_hd(ydst, cur_yoff, ystride, ipred_ctx_y),
912 PredMode::HUPred => IPred4x4::ipred_hu(ydst, cur_yoff, ystride, ipred_ctx_y),
// In B-mode the residue is added per block right after prediction.
915 add_coeffs4x4(ydst, cur_yoff, ystride, &self.coeffs[x + y * 4]);
917 ipred_ctx_y.has_top = true;
919 iidx += self.ymode_stride;
// Chroma 8x8 prediction, the same mode for both planes.
922 let dst = &mut dframe.data[0..];
923 let uoff = dframe.offset[1] + mb_x * 8 + mb_y * 8 * dframe.stride[1];
924 let ustride = dframe.stride[1];
925 let voff = dframe.offset[2] + mb_x * 8 + mb_y * 8 * dframe.stride[2];
926 let vstride = dframe.stride[2];
927 let ipred_ctx_u = &mut self.dstate.ipred_ctx_u;
928 let ipred_ctx_v = &mut self.dstate.ipred_ctx_v;
929 ipred_ctx_u.has_top = has_top;
930 ipred_ctx_v.has_top = has_top;
931 ipred_ctx_u.has_left = has_left;
932 ipred_ctx_v.has_left = has_left;
933 ipred_ctx_u.fill(dst, uoff, ustride, 8, 8);
934 ipred_ctx_v.fill(dst, voff, vstride, 8, 8);
935 match self.mb_info[mb_idx].uvmode {
936 PredMode::DCPred => {
937 IPred8x8::ipred_dc(dst, uoff, ustride, ipred_ctx_u);
938 IPred8x8::ipred_dc(dst, voff, vstride, ipred_ctx_v);
941 IPred8x8::ipred_h(dst, uoff, ustride, ipred_ctx_u);
942 IPred8x8::ipred_h(dst, voff, vstride, ipred_ctx_v);
945 IPred8x8::ipred_v(dst, uoff, ustride, ipred_ctx_u);
946 IPred8x8::ipred_v(dst, voff, vstride, ipred_ctx_v);
948 PredMode::TMPred => {
949 IPred8x8::ipred_tm(dst, uoff, ustride, ipred_ctx_u);
950 IPred8x8::ipred_tm(dst, voff, vstride, ipred_ctx_v);
// For B-mode MBs the luma residue was already added above, hence
// `is_normal` as the do_luma flag.
954 self.add_residue(dframe, mb_x, mb_y, is_normal, pitch_mode);
// Reconstructs one inter macroblock: motion compensation from the last or
// golden reference (whole-MB or per-4x4 for four-MV mode), chroma MVs
// derived from the luma ones, then residue addition.
// NOTE(review): loop headers and the chroma-MV rounding lines are missing
// from this excerpt.
957 fn recon_inter_mb(&mut self, dframe: &mut NASimpleVideoFrame<u8>, mb_x: usize, mb_y: usize, use_last: bool) {
958 let pitch = self.dstate.force_pitch.unwrap_or(0);
// Separate pitch selectors: dmode for residue layout, smode for MC.
959 let pitch_dmode = (pitch >> 3) & 3;
960 let pitch_smode = pitch & 7;
962 let refframe = (if use_last { self.shuf.get_last() } else { self.shuf.get_golden() }).unwrap();
963 let single_mv = self.mb_info[mb_x + mb_y * self.mb_w].mb_type != VPMBType::InterFourMV;
964 let mut iidx = mb_x * 4 + mb_y * 4 * self.mv_stride;
965 let mut mc_buf = self.mc_buf.get_data_mut().unwrap();
967 let dst = &mut dframe.data[0..];
968 let ystride = dframe.stride[0];
969 let mut yoff = dframe.offset[0] + mb_x * 16 + mb_y * 16 * ystride;
970 if pitch_smode == 0 {
// Luma MVs are doubled for the MC routines' finer sub-pel precision.
972 mc_block16x16(dst, yoff, ystride, mb_x * 16, mb_y * 16,
973 self.mvs[iidx].x * 2, self.mvs[iidx].y * 2, refframe.clone(), 0, &mut mc_buf);
977 mc_block4x4(dst, yoff + x * 4, ystride, mb_x * 16 + x * 4, mb_y * 16 + y * 4,
978 self.mvs[iidx + x].x * 2, self.mvs[iidx + x].y * 2, refframe.clone(), 0, &mut mc_buf);
981 iidx += self.mv_stride;
// Non-zero pitch_smode uses the special (interlaced-layout) MC routines.
986 mc_block_special(dst, yoff, ystride, mb_x * 16, mb_y * 16,
987 self.mvs[iidx].x * 2, self.mvs[iidx].y * 2,
988 refframe.clone(), 0, &mut mc_buf, 16, pitch_smode);
992 mc_block_special(dst, yoff + x * 4, ystride,
993 mb_x * 16 + x * 4, mb_y * 16 + y * 4,
994 self.mvs[iidx + x].x * 2, self.mvs[iidx + x].y * 2,
995 refframe.clone(), 0, &mut mc_buf, 4, pitch_smode);
998 iidx += self.mv_stride;
1003 let mut iidx = mb_x * 4 + mb_y * 4 * self.mv_stride;
1004 let mut uoff = dframe.offset[1] + mb_x * 8 + mb_y * 8 * dframe.stride[1];
1005 let ustride = dframe.stride[1];
1006 let mut voff = dframe.offset[2] + mb_x * 8 + mb_y * 8 * dframe.stride[2];
1007 let vstride = dframe.stride[2];
// Single-MV MBs reuse the luma MV directly for both chroma planes.
1009 let chroma_mv = self.mvs[iidx];
1011 if pitch_smode == 0 {
1012 mc_block8x8(dst, uoff, ustride, mb_x * 8, mb_y * 8, chroma_mv.x, chroma_mv.y, refframe.clone(), 1, &mut mc_buf);
1013 mc_block8x8(dst, voff, vstride, mb_x * 8, mb_y * 8, chroma_mv.x, chroma_mv.y, refframe.clone(), 2, &mut mc_buf);
1015 mc_block_special(dst, uoff, ustride, mb_x * 8, mb_y * 8, chroma_mv.x, chroma_mv.y,
1016 refframe.clone(), 1, &mut mc_buf, 8, pitch_smode);
1017 mc_block_special(dst, voff, vstride, mb_x * 8, mb_y * 8, chroma_mv.x, chroma_mv.y,
1018 refframe.clone(), 2, &mut mc_buf, 8, pitch_smode);
// Four-MV: each chroma 4x4 block sums the four covering luma MVs (the
// rounding of that sum is in lines not visible here).
1023 let mut chroma_mv = self.mvs[iidx + x * 2] + self.mvs[iidx + x * 2 + 1]
1024 + self.mvs[iidx + x * 2 + self.mv_stride]
1025 + self.mvs[iidx + x * 2 + self.mv_stride + 1];
1026 if chroma_mv.x < 0 {
1031 if chroma_mv.y < 0 {
1039 if pitch_smode == 0 {
1040 mc_block4x4(dst, uoff + x * 4, ustride, mb_x * 8 + x * 4, mb_y * 8 + y * 4,
1041 chroma_mv.x, chroma_mv.y, refframe.clone(), 1, &mut mc_buf);
1042 mc_block4x4(dst, voff + x * 4, vstride, mb_x * 8 + x * 4, mb_y * 8 + y * 4,
1043 chroma_mv.x, chroma_mv.y, refframe.clone(), 2, &mut mc_buf);
1045 mc_block_special(dst, uoff + x * 4, ustride, mb_x * 8 + x * 4, mb_y * 8 + y * 4,
1046 chroma_mv.x, chroma_mv.y, refframe.clone(), 1, &mut mc_buf,
1048 mc_block_special(dst, voff + x * 4, vstride, mb_x * 8 + x * 4, mb_y * 8 + y * 4,
1049 chroma_mv.x, chroma_mv.y, refframe.clone(), 2, &mut mc_buf,
1053 uoff += ustride * 4;
1054 voff += vstride * 4;
1055 iidx += 2 * self.mv_stride;
1058 self.add_residue(dframe, mb_x, mb_y, true, pitch_dmode);
// Applies the in-loop deblocking filter to one macroblock: MB-edge
// filters on the left/top borders, then the interior 4x4 edges in each
// direction.
// NOTE(review): the conditions guarding the filter calls (frame-edge
// checks, loop headers) are missing from this excerpt.
1060 fn loop_filter_mb(&mut self, dframe: &mut NASimpleVideoFrame<u8>, mb_x: usize, mb_y: usize, loop_str: u8) {
// High-edge-variance thresholds, indexed by [is_intra][loop_str].
1061 const HIGH_EDGE_VAR_THR: [[u8; 64]; 2] = [
1063 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1,
1064 1, 1, 1, 1, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
1065 2, 2, 2, 2, 2, 2, 2, 2, 3, 3, 3, 3, 3, 3, 3, 3,
1066 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3
1068 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1,
1069 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
1070 1, 1, 1, 1, 1, 1, 1, 1, 2, 2, 2, 2, 2, 2, 2, 2,
1071 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2
1074 let edge_thr = (loop_str as i16) + 2;
1075 let luma_thr = loop_str as i16;
1076 let chroma_thr = (loop_str as i16) * 2;
// Sharpness scales down and clamps the inner-edge threshold.
1077 let inner_thr = if self.dstate.loop_sharpness == 0 {
1080 let bound1 = (9 - self.dstate.loop_sharpness) as i16;
1081 let shift = (self.dstate.loop_sharpness + 3) >> 2;
1082 ((loop_str as i16) >> shift).min(bound1)
1084 let hev_thr = HIGH_EDGE_VAR_THR[if self.dstate.is_intra { 1 } else { 0 }][loop_str as usize] as i16;
1086 let ystride = dframe.stride[0];
1087 let ustride = dframe.stride[1];
1088 let vstride = dframe.stride[2];
1089 let ypos = dframe.offset[0] + mb_x * 16 + mb_y * 16 * ystride;
1090 let upos = dframe.offset[1] + mb_x * 8 + mb_y * 8 * ustride;
1091 let vpos = dframe.offset[2] + mb_x * 8 + mb_y * 8 * vstride;
// The simple filter uses the same routine for edge and inner filtering.
1093 let (loop_edge, loop_inner) = if self.dstate.lf_simple {
1094 (simple_loop_filter as LoopFilterFunc, simple_loop_filter as LoopFilterFunc)
1096 (normal_loop_filter_edge as LoopFilterFunc, normal_loop_filter_inner as LoopFilterFunc)
// Vertical MB edge (step 1 along a row), then horizontal MB edge.
1100 loop_edge(dframe.data, ypos, 1, ystride, 16, edge_thr, inner_thr, hev_thr);
1101 loop_edge(dframe.data, upos, 1, ustride, 8, edge_thr, inner_thr, hev_thr);
1102 loop_edge(dframe.data, vpos, 1, vstride, 8, edge_thr, inner_thr, hev_thr);
1105 loop_edge(dframe.data, ypos, ystride, 1, 16, edge_thr, inner_thr, hev_thr);
1106 loop_edge(dframe.data, upos, ustride, 1, 8, edge_thr, inner_thr, hev_thr);
1107 loop_edge(dframe.data, vpos, vstride, 1, 8, edge_thr, inner_thr, hev_thr);
// Interior 4x4 edges: per-offset for luma, the single middle edge for
// chroma.
1111 loop_inner(dframe.data, ypos + y * 4 * ystride, ystride, 1, 16, luma_thr, inner_thr, hev_thr);
1113 loop_inner(dframe.data, upos + 4 * ustride, ustride, 1, 8, chroma_thr, inner_thr, hev_thr);
1114 loop_inner(dframe.data, vpos + 4 * vstride, vstride, 1, 8, chroma_thr, inner_thr, hev_thr);
1117 loop_inner(dframe.data, ypos + x * 4, 1, ystride, 16, luma_thr, inner_thr, hev_thr);
1119 loop_inner(dframe.data, upos + 4, 1, ustride, 8, chroma_thr, inner_thr, hev_thr);
1120 loop_inner(dframe.data, vpos + 4, 1, vstride, 8, chroma_thr, inner_thr, hev_thr);
1124 impl NADecoder for VP7Decoder {
1125 fn init(&mut self, supp: &mut NADecoderSupport, info: NACodecInfoRef) -> DecoderResult<()> {
1126 if let NACodecTypeInfo::Video(vinfo) = info.get_properties() {
1127 let fmt = YUV420_FORMAT;
1128 let myvinfo = NAVideoInfo::new(vinfo.get_width(), vinfo.get_height(), false, fmt);
1129 let myinfo = NACodecTypeInfo::Video(myvinfo.clone());
1130 self.info = NACodecInfo::new_ref(info.get_name(), myinfo, info.get_extradata()).into_ref();
1132 supp.pool_u8.set_dec_bufs(4);
1133 supp.pool_u8.prealloc_video(NAVideoInfo::new(myvinfo.get_width(), myvinfo.get_height(), false, vinfo.get_format()), 4)?;
1134 self.set_dimensions(myvinfo.get_width(), myvinfo.get_height());
1137 Err(DecoderError::InvalidData)
1140 fn decode(&mut self, supp: &mut NADecoderSupport, pkt: &NAPacket) -> DecoderResult<NAFrameRef> {
1141 let src = pkt.get_buffer();
1143 validate!(src.len() > 4);
1145 let frame_tag = read_u24le(src.as_slice())?;
1146 self.dstate.is_intra = (frame_tag & 1) == 0;
1147 self.dstate.version = ((frame_tag >> 1) & 7) as u8;
1148 let part2_off = (frame_tag >> 4) as usize;
1149 let part1_off = if self.dstate.version == 0 { 4 } else { 3 };
1151 validate!(src.len() > part1_off + part2_off);
1152 let mut bc = BoolCoder::new(&src[part1_off..][..part2_off])?;
1153 let mut bc_main = BoolCoder::new(&src[part1_off + part2_off..])?;
1154 if self.dstate.is_intra {
1155 let width = bc.read_bits(12) as usize;
1156 let height = bc.read_bits(12) as usize;
1157 let _scalev = bc.read_bits(2);
1158 let _scaleh = bc.read_bits(2);
1159 validate!((width > 0) && (height > 0));
1160 self.set_dimensions(width, height);
1162 self.dstate.reset();
1163 self.scan.copy_from_slice(&DEFAULT_SCAN_ORDER);
1165 if !self.shuf.has_refs() {
1166 return Err(DecoderError::MissingReference);
1170 self.read_features(&mut bc)?;
1172 let y_ac_q = bc.read_bits(7) as usize;
1173 let y_dc_q = if bc.read_bool() { bc.read_bits(7) as usize } else { y_ac_q };
1174 let y2_dc_q = if bc.read_bool() { bc.read_bits(7) as usize } else { y_ac_q };
1175 let y2_ac_q = if bc.read_bool() { bc.read_bits(7) as usize } else { y_ac_q };
1176 let uv_dc_q = if bc.read_bool() { bc.read_bits(7) as usize } else { y_ac_q };
1177 let uv_ac_q = if bc.read_bool() { bc.read_bits(7) as usize } else { y_ac_q };
1178 self.set_qmat(y_dc_q, y_ac_q, y2_dc_q, y2_ac_q, uv_dc_q, uv_ac_q);
1180 let update_gf = if self.dstate.is_intra { true } else { bc.read_bool() };
1182 let mut has_fading_feature = true;
1183 let mut keep_probs = true;
1184 if self.dstate.version != 0 {
1185 keep_probs = bc.read_bool();
1186 if self.dstate.is_intra {
1187 has_fading_feature = true;
1189 has_fading_feature = bc.read_bool();
1193 if has_fading_feature {
1194 self.dstate.fading = bc.read_bool();
1195 if self.dstate.fading {
1196 self.dstate.fade_alpha = bc.read_sbits(8) as u16;
1197 self.dstate.fade_beta = bc.read_sbits(8) as u16;
1198 if let Some(pframe) = self.shuf.get_last() {
1199 let mut fframe = supp.pool_u8.get_free().unwrap();
1200 let mut dframe = NASimpleVideoFrame::from_video_buf(&mut fframe).unwrap();
1201 fade_frame(pframe, &mut dframe, self.dstate.fade_alpha, self.dstate.fade_beta);
1202 self.shuf.add_frame(fframe);
1206 self.dstate.fading = false;
1209 if self.dstate.version == 0 {
1210 self.dstate.lf_simple = bc.read_bool();
1215 self.scan[i] = DEFAULT_SCAN_ORDER[bc.read_bits(4) as usize];
1219 if self.dstate.version != 0 {
1220 self.dstate.lf_simple = bc.read_bool();
1222 self.dstate.lf_simple = false;
1225 self.dstate.loop_filter_level = bc.read_bits(6) as u8;
1226 self.dstate.loop_sharpness = bc.read_bits(3) as u8;
1228 self.read_dct_coef_prob_upd(&mut bc)?;
1230 if !self.dstate.is_intra {
1231 self.dstate.prob_intra_pred = bc.read_byte();
1232 self.dstate.prob_last_pred = bc.read_byte();
1235 self.dstate.kf_ymode_prob[i] = bc.read_byte();
1240 self.dstate.kf_uvmode_prob[i] = bc.read_byte();
1243 self.read_mv_prob_upd(&mut bc)?;
1246 self.tmp_scan.copy_from_slice(&self.scan);
1249 let vinfo = NAVideoInfo::new(self.width, self.height, false, YUV420_FORMAT);
1250 let ret = supp.pool_u8.get_free();
1252 return Err(DecoderError::AllocError);
1254 let mut buf = ret.unwrap();
1255 if buf.get_info() != vinfo {
1257 supp.pool_u8.reset();
1258 supp.pool_u8.prealloc_video(vinfo, 4)?;
1259 let ret = supp.pool_u8.get_free();
1261 return Err(DecoderError::AllocError);
1265 let mut dframe = NASimpleVideoFrame::from_video_buf(&mut buf).unwrap();
1268 self.pcache.reset();
1269 if self.dstate.is_intra || (self.dstate.version > 0) {
1270 self.dstate.pdc_pred_val = [0; 2];
1271 self.dstate.pdc_pred_count = [0; 2];
// Main macroblock decode loop. `use_last` selects the reference frame for
// inter prediction (last frame vs. golden frame, see prob_last_pred below).
1273 let mut use_last = true;
1274 for mb_y in 0..self.mb_h {
1275 for mb_x in 0..self.mb_w {
1276 self.decode_mb_features(&mut bc, mb_x, mb_y)?;
1277 self.dstate.has_y2 = true;
1278 if self.dstate.is_intra {
// Keyframe path: luma mode from the keyframe tree with fixed probabilities.
1279 let ymode = bc.read_tree(KF_Y_MODE_TREE, KF_Y_MODE_TREE_PROBS);
1280 if ymode == PredMode::BPred {
// BPred: no Y2 block; each 4x4 sub-block carries its own mode, coded with
// probabilities conditioned on the modes of the top and left neighbours.
1281 self.dstate.has_y2 = false;
1282 let mut iidx = mb_x * 4 + mb_y * 4 * self.ymode_stride;
1285 let top_mode = if (y > 0) || (mb_y > 0) {
1286 self.ymodes[iidx + x - self.ymode_stride]
1290 let left_mode = if (x > 0) || (mb_x > 0) {
1291 self.ymodes[iidx + x - 1]
1295 let top_idx = top_mode.to_b_index();
1296 let left_idx = left_mode.to_b_index();
1297 let bmode = bc.read_tree(B_MODE_TREE, &KF_B_MODE_TREE_PROBS[top_idx][left_idx]);
1298 self.ymodes[iidx + x] = bmode;
1300 iidx += self.ymode_stride;
// Non-BPred: propagate the whole-MB mode to all sub-block slots.
1303 self.fill_ymode(mb_x, mb_y, ymode.to_b_mode());
1305 let uvmode = bc.read_tree(UV_MODE_TREE, KF_UV_MODE_TREE_PROBS);
1306 self.mb_info[mb_idx].mb_type = VPMBType::Intra;
1307 self.mb_info[mb_idx].ymode = ymode;
1308 self.mb_info[mb_idx].uvmode = uvmode;
// Inter frame, intra-coded MB: signalled by prob_intra_pred; modes use the
// updatable (kf_*) probability tables read in the frame header.
1309 } else if !bc.read_prob(self.dstate.prob_intra_pred) {
1310 let ymode = bc.read_tree(Y_MODE_TREE, &self.dstate.kf_ymode_prob);
1311 if ymode == PredMode::BPred {
// BPred here uses fixed B_MODE_TREE_PROBS (no neighbour conditioning),
// unlike the keyframe path above.
1312 self.dstate.has_y2 = false;
1313 let mut iidx = mb_x * 4 + mb_y * 4 * self.ymode_stride;
1316 let bmode = bc.read_tree(B_MODE_TREE, B_MODE_TREE_PROBS);
1317 self.ymodes[iidx + x] = bmode;
1319 iidx += self.ymode_stride;
1322 self.fill_ymode(mb_x, mb_y, PredMode::Inter);
1324 let uvmode = bc.read_tree(UV_MODE_TREE, &self.dstate.kf_uvmode_prob);
1325 self.mb_info[mb_idx].mb_type = VPMBType::Intra;
1326 self.mb_info[mb_idx].ymode = ymode;
1327 self.mb_info[mb_idx].uvmode = uvmode;
// Intra MBs still get a zero MV so neighbouring MV prediction stays consistent.
1328 self.fill_mv(mb_x, mb_y, ZERO_MV);
// Inter-coded MB: choose reference frame, then decode the MV mode from a
// tree whose probabilities depend on the neighbouring MV predictors.
1330 use_last = !bc.read_prob(self.dstate.prob_last_pred);
1332 let (mvprobs, nearest_mv, near_mv, pred_mv) = self.find_mv_pred(mb_x, mb_y);
1333 let mbtype = bc.read_tree(MV_REF_TREE, &mvprobs);
1336 VPMBType::InterNearest => {
1337 self.fill_mv(mb_x, mb_y, nearest_mv);
1339 VPMBType::InterNear => {
1340 self.fill_mv(mb_x, mb_y, near_mv);
1342 VPMBType::InterNoMV => {
1343 self.fill_mv(mb_x, mb_y, ZERO_MV);
1345 VPMBType::InterMV => {
// New MV: note the Y component is decoded first (mv_probs[0] = vertical,
// mv_probs[1] = horizontal), then added to the predicted MV.
1346 let dmy = decode_mv_component(&mut bc, &self.dstate.mv_probs[0]);
1347 let dmx = decode_mv_component(&mut bc, &self.dstate.mv_probs[1]);
1348 let new_mv = pred_mv + MV{ x: dmx, y: dmy };
1349 self.fill_mv(mb_x, mb_y, new_mv);
1351 VPMBType::InterFourMV => {
// Four-MV mode: per-partition MVs decoded by a dedicated helper.
1352 self.do_split_mv(&mut bc, mb_x, mb_y, pred_mv)?;
// MV_REF_TREE can only produce the five variants matched above.
1354 _ => unreachable!(),
// Record inter modes for the MB, then decode residue and reconstruct.
1357 self.fill_ymode(mb_x, mb_y, PredMode::Inter);
1358 self.mb_info[mb_idx].mb_type = mbtype;
1359 self.mb_info[mb_idx].ymode = PredMode::Inter;
1360 self.mb_info[mb_idx].uvmode = PredMode::Inter;
// Residue comes from the separate main partition coder (bc_main), not the
// header/mode coder used above.
1362 self.decode_residue(&mut bc_main, mb_x, mb_idx, use_last);
1363 match self.mb_info[mb_idx].mb_type {
1364 VPMBType::Intra => {
1365 self.recon_intra_mb(&mut dframe, mb_x, mb_y)?;
1368 self.recon_inter_mb(&mut dframe, mb_x, mb_y, use_last);
// Per-MB loop-filter strength: feature override if present, else the
// frame-level loop_filter_level.
1371 if let Some(loop_str) = self.dstate.force_loop_str {
1372 self.mb_info[mb_idx].loop_str = loop_str;
1374 self.mb_info[mb_idx].loop_str = self.dstate.loop_filter_level;
1376 self.mb_info[mb_idx].upd_gf = self.dstate.force_gf_update;
// End of MB row — roll the prediction cache down one row.
1379 self.pcache.update_row();
// Second pass: apply the in-loop deblocking filter per macroblock using the
// strengths recorded during the decode pass.
1382 for mb_y in 0..self.mb_h {
1383 for mb_x in 0..self.mb_w {
1384 let loop_str = self.mb_info[mb_idx].loop_str;
1385 self.loop_filter_mb(&mut dframe, mb_x, mb_y, loop_str);
// Partial golden-frame update: when the golden frame is not wholly replaced
// but the per-MB update feature is active, copy only the flagged MBs from
// the freshly decoded frame into a copy of the current golden frame.
1389 if !update_gf && self.dstate.features[2].is_some() {
1390 let gf = self.shuf.get_golden().unwrap();
1391 let mut new_gf = supp.pool_u8.get_copy(&gf).unwrap();
1392 let dframe = NASimpleVideoFrame::from_video_buf(&mut new_gf).unwrap();
1394 let mut mc_buf = self.mc_buf.get_data_mut().unwrap();
1395 for mb_y in 0..self.mb_h {
1396 for mb_x in 0..self.mb_w {
1397 if self.mb_info[mb_idx].upd_gf {
// Zero-MV motion compensation from `buf` effectively copies the MB's
// 16x16 luma and two 8x8 chroma blocks into the new golden frame.
1398 mc_block16x16(dframe.data, dframe.offset[0] + mb_x * 16 + mb_y * 16 * dframe.stride[0], dframe.stride[0], mb_x * 16, mb_y * 16, 0, 0, buf.clone(), 0, &mut mc_buf);
1399 mc_block8x8(dframe.data, dframe.offset[1] + mb_x * 8 + mb_y * 8 * dframe.stride[1], dframe.stride[1], mb_x * 8, mb_y * 8, 0, 0, buf.clone(), 1, &mut mc_buf);
1400 mc_block8x8(dframe.data, dframe.offset[2] + mb_x * 8 + mb_y * 8 * dframe.stride[2], dframe.stride[2], mb_x * 8, mb_y * 8, 0, 0, buf.clone(), 2, &mut mc_buf);
1405 self.shuf.add_golden_frame(new_gf);
// Restore the saved scan order, register the frame with the shuffler
// (conditions for the golden update are outside this excerpt), and wrap
// the buffer into an output NAFrame with the proper frame type.
1409 self.scan.copy_from_slice(&self.tmp_scan);
1412 self.shuf.add_golden_frame(buf.clone());
1414 self.shuf.add_frame(buf.clone());
1416 let mut frm = NAFrame::new_from_pkt(pkt, self.info.clone(), NABufferType::Video(buf));
1417 frm.set_keyframe(self.dstate.is_intra);
1418 frm.set_frame_type(if self.dstate.is_intra { FrameType::I } else { FrameType::P });
// Discards buffered reference frames on seek/reset (body not visible in this excerpt).
1421 fn flush(&mut self) {
/// Creates a boxed VP7 decoder instance for codec registration.
/// Note: uses `dyn` trait-object syntax (bare `Box<NADecoder + Send>` is
/// deprecated since Rust 2018); the returned type is unchanged.
1426 pub fn get_decoder() -> Box<dyn NADecoder + Send> {
1427 Box::new(VP7Decoder::new())
1432 use nihav_core::codecs::RegisteredDecoders;
1433 use nihav_core::demuxers::RegisteredDemuxers;
1434 use nihav_codec_support::test::dec_video::*;
1435 use crate::duck_register_all_codecs;
1436 use nihav_commonfmt::generic_register_all_demuxers;
// Regression test body (fn header outside this excerpt): registers the Duck
// codecs and generic demuxers, decodes 13 frames of a known VP7 AVI sample
// and compares per-plane MD5 hashes against recorded reference values.
1440 let mut dmx_reg = RegisteredDemuxers::new();
1441 generic_register_all_demuxers(&mut dmx_reg);
1442 let mut dec_reg = RegisteredDecoders::new();
1443 duck_register_all_codecs(&mut dec_reg);
1445 test_decoding("avi", "vp7", "assets/Duck/interlaced_blit_pitch.avi", Some(12), &dmx_reg,
1446 &dec_reg, ExpectedTestResult::MD5Frames(vec![
1447 [0xb79fb6f8, 0xed51ac9e, 0x9e423456, 0xc0918e7f],
1448 [0xbf8d1274, 0x83515e15, 0x8c0887de, 0xfbfd05d3],
1449 [0x8ad00466, 0x80b6cbfb, 0x54de408e, 0x9efbc05e],
1450 [0x144122c5, 0x6897b553, 0x93474d29, 0x1a1274ec],
1451 [0x06ff5d07, 0x55825d38, 0x072b0a78, 0xfcb5020f],
1452 [0xfd01591b, 0xc42113e7, 0xc5a5550f, 0xb30f3b02],
1453 [0x155e0d6e, 0x96d75e06, 0x9bd7ce87, 0xacf868e1],
1454 [0xfd79103a, 0x695d21d3, 0xfeacb5b4, 0x1d869d08],
1455 [0xf4bcfeac, 0x0d2c305c, 0x11416c96, 0x626a5ef6],
1456 [0x3579b66c, 0x0a7d7dc0, 0xe80b0395, 0xf6a70661],
1457 [0x5773768c, 0x813442e9, 0x4dd6f793, 0xb10fe55f],
1458 [0xcaaf0ddb, 0x65c2410e, 0x95da5bba, 0x3b90128e],
1459 [0x74773773, 0xe1dbadeb, 0x57aaf64b, 0x9c21e3c7]]));
1463 /*const DEFAULT_ZIGZAG: [usize; 16] = [
// Base coefficient scan-order table; the frame header may permute self.scan
// by reading 4-bit indices into this table (see the header parsing above).
1469 const DEFAULT_SCAN_ORDER: [usize; 16] = [
// Luma MB prediction-mode tree for intra MBs in inter frames (decoded with
// the updatable kf_ymode_prob table). Tree layout: nodes come in pairs;
// the probability for pair i is tree_prob[i], Index points at the next pair,
// Value is a leaf (see VPTreeReader::read_tree).
1476 const Y_MODE_TREE: &[VPTreeDef<PredMode>] = &[
1477 VPTreeDef::Value(PredMode::DCPred), VPTreeDef::Index(2),
1478 VPTreeDef::Index(4), VPTreeDef::Index(6),
1479 VPTreeDef::Value(PredMode::VPred), VPTreeDef::Value(PredMode::HPred),
1480 VPTreeDef::Value(PredMode::TMPred), VPTreeDef::Value(PredMode::BPred),
// Luma MB prediction-mode tree for keyframes (used with the fixed
// KF_Y_MODE_TREE_PROBS); same mode set as Y_MODE_TREE but a different shape,
// with BPred reachable in a single branch.
1482 const KF_Y_MODE_TREE: &[VPTreeDef<PredMode>] = &[
1483 VPTreeDef::Value(PredMode::BPred), VPTreeDef::Index(2),
1484 VPTreeDef::Index(4), VPTreeDef::Index(6),
1485 VPTreeDef::Value(PredMode::DCPred), VPTreeDef::Value(PredMode::VPred),
1486 VPTreeDef::Value(PredMode::HPred), VPTreeDef::Value(PredMode::TMPred),
// Chroma prediction-mode tree (DC/V/H/TM), shared by keyframe and
// inter-frame intra MBs — only the probability table differs.
1488 const UV_MODE_TREE: &[VPTreeDef<PredMode>] = &[
1489 VPTreeDef::Value(PredMode::DCPred), VPTreeDef::Index(2),
1490 VPTreeDef::Value(PredMode::VPred), VPTreeDef::Index(4),
1491 VPTreeDef::Value(PredMode::HPred), VPTreeDef::Value(PredMode::TMPred)
// Per-4x4-sub-block prediction-mode tree used in BPred macroblocks; covers
// all ten sub-block modes including the directional predictors.
1493 const B_MODE_TREE: &[VPTreeDef<PredMode>] = &[
1494 VPTreeDef::Value(PredMode::DCPred), VPTreeDef::Index(2),
1495 VPTreeDef::Value(PredMode::TMPred), VPTreeDef::Index(4),
1496 VPTreeDef::Value(PredMode::VPred), VPTreeDef::Index(6),
1497 VPTreeDef::Index(8), VPTreeDef::Index(12),
1498 VPTreeDef::Value(PredMode::HPred), VPTreeDef::Index(10),
1499 VPTreeDef::Value(PredMode::RDPred), VPTreeDef::Value(PredMode::VRPred),
1500 VPTreeDef::Value(PredMode::LDPred), VPTreeDef::Index(14),
1501 VPTreeDef::Value(PredMode::VLPred), VPTreeDef::Index(16),
1502 VPTreeDef::Value(PredMode::HDPred), VPTreeDef::Value(PredMode::HUPred)
// Balanced 2-level tree selecting one of the four per-MB feature indices (0..=3).
1505 const FEATURE_TREE: &[VPTreeDef<usize>] = &[
1506 VPTreeDef::Index(2), VPTreeDef::Index(4),
1507 VPTreeDef::Value(0), VPTreeDef::Value(1),
1508 VPTreeDef::Value(2), VPTreeDef::Value(3)
// DCT token tree: EOB / small literal values 0..4 / escape categories
// Cat1..Cat6 whose extra magnitude bits are read by expand_token.
1511 const COEF_TREE: &[VPTreeDef<DCTToken>] = &[
1512 VPTreeDef::Value(DCTToken::EOB), VPTreeDef::Index(2),
1513 VPTreeDef::Value(DCTToken::Zero), VPTreeDef::Index(4),
1514 VPTreeDef::Value(DCTToken::One), VPTreeDef::Index(6),
1515 VPTreeDef::Index(8), VPTreeDef::Index(12),
1516 VPTreeDef::Value(DCTToken::Two), VPTreeDef::Index(10),
1517 VPTreeDef::Value(DCTToken::Three), VPTreeDef::Value(DCTToken::Four),
1518 VPTreeDef::Index(14), VPTreeDef::Index(16),
1519 VPTreeDef::Value(DCTToken::Cat1), VPTreeDef::Value(DCTToken::Cat2),
1520 VPTreeDef::Index(18), VPTreeDef::Index(20),
1521 VPTreeDef::Value(DCTToken::Cat3), VPTreeDef::Value(DCTToken::Cat4),
1522 VPTreeDef::Value(DCTToken::Cat5), VPTreeDef::Value(DCTToken::Cat6)
// Inter MB type tree: no MV / nearest / near / new MV / four-MV split,
// decoded with context-dependent probabilities from find_mv_pred.
1525 const MV_REF_TREE: &[VPTreeDef<VPMBType>] = &[
1526 VPTreeDef::Value(VPMBType::InterNoMV), VPTreeDef::Index(2),
1527 VPTreeDef::Value(VPMBType::InterNearest), VPTreeDef::Index(4),
1528 VPTreeDef::Value(VPMBType::InterNear), VPTreeDef::Index(6),
1529 VPTreeDef::Value(VPMBType::InterMV), VPTreeDef::Value(VPMBType::InterFourMV)
// Balanced 3-level tree for small MV component magnitudes (values 0..=7).
1531 const SMALL_MV_TREE: &[VPTreeDef<i16>] = &[
1532 VPTreeDef::Index(2), VPTreeDef::Index(8),
1533 VPTreeDef::Index(4), VPTreeDef::Index(6),
1534 VPTreeDef::Value(0), VPTreeDef::Value(1),
1535 VPTreeDef::Value(2), VPTreeDef::Value(3),
1536 VPTreeDef::Index(10), VPTreeDef::Index(12),
1537 VPTreeDef::Value(4), VPTreeDef::Value(5),
1538 VPTreeDef::Value(6), VPTreeDef::Value(7)
// Partitioning tree for four-MV macroblocks: 16 quarters, 4 quarters,
// top/bottom halves or left/right halves.
1540 const MV_SPLIT_MODE_TREE: &[VPTreeDef<MVSplitMode>] = &[
1541 VPTreeDef::Value(MVSplitMode::Sixteenths), VPTreeDef::Index(2),
1542 VPTreeDef::Value(MVSplitMode::Quarters), VPTreeDef::Index(4),
1543 VPTreeDef::Value(MVSplitMode::TopBottom), VPTreeDef::Value(MVSplitMode::LeftRight)
// Per-partition MV source tree in split-MV mode: inherit from left or above
// neighbour, use zero MV, or decode a new MV.
1545 const SUB_MV_REF_TREE: &[VPTreeDef<SubMVRef>] = &[
1546 VPTreeDef::Value(SubMVRef::Left), VPTreeDef::Index(2),
1547 VPTreeDef::Value(SubMVRef::Above), VPTreeDef::Index(4),
1548 VPTreeDef::Value(SubMVRef::Zero), VPTreeDef::Value(SubMVRef::New)