use nihav_core::codecs::*;
use nihav_core::io::byteio::*;
use nihav_core::data::GenericCache;
use super::vpcommon::*;
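// Tree-structured symbol decoding: every VP7 syntax element below is read by
// walking a small binary tree with the boolean coder, one node probability per
// step; Index() entries point at the next node pair, Value() entries terminate.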
enum VPTreeDef<T: Copy> {

    fn read_tree<T:Copy>(&mut self, tree_def: &[VPTreeDef<T>], tree_prob: &[u8]) -> T;

impl<'a> VPTreeReader for BoolCoder<'a> {
    fn read_tree<T:Copy>(&mut self, tree_def: &[VPTreeDef<T>], tree_prob: &[u8]) -> T {
            let bit = self.read_prob(tree_prob[idx >> 1]);
            match tree_def[idx + (bit as usize)] {
                VPTreeDef::Value(v) => return v,
                VPTreeDef::Index(ix) => { idx = ix as usize; },
#[derive(Clone,Copy,PartialEq,Debug)]

    // sub-block prediction modes

impl Default for PredMode {
    fn default() -> Self { PredMode::DCPred }

    fn to_b_mode(self) -> Self {
        if self == PredMode::DCPred {

    fn to_b_index(self) -> usize {
            PredMode::DCPred => 0,
            PredMode::TMPred => 1,
            PredMode::LDPred => 4,
            PredMode::RDPred => 5,
            PredMode::VRPred => 6,
            PredMode::VLPred => 7,
            PredMode::HDPred => 8,
            PredMode::HUPred => 9,
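// Per-macroblock pitch modes: they select how residue rows are laid out and
// which motion compensation variant is used below, and appear to come from the
// force_pitch feature used for VP7's interlaced/"blit pitch" content.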
const PITCH_MODE_NORMAL: u8 = 0;
const PITCH_MODE_FOUR:   u8 = 1;
const PITCH_MODE_X2:     u8 = 2;
const PITCH_MODE_X4:     u8 = 3;
#[derive(Clone,Copy,Default)]

#[derive(Clone,Copy,PartialEq)]
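// Expands a DCT token into a coefficient value: Zero..Four map directly to the
// small magnitudes (with a sign bit, set meaning negative), while Cat1..Cat6
// read extra magnitude bits using the VP56 category probabilities before the
// sign is read and the category base value is added.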
fn expand_token(bc: &mut BoolCoder, token: DCTToken) -> i16 {
        DCTToken::Zero  => return 0,
        DCTToken::One   => return if bc.read_bool() { -1 } else { 1 },
        DCTToken::Two   => return if bc.read_bool() { -2 } else { 2 },
        DCTToken::Three => return if bc.read_bool() { -3 } else { 3 },
        DCTToken::Four  => return if bc.read_bool() { -4 } else { 4 },
        DCTToken::Cat1  => cat = 0,
        DCTToken::Cat2  => cat = 1,
        DCTToken::Cat3  => cat = 2,
        DCTToken::Cat4  => cat = 3,
        DCTToken::Cat5  => cat = 4,
        DCTToken::Cat6  => cat = 5,

    let add_probs = &VP56_COEF_ADD_PROBS[cat];
    for prob in add_probs.iter() {
        if *prob == 128 { break; }
        add = (add << 1) | (bc.read_prob(*prob) as i16);

    let sign = bc.read_bool();
    let level = VP56_COEF_BASE[cat] + add;
struct SBParams<'a> {
    coef_probs: &'a [[[[u8; 11]; 3]; 8]; 4],
    scan:       &'a [usize; 16],
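// Decodes one 4x4 coefficient subblock. `ctype` selects the probability plane
// (0 = luma without its DC, 1 = the Y2 DC block, 2 = chroma, and presumably
// 3 = luma including DC), `pctx` is the combined left+top "has coefficients"
// context, and the returned flag is fed back into the prediction caches.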
fn decode_subblock<'a>(bc: &mut BoolCoder, coeffs: &mut [i16; 16], ctype: usize, pctx: u8, sbparams: &SBParams) -> u8 {
    const COEF_BANDS: [usize; 16] = [ 0, 1, 2, 3, 6, 4, 5, 6, 6, 6, 6, 6, 6, 6, 6, 7 ];

    let start = if ctype != 0 { 0 } else { 1 };
    let mut cval = pctx as usize;
    for idx in start..16 {
        let probs = &sbparams.coef_probs[ctype][COEF_BANDS[idx]][cval];
        let tok = bc.read_tree(COEF_TREE, probs);
        if tok == DCTToken::EOB { break; }
        let level = expand_token(bc, tok);
        coeffs[sbparams.scan[idx]] = level.wrapping_mul(sbparams.qmat[idx]);
        cval = level.abs().min(2) as usize;

    if has_nz > 0 { 1 } else { 0 }
#[derive(Clone,Copy,Default)]

struct DecoderState {
    features: [Option<MBFeature>; 4],

    loop_filter_level: u8,

    kf_ymode_prob: [u8; 4],
    kf_uvmode_prob: [u8; 3],

    coef_probs: [[[[u8; 11]; 3]; 8]; 4],
    mv_probs: [[u8; 17]; 2],

    force_quant: Option<u8>,
    force_loop_str: Option<u8>,
    force_gf_update: bool,
    force_pitch: Option<u8>,

    pdc_pred_count: usize,

    ipred_ctx_y: IPredContext,
    ipred_ctx_u: IPredContext,
    ipred_ctx_v: IPredContext,

    fn reset(&mut self) {
        self.kf_ymode_prob.copy_from_slice(Y_MODE_TREE_PROBS);
        self.kf_uvmode_prob.copy_from_slice(UV_MODE_TREE_PROBS);
        self.coef_probs.copy_from_slice(&DEFAULT_DCT_PROBS);
        self.mv_probs.copy_from_slice(&DEFAULT_MV_PROBS);
#[derive(Clone,Copy,Debug,PartialEq)]

#[derive(Clone,Copy,Debug,PartialEq)]
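// Reads one motion vector component: probs[0] selects the short form (a value
// from SMALL_MV_TREE driven by probs[2..9]) or the long form whose raw bits are
// coded in the LONG_VECTOR_ORDER permutation using probs[9..]; probs[1] is the
// sign probability, and the sign is skipped for a zero value.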
fn decode_mv_component(bc: &mut BoolCoder, probs: &[u8; 17]) -> i16 {
    const LONG_VECTOR_ORDER: [usize; 7] = [ 0, 1, 2, 7, 6, 5, 4 ];

    let val = if !bc.read_prob(probs[0]) {
            bc.read_tree(SMALL_MV_TREE, &probs[2..9])
            let raw_probs = &probs[9..];
            for ord in LONG_VECTOR_ORDER.iter() {
                raw |= (bc.read_prob(raw_probs[*ord]) as i16) << *ord;
            if (raw & 0xF0) != 0 {
                raw |= (bc.read_prob(raw_probs[3]) as i16) << 3;
    if (val == 0) || !bc.read_prob(probs[1]) {
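// Per-plane caches of the "block has non-zero coefficients" flags for the row
// above and the blocks to the left; they supply the context value fed into
// decode_subblock.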
    y_pred:  GenericCache<u8>,
    u_pred:  GenericCache<u8>,
    v_pred:  GenericCache<u8>,
    y2_pred: GenericCache<u8>,
    y_pred_left: [u8; 4],
    u_pred_left: [u8; 2],
    v_pred_left: [u8; 2],

            y_pred:  GenericCache::new(1, 1, 0),
            u_pred:  GenericCache::new(1, 1, 0),
            v_pred:  GenericCache::new(1, 1, 0),
            y2_pred: GenericCache::new(1, 1, 0),

    fn resize(&mut self, mb_w: usize) {
        self.y_pred  = GenericCache::new(4, mb_w * 4 + 1, 0);
        self.u_pred  = GenericCache::new(2, mb_w * 2 + 1, 0);
        self.v_pred  = GenericCache::new(2, mb_w * 2 + 1, 0);
        self.y2_pred = GenericCache::new(1, mb_w + 1, 0);

    fn reset(&mut self) {
        self.y2_pred.reset();
        self.y_pred_left = [0; 4];
        self.u_pred_left = [0; 2];
        self.v_pred_left = [0; 2];
        self.y2_pred_left = 0;

    fn update_row(&mut self) {
        self.y_pred.update_row();
        self.u_pred.update_row();
        self.v_pred.update_row();
        self.y2_pred.update_row();
    info: NACodecInfoRef,

    mb_info: Vec<MBInfo>,

    ymodes: Vec<PredMode>,
    uvmodes: Vec<PredMode>,
    uvmode_stride: usize,

    dstate: DecoderState,

    coeffs: [[i16; 16]; 25],
    qmat: [[[i16; 16]; 3]; 5],

    mc_buf: NAVideoBufferRef<u8>,

    tmp_scan: [usize; 16],

        let vt = alloc_video_buffer(NAVideoInfo::new(128, 128, false, YUV420_FORMAT), 4).unwrap();
        let mut scan = [0; 16];
        scan.copy_from_slice(&DEFAULT_SCAN_ORDER);
        let mc_buf = vt.get_vbuf().unwrap();
            info: NACodecInfoRef::default(),
            shuf: VPShuffler::new(),
            dstate: DecoderState::default(),
            pcache: PredCache::new(),
            coeffs: [[0; 16]; 25],
            qmat: [[[0; 16]; 3]; 5],
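    // Frame geometry: the decoder works on 16x16 macroblocks, while intra
    // modes and motion vectors are stored per 4x4 block, hence the `* 4`
    // strides used below.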
    fn set_dimensions(&mut self, width: usize, height: usize) {
        if (width == self.width) && (height == self.height) {
        self.height = height;
        self.mb_w = (self.width  + 15) >> 4;
        self.mb_h = (self.height + 15) >> 4;
        self.mb_info.resize(self.mb_w * self.mb_h, MBInfo::default());
        self.mv_stride = self.mb_w * 4;
        self.mvs.resize(self.mv_stride * self.mb_h * 4, ZERO_MV);

        self.ymode_stride = self.mb_w * 4;
        self.uvmode_stride = self.mb_w;
        self.ymodes.resize(self.ymode_stride * self.mb_h * 4, PredMode::default());
        self.uvmodes.resize(self.uvmode_stride * self.mb_h, PredMode::default());

        self.pcache.resize(self.mb_w);
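    // Reads the per-frame macroblock feature definitions: each of the four
    // features (alternative quantiser, alternative loop filter strength,
    // golden frame update, pitch mode) carries a presence probability, tree
    // probabilities and default values of a feature-specific bit width.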
    fn read_features(&mut self, bc: &mut BoolCoder) -> DecoderResult<()> {
        for (i, feat) in self.dstate.features.iter_mut().enumerate() {
                let mut feature = MBFeature::default();
                feature.present_prob = bc.read_byte();
                for tp in feature.tree_probs.iter_mut() {
                        *tp = bc.read_byte();
                    let fbits = match i {
                            _ => if self.dstate.version == 0 { 8 } else { 5 },
                    for dval in feature.def_val.iter_mut() {
                            *dval = bc.read_bits(fbits) as u8;
                *feat = Some(feature);

    fn read_dct_coef_prob_upd(&mut self, bc: &mut BoolCoder) -> DecoderResult<()> {
                        if bc.read_prob(DCT_UPDATE_PROBS[i][j][k][l]) {
                            self.dstate.coef_probs[i][j][k][l] = bc.read_byte();

    fn read_mv_prob_upd(&mut self, bc: &mut BoolCoder) -> DecoderResult<()> {
                if bc.read_prob(MV_UPDATE_PROBS[comp][i]) {
                    self.dstate.mv_probs[comp][i] = bc.read_probability();
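    // Per-macroblock feature parsing: for every enabled feature a presence
    // flag is read and, if set, an index into the feature's default values;
    // the result overrides the quantiser, loop filter strength, golden-frame
    // update flag or pitch mode for this macroblock only.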
    fn decode_mb_features(&mut self, bc: &mut BoolCoder, _mb_x: usize, _mb_y: usize) -> DecoderResult<()> {
        self.dstate.force_quant     = None;
        self.dstate.force_loop_str  = None;
        self.dstate.force_gf_update = false;
        self.dstate.force_pitch     = None;
        for (i, feat) in self.dstate.features.iter().enumerate() {
            if let Some(feat) = feat {
                let present = bc.read_prob(feat.present_prob);
                    let ftype_idx = bc.read_tree(FEATURE_TREE, &feat.tree_probs);
                    let val = feat.def_val[ftype_idx];
                        0 => self.dstate.force_quant    = Some(ftype_idx as u8),
                        1 => self.dstate.force_loop_str = Some(val),
                        2 => self.dstate.force_gf_update = true,
                        _ => self.dstate.force_pitch    = Some(val),
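    // Residue decoding for one macroblock: the optional Y2 (luma DC) block is
    // decoded first, then the 16 luma blocks and the 4+4 chroma blocks; the
    // non-zero flags are written back into the prediction caches and, for
    // inter macroblocks, the Y2 DC value is predicted from the previous DC.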
    fn decode_residue(&mut self, bc: &mut BoolCoder, mb_x: usize, mb_idx: usize) {
        let qmat_idx = if let Some(idx) = self.dstate.force_quant { (idx as usize) + 1 } else { 0 };
        let mut sbparams = SBParams {
                scan:       &DEFAULT_SCAN_ORDER,
                qmat:       &self.qmat[qmat_idx][2],
                coef_probs: &self.dstate.coef_probs,

        let mut has_ac = [false; 25];

        if self.dstate.has_y2 {
            let pred = &self.pcache.y2_pred;
            let pidx = pred.xpos + mb_x;
            let pctx = self.pcache.y2_pred_left + pred.data[pidx - pred.stride];
            let has_nz = decode_subblock(bc, &mut self.coeffs[24], 1, pctx, &sbparams);
            self.pcache.y2_pred.data[pidx] = has_nz;
            self.pcache.y2_pred_left = has_nz;
            has_ac[24] = has_nz > 0;

            let pred = &mut self.pcache.y2_pred;
            let pidx = pred.xpos + mb_x;
            pred.data[pidx] = pred.data[pidx - pred.stride];

        sbparams.scan = &self.scan;
        sbparams.qmat = &self.qmat[qmat_idx][0];
            let pred = &self.pcache.y_pred;
            let pidx = pred.xpos + mb_x * 4 + bx + by * pred.stride;
            let pctx = self.pcache.y_pred_left[by] + pred.data[pidx - pred.stride];
            let has_nz = decode_subblock(bc, &mut self.coeffs[i], ytype, pctx, &sbparams);
            self.pcache.y_pred.data[pidx] = has_nz;
            self.pcache.y_pred_left[by] = has_nz;
            has_ac[i] = has_nz > 0;

        sbparams.qmat = &self.qmat[qmat_idx][1];
            let by = (i >> 1) & 1;
            let pred = &self.pcache.u_pred;
            let pidx = pred.xpos + mb_x * 2 + bx + by * pred.stride;
            let pctx = self.pcache.u_pred_left[by] + pred.data[pidx - pred.stride];
            let has_nz = decode_subblock(bc, &mut self.coeffs[i], 2, pctx, &sbparams);
            self.pcache.u_pred.data[pidx] = has_nz;
            self.pcache.u_pred_left[by] = has_nz;
            has_ac[i] = has_nz > 0;

            let by = (i >> 1) & 1;
            let pred = &self.pcache.v_pred;
            let pidx = pred.xpos + mb_x * 2 + bx + by * pred.stride;
            let pctx = self.pcache.v_pred_left[by] + pred.data[pidx - pred.stride];
            let has_nz = decode_subblock(bc, &mut self.coeffs[i], 2, pctx, &sbparams);
            self.pcache.v_pred.data[pidx] = has_nz;
            self.pcache.v_pred_left[by] = has_nz;
            has_ac[i] = has_nz > 0;

        if self.dstate.has_y2 {
            let y2block = &mut self.coeffs[24];
            if self.mb_info[mb_idx].mb_type != VPMBType::Intra {
                let mut dc = y2block[0];
                let pval = self.dstate.pdc_pred_val;
                if self.dstate.pdc_pred_count > 3 {
                if (pval == 0) || (dc == 0) || ((pval ^ dc) < 0) {
                    self.dstate.pdc_pred_val = dc;
                    self.dstate.pdc_pred_count = 0;
                } else if dc == pval {
                    self.dstate.pdc_pred_count += 1;
                self.coeffs[i][0] = self.coeffs[24][i];
                idct4x4(&mut self.coeffs[i]);
                idct4x4_dc(&mut self.coeffs[i]);
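    // Builds the dequantisation tables: index 0 holds the frame-level
    // quantisers (per plane: luma, chroma, Y2), indices 1..4 hold the tables
    // for the alternative quantisers that feature 0 can select per macroblock.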
    fn set_qmat(&mut self, y_dc_q: usize, y_ac_q: usize, y2_dc_q: usize, y2_ac_q: usize, uv_dc_q: usize, uv_ac_q: usize) {
        self.qmat[0][0][0] = Y_DC_QUANTS[y_dc_q];
            self.qmat[0][0][i] = Y_AC_QUANTS[y_ac_q];
        self.qmat[0][1][0] = UV_DC_QUANTS[uv_dc_q];
            self.qmat[0][1][i] = UV_AC_QUANTS[uv_ac_q];
        self.qmat[0][2][0] = Y2_DC_QUANTS[y2_dc_q];
            self.qmat[0][2][i] = Y2_AC_QUANTS[y2_ac_q];
        if let Some(ref feat) = self.dstate.features[0] {
                let q = feat.def_val[j] as usize;
                self.qmat[j + 1][0][0] = Y_DC_QUANTS[q];
                    self.qmat[j + 1][0][i] = Y_AC_QUANTS[q];
                self.qmat[j + 1][1][0] = UV_DC_QUANTS[q];
                    self.qmat[j + 1][1][i] = UV_AC_QUANTS[q];
                self.qmat[j + 1][2][0] = Y2_DC_QUANTS[q];
                    self.qmat[j + 1][2][i] = Y2_AC_QUANTS[q];
    fn fill_ymode(&mut self, mb_x: usize, mb_y: usize, ymode: PredMode) {
        let mut iidx = mb_x * 4 + mb_y * 4 * self.ymode_stride;
                self.ymodes[iidx + x] = ymode;
            iidx += self.ymode_stride;

    fn fill_mv(&mut self, mb_x: usize, mb_y: usize, mv: MV) {
        let mut iidx = mb_x * 4 + mb_y * 4 * self.mv_stride;
                self.mvs[iidx + x] = mv;
            iidx += self.mb_w * 4;
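    // VP7 motion vector prediction: twelve neighbouring 4x4 block positions
    // are scored with fixed weights to pick the "nearest" and "near" vectors;
    // the resulting counters select the predicted MV and index the
    // probabilities for the inter mode tree.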
    fn find_mv_pred(&self, mb_x: usize, mb_y: usize) -> ([u8; 4], MV, MV, MV) {
        const CAND_POS: [(i8, i8, u8, u8); 12] = [
                (-1,  0, 8, 12), ( 0, -1, 8,  3),
                (-1, -1, 2, 15), (-1,  1, 2, 12),
                (-2,  0, 2, 12), ( 0, -2, 2,  3),
                (-1, -2, 1, 15), (-2, -1, 1, 15),
                (-2,  1, 1, 12), (-1,  2, 1, 12),
                (-2, -2, 1, 15), (-2,  2, 1, 12)

        let mut nearest_mv = ZERO_MV;
        let mut near_mv = ZERO_MV;

        let mut ct: [u8; 4] = [0; 4];

        let start = if self.dstate.version == 0 { 1 } else { 0 };
        let mvwrap = (self.mb_w as isize) + 1;
        for (yoff, xoff, weight, blk_no) in CAND_POS.iter() {
            let cx = (mb_x as isize) + (*xoff as isize);
            let cy = (mb_y as isize) + (*yoff as isize);
            let mvpos = cx + cy * mvwrap;
            if (mvpos < start) || ((mvpos % mvwrap) == (mvwrap - 1)) {
            let cx = (mvpos % mvwrap) as usize;
            let cy = (mvpos / mvwrap) as usize;
            let bx = (*blk_no as usize) & 3;
            let by = (*blk_no as usize) >> 2;
            let blk_pos = cx * 4 + bx + (cy * 4 + by) * self.mv_stride;
            let mv = self.mvs[blk_pos];
            if (nearest_mv == ZERO_MV) || (nearest_mv == mv) {
            } else if near_mv == ZERO_MV {
                idx = if mv == near_mv { 2 } else { 3 };

        let pred_mv = if ct[1] > ct[2] {
                if ct[1] >= ct[0] { nearest_mv } else { ZERO_MV }
                if ct[2] >= ct[0] { near_mv } else { ZERO_MV }

        let mvprobs = [INTER_MODE_PROBS[ct[0] as usize][0],
                       INTER_MODE_PROBS[ct[1] as usize][1],
                       INTER_MODE_PROBS[ct[2] as usize][2],
                       INTER_MODE_PROBS[ct[2] as usize][3]];

        (mvprobs, nearest_mv, near_mv, pred_mv)
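    // Sub-block motion vector for four-MV macroblocks: the reference is either
    // the vector of the block to the left, the block above, zero, or a newly
    // coded delta added to the predicted MV.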
    fn get_split_mv(&self, bc: &mut BoolCoder, mb_x: usize, mb_y: usize, bx: usize, by: usize, pred_mv: MV) -> MV {
        let mode = bc.read_tree(SUB_MV_REF_TREE, &SUB_MV_REF_PROBS);
        let mvidx = mb_x * 4 + bx + (mb_y * 4 + by) * self.mv_stride;
                if (mb_x > 0) || (bx > 0) {
                if (mb_y > 0) || (by > 0) {
                    self.mvs[mvidx - self.mv_stride]
            SubMVRef::Zero => ZERO_MV,
                let dmy = decode_mv_component(bc, &self.dstate.mv_probs[0]);
                let dmx = decode_mv_component(bc, &self.dstate.mv_probs[1]);
                pred_mv + MV{ x: dmx, y: dmy }
    fn do_split_mv(&mut self, bc: &mut BoolCoder, mb_x: usize, mb_y: usize, pred_mv: MV) -> DecoderResult<()> {
        let split_mode = bc.read_tree(MV_SPLIT_MODE_TREE, &MV_SPLIT_MODE_PROBS);
        let mut mvidx = mb_x * 4 + mb_y * 4 * self.mv_stride;
            MVSplitMode::TopBottom => {
                let top_mv = self.get_split_mv(bc, mb_x, mb_y, 0, 0, pred_mv);
                    for x in 0..4 { self.mvs[mvidx + x] = top_mv; }
                    mvidx += self.mv_stride;
                let bot_mv = self.get_split_mv(bc, mb_x, mb_y, 0, 2, pred_mv);
                    for x in 0..4 { self.mvs[mvidx + x] = bot_mv; }
                    mvidx += self.mv_stride;
            MVSplitMode::LeftRight => {
                let left_mv  = self.get_split_mv(bc, mb_x, mb_y, 0, 0, pred_mv);
                self.mvs[mvidx + 1] = left_mv;
                let right_mv = self.get_split_mv(bc, mb_x, mb_y, 2, 0, pred_mv);
                    self.mvs[mvidx + 0] = left_mv;
                    self.mvs[mvidx + 1] = left_mv;
                    self.mvs[mvidx + 2] = right_mv;
                    self.mvs[mvidx + 3] = right_mv;
                    mvidx += self.mv_stride;
            MVSplitMode::Quarters => {
                for y in (0..4).step_by(2) {
                    for x in (0..4).step_by(2) {
                        self.mvs[mvidx + x] = self.get_split_mv(bc, mb_x, mb_y, x, y, pred_mv);
                        self.mvs[mvidx + x + 1] = self.mvs[mvidx + x];
                        self.mvs[mvidx + x + self.mv_stride] = self.mvs[mvidx + x];
                    mvidx += self.mv_stride * 2;
            MVSplitMode::Sixteenths => {
                        self.mvs[mvidx + x] = self.get_split_mv(bc, mb_x, mb_y, x, y, pred_mv);
                    mvidx += self.mv_stride;
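    // Adds the decoded residue to the predicted pixels. The pitch mode selects
    // the row layout: normal 4x4 blocks, 16x1 rows, or doubled/quadrupled line
    // pitch for the interlaced cases.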
    fn add_residue(&self, dframe: &mut NASimpleVideoFrame<u8>, mb_x: usize, mb_y: usize, do_luma: bool, pitch_mode: u8) {
            let ydst = &mut dframe.data[dframe.offset[0]..];
            let ystride = dframe.stride[0];
            let mut yoff = mb_x * 16 + mb_y * 16 * ystride;
                PITCH_MODE_NORMAL => {
                            add_coeffs4x4(ydst, yoff + x * 4, ystride, &self.coeffs[x + y * 4]);
                        add_coeffs16x1(ydst, yoff, &self.coeffs[y]);
                            add_coeffs4x4(ydst, yoff + x * 4, ystride * 2, &self.coeffs[x + y * 4]);
                    yoff -= 15 * ystride;
                            add_coeffs4x4(ydst, yoff + x * 4, ystride * 2, &self.coeffs[x + y * 4]);
                            add_coeffs4x4(ydst, yoff + x * 4, ystride * 4, &self.coeffs[x + y * 4]);

        let dst = &mut dframe.data[0..];
        let mut uoff = dframe.offset[1] + mb_x * 8 + mb_y * 8 * dframe.stride[1];
        let ustride = dframe.stride[1];
        let mut voff = dframe.offset[2] + mb_x * 8 + mb_y * 8 * dframe.stride[2];
        let vstride = dframe.stride[2];
        if (pitch_mode == PITCH_MODE_NORMAL) || (pitch_mode == PITCH_MODE_FOUR) {
                    add_coeffs4x4(dst, uoff + x * 4, ustride, &self.coeffs[16 + x + y * 2]);
                    add_coeffs4x4(dst, voff + x * 4, vstride, &self.coeffs[20 + x + y * 2]);
                    add_coeffs4x4(dst, uoff + x * 4, ustride * 2, &self.coeffs[16 + x + y * 2]);
                    add_coeffs4x4(dst, voff + x * 4, vstride * 2, &self.coeffs[20 + x + y * 2]);
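    // Intra reconstruction: either one 16x16 luma prediction or sixteen 4x4
    // predictions with per-block modes (with coefficient reshuffling when the
    // doubled-pitch interlaced mode is active), followed by 8x8 chroma
    // prediction and residue addition.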
    fn recon_intra_mb(&mut self, dframe: &mut NASimpleVideoFrame<u8>, mb_x: usize, mb_y: usize) -> DecoderResult<()> {
        let pitch = self.dstate.force_pitch.unwrap_or(0);
        let pitch_mode = (pitch >> 3) & 3;

        let mb_idx = mb_x + mb_y * self.mb_w;
        let has_top = mb_y > 0;
        let has_left = mb_x > 0;
        let ydst = &mut dframe.data[dframe.offset[0]..];
        let ystride = dframe.stride[0];
        let mut yoff = mb_x * 16 + mb_y * 16 * ystride;
        let ipred_ctx_y = &mut self.dstate.ipred_ctx_y;
        ipred_ctx_y.has_top = has_top;
        ipred_ctx_y.has_left = has_left;
        let is_normal = self.mb_info[mb_idx].ymode != PredMode::BPred;
            ipred_ctx_y.fill(ydst, yoff, ystride, 16, 16);
            match self.mb_info[mb_idx].ymode {
                PredMode::DCPred => IPred16x16::ipred_dc(ydst, yoff, ystride, ipred_ctx_y),
                PredMode::HPred  => IPred16x16::ipred_h (ydst, yoff, ystride, ipred_ctx_y),
                PredMode::VPred  => IPred16x16::ipred_v (ydst, yoff, ystride, ipred_ctx_y),
                PredMode::TMPred => IPred16x16::ipred_tm(ydst, yoff, ystride, ipred_ctx_y),
            validate!((pitch_mode == PITCH_MODE_NORMAL) || (pitch_mode == PITCH_MODE_X2));
            let mut iidx = mb_x * 4 + mb_y * 4 * self.ymode_stride;
            let mut tr_save = [0x80u8; 16];
            if pitch_mode == PITCH_MODE_X2 {
                // reorganise coefficient data for the interlaced case
                for y in (0..4).step_by(2) {
                        let mut tmpblock = [0i16; 16 * 2];
                        let eidx = x + y * 4;
                        let oidx = x + y * 4 + 4;
                                tmpblock[i * 8 + 0 + j] = self.coeffs[eidx][i * 4 + j];
                                tmpblock[i * 8 + 4 + j] = self.coeffs[oidx][i * 4 + j];
                        self.coeffs[eidx].copy_from_slice(&tmpblock[0..16]);
                        self.coeffs[oidx].copy_from_slice(&tmpblock[16..32]);
                    ipred_ctx_y.has_left = has_left || x > 0;
                    let bmode = self.ymodes[iidx + x];
                    let cur_yoff = yoff + x * 4;
                    // for the rightmost sub-block the top-right pixels come from the
                    // macroblock to the right, so the column index (mb_x) is checked
                    let has_tr = has_top && ((x < 3) || ((y == 0) && (mb_x < self.mb_w - 1)));
                    let has_dl = ipred_ctx_y.has_left && (y < 3);
                    ipred_ctx_y.fill(ydst, cur_yoff, ystride,
                                     if has_tr { 8 } else { 4 },
                                     if has_dl { 8 } else { 4 });
                            ipred_ctx_y.top[i + 4] = tr_save[x * 4 + i];
                            tr_save[x * 4 + i] = ipred_ctx_y.top[i + 4];
                        PredMode::DCPred => IPred4x4::ipred_dc(ydst, cur_yoff, ystride, ipred_ctx_y),
                        PredMode::TMPred => IPred4x4::ipred_tm(ydst, cur_yoff, ystride, ipred_ctx_y),
                        PredMode::HPred  => IPred4x4::ipred_he(ydst, cur_yoff, ystride, ipred_ctx_y),
                        PredMode::VPred  => IPred4x4::ipred_ve(ydst, cur_yoff, ystride, ipred_ctx_y),
                        PredMode::LDPred => IPred4x4::ipred_ld(ydst, cur_yoff, ystride, ipred_ctx_y),
                        PredMode::RDPred => IPred4x4::ipred_rd(ydst, cur_yoff, ystride, ipred_ctx_y),
                        PredMode::VRPred => IPred4x4::ipred_vr(ydst, cur_yoff, ystride, ipred_ctx_y),
                        PredMode::VLPred => IPred4x4::ipred_vl(ydst, cur_yoff, ystride, ipred_ctx_y),
                        PredMode::HDPred => IPred4x4::ipred_hd(ydst, cur_yoff, ystride, ipred_ctx_y),
                        PredMode::HUPred => IPred4x4::ipred_hu(ydst, cur_yoff, ystride, ipred_ctx_y),
                    add_coeffs4x4(ydst, cur_yoff, ystride, &self.coeffs[x + y * 4]);
                ipred_ctx_y.has_top = true;
                iidx += self.ymode_stride;

        let dst = &mut dframe.data[0..];
        let uoff = dframe.offset[1] + mb_x * 8 + mb_y * 8 * dframe.stride[1];
        let ustride = dframe.stride[1];
        let voff = dframe.offset[2] + mb_x * 8 + mb_y * 8 * dframe.stride[2];
        let vstride = dframe.stride[2];
        let ipred_ctx_u = &mut self.dstate.ipred_ctx_u;
        let ipred_ctx_v = &mut self.dstate.ipred_ctx_v;
        ipred_ctx_u.has_top = has_top;
        ipred_ctx_v.has_top = has_top;
        ipred_ctx_u.has_left = has_left;
        ipred_ctx_v.has_left = has_left;
        ipred_ctx_u.fill(dst, uoff, ustride, 8, 8);
        ipred_ctx_v.fill(dst, voff, vstride, 8, 8);
        match self.mb_info[mb_idx].uvmode {
            PredMode::DCPred => {
                IPred8x8::ipred_dc(dst, uoff, ustride, ipred_ctx_u);
                IPred8x8::ipred_dc(dst, voff, vstride, ipred_ctx_v);
                IPred8x8::ipred_h(dst, uoff, ustride, ipred_ctx_u);
                IPred8x8::ipred_h(dst, voff, vstride, ipred_ctx_v);
                IPred8x8::ipred_v(dst, uoff, ustride, ipred_ctx_u);
                IPred8x8::ipred_v(dst, voff, vstride, ipred_ctx_v);
            PredMode::TMPred => {
                IPred8x8::ipred_tm(dst, uoff, ustride, ipred_ctx_u);
                IPred8x8::ipred_tm(dst, voff, vstride, ipred_ctx_v);
        self.add_residue(dframe, mb_x, mb_y, is_normal, pitch_mode);
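    // Inter reconstruction: motion compensation from either the last or the
    // golden frame, as one 16x16 block or sixteen 4x4 blocks for four-MV
    // macroblocks; chroma vectors are derived from the luma vectors, and
    // non-zero pitch sub-modes go through the mc_block_special path.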
    fn recon_inter_mb(&mut self, dframe: &mut NASimpleVideoFrame<u8>, mb_x: usize, mb_y: usize, use_last: bool) {
        let pitch = self.dstate.force_pitch.unwrap_or(0);
        let pitch_dmode = (pitch >> 3) & 3;
        let pitch_smode = pitch & 7;

        let refframe = (if use_last { self.shuf.get_last() } else { self.shuf.get_golden() }).unwrap();
        let single_mv = self.mb_info[mb_x + mb_y * self.mb_w].mb_type != VPMBType::InterFourMV;
        let mut iidx = mb_x * 4 + mb_y * 4 * self.mv_stride;
        let mut mc_buf = self.mc_buf.get_data_mut().unwrap();

        let dst = &mut dframe.data[0..];
        let ystride = dframe.stride[0];
        let mut yoff = dframe.offset[0] + mb_x * 16 + mb_y * 16 * ystride;
        if pitch_smode == 0 {
                mc_block16x16(dst, yoff, ystride, mb_x * 16, mb_y * 16,
                              self.mvs[iidx].x * 2, self.mvs[iidx].y * 2, refframe.clone(), 0, &mut mc_buf);
                        mc_block4x4(dst, yoff + x * 4, ystride, mb_x * 16 + x * 4, mb_y * 16 + y * 4,
                                    self.mvs[iidx + x].x * 2, self.mvs[iidx + x].y * 2, refframe.clone(), 0, &mut mc_buf);
                    iidx += self.mv_stride;
                mc_block_special(dst, yoff, ystride, mb_x * 16, mb_y * 16,
                                 self.mvs[iidx].x * 2, self.mvs[iidx].y * 2,
                                 refframe.clone(), 0, &mut mc_buf, 16, pitch_smode);
                        mc_block_special(dst, yoff + x * 4, ystride,
                                         mb_x * 16 + x * 4, mb_y * 16 + y * 4,
                                         self.mvs[iidx + x].x * 2, self.mvs[iidx + x].y * 2,
                                         refframe.clone(), 0, &mut mc_buf, 4, pitch_smode);
                    iidx += self.mv_stride;

        let mut iidx = mb_x * 4 + mb_y * 4 * self.mv_stride;
        let mut uoff = dframe.offset[1] + mb_x * 8 + mb_y * 8 * dframe.stride[1];
        let ustride = dframe.stride[1];
        let mut voff = dframe.offset[2] + mb_x * 8 + mb_y * 8 * dframe.stride[2];
        let vstride = dframe.stride[2];
            let chroma_mv = self.mvs[iidx];

            if pitch_smode == 0 {
                mc_block8x8(dst, uoff, ustride, mb_x * 8, mb_y * 8, chroma_mv.x, chroma_mv.y, refframe.clone(), 1, &mut mc_buf);
                mc_block8x8(dst, voff, vstride, mb_x * 8, mb_y * 8, chroma_mv.x, chroma_mv.y, refframe.clone(), 2, &mut mc_buf);
                mc_block_special(dst, uoff, ustride, mb_x * 8, mb_y * 8, chroma_mv.x, chroma_mv.y,
                                 refframe.clone(), 1, &mut mc_buf, 8, pitch_smode);
                mc_block_special(dst, voff, vstride, mb_x * 8, mb_y * 8, chroma_mv.x, chroma_mv.y,
                                 refframe.clone(), 2, &mut mc_buf, 8, pitch_smode);
                let mut chroma_mv = self.mvs[iidx] + self.mvs[iidx + 1]
                                  + self.mvs[iidx + self.mv_stride]
                                  + self.mvs[iidx + self.mv_stride + 1];
                    if pitch_smode == 0 {
                        mc_block4x4(dst, uoff, ustride, mb_x * 8 + x * 4, mb_y * 8 + y * 4,
                                    chroma_mv.x, chroma_mv.y, refframe.clone(), 1, &mut mc_buf);
                        mc_block4x4(dst, voff, vstride, mb_x * 8 + x * 4, mb_y * 8 + y * 4,
                                    chroma_mv.x, chroma_mv.y, refframe.clone(), 2, &mut mc_buf);
                        mc_block_special(dst, uoff, ustride, mb_x * 8 + x * 4, mb_y * 8 + y * 4,
                                         chroma_mv.x, chroma_mv.y, refframe.clone(), 1, &mut mc_buf,
                        mc_block_special(dst, voff, vstride, mb_x * 8 + x * 4, mb_y * 8 + y * 4,
                                         chroma_mv.x, chroma_mv.y, refframe.clone(), 2, &mut mc_buf,
                uoff += ustride * 4;
                voff += vstride * 4;
                iidx += 2 * self.mv_stride;
        self.add_residue(dframe, mb_x, mb_y, true, pitch_dmode);
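    // In-loop deblocking for one macroblock: edge filtering on the macroblock
    // borders and inner filtering on the 4x4 block boundaries, with thresholds
    // derived from the per-macroblock loop strength and the frame sharpness.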
    fn loop_filter_mb(&mut self, dframe: &mut NASimpleVideoFrame<u8>, mb_x: usize, mb_y: usize, loop_str: u8) {
        const HIGH_EDGE_VAR_THR: [[u8; 64]; 2] = [
            0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1,
            1, 1, 1, 1, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
            2, 2, 2, 2, 2, 2, 2, 2, 3, 3, 3, 3, 3, 3, 3, 3,
            3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3
            0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1,
            1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
            1, 1, 1, 1, 1, 1, 1, 1, 2, 2, 2, 2, 2, 2, 2, 2,
            2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2

        let edge_thr   = (loop_str as i16) + 2;
        let luma_thr   = loop_str as i16;
        let chroma_thr = (loop_str as i16) * 2;
        let inner_thr  = if self.dstate.loop_sharpness == 0 {
                let bound1 = (9 - self.dstate.loop_sharpness) as i16;
                let shift = (self.dstate.loop_sharpness + 3) >> 2;
                ((loop_str as i16) >> shift).min(bound1)
        let hev_thr    = HIGH_EDGE_VAR_THR[if self.dstate.is_intra { 1 } else { 0 }][loop_str as usize] as i16;

        let ystride = dframe.stride[0];
        let ustride = dframe.stride[1];
        let vstride = dframe.stride[2];
        let ypos = dframe.offset[0] + mb_x * 16 + mb_y * 16 * ystride;
        let upos = dframe.offset[1] + mb_x * 8 + mb_y * 8 * ustride;
        let vpos = dframe.offset[2] + mb_x * 8 + mb_y * 8 * vstride;

        let (loop_edge, loop_inner) = if self.dstate.lf_simple {
                (simple_loop_filter as LoopFilterFunc, simple_loop_filter as LoopFilterFunc)
                (normal_loop_filter_edge as LoopFilterFunc, normal_loop_filter_inner as LoopFilterFunc)

            loop_edge(dframe.data, ypos, 1, ystride, 16, edge_thr, inner_thr, hev_thr);
            loop_edge(dframe.data, upos, 1, ustride, 8, edge_thr, inner_thr, hev_thr);
            loop_edge(dframe.data, vpos, 1, vstride, 8, edge_thr, inner_thr, hev_thr);
            loop_edge(dframe.data, ypos, ystride, 1, 16, edge_thr, inner_thr, hev_thr);
            loop_edge(dframe.data, upos, ustride, 1, 8, edge_thr, inner_thr, hev_thr);
            loop_edge(dframe.data, vpos, vstride, 1, 8, edge_thr, inner_thr, hev_thr);

            loop_inner(dframe.data, ypos + y * 4 * ystride, ystride, 1, 16, luma_thr, inner_thr, hev_thr);
            loop_inner(dframe.data, upos + 4 * ustride, ustride, 1, 8, chroma_thr, inner_thr, hev_thr);
            loop_inner(dframe.data, vpos + 4 * vstride, vstride, 1, 8, chroma_thr, inner_thr, hev_thr);
            loop_inner(dframe.data, ypos + x * 4, 1, ystride, 16, luma_thr, inner_thr, hev_thr);
            loop_inner(dframe.data, upos + 4, 1, ustride, 8, chroma_thr, inner_thr, hev_thr);
            loop_inner(dframe.data, vpos + 4, 1, vstride, 8, chroma_thr, inner_thr, hev_thr);
impl NADecoder for VP7Decoder {
    fn init(&mut self, supp: &mut NADecoderSupport, info: NACodecInfoRef) -> DecoderResult<()> {
        if let NACodecTypeInfo::Video(vinfo) = info.get_properties() {
            let fmt = YUV420_FORMAT;
            let myvinfo = NAVideoInfo::new(vinfo.get_width(), vinfo.get_height(), false, fmt);
            let myinfo = NACodecTypeInfo::Video(myvinfo.clone());
            self.info = NACodecInfo::new_ref(info.get_name(), myinfo, info.get_extradata()).into_ref();

            supp.pool_u8.set_dec_bufs(4);
            supp.pool_u8.prealloc_video(NAVideoInfo::new(myvinfo.get_width(), myvinfo.get_height(), false, vinfo.get_format()), 4)?;
            self.set_dimensions(myvinfo.get_width(), myvinfo.get_height());
            Err(DecoderError::InvalidData)
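    // Frame decoding. A VP7 frame starts with a little-endian 24-bit tag:
    // bit 0 - inter flag (0 = intra), bits 1..3 - version, bits 4..23 - size of
    // the first (header/mode) partition; the coefficient partition follows it.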
    fn decode(&mut self, supp: &mut NADecoderSupport, pkt: &NAPacket) -> DecoderResult<NAFrameRef> {
        let src = pkt.get_buffer();

        validate!(src.len() > 4);

        let frame_tag = read_u24le(src.as_slice())?;
        self.dstate.is_intra = (frame_tag & 1) == 0;
        self.dstate.version  = ((frame_tag >> 1) & 7) as u8;
        let part2_off = (frame_tag >> 4) as usize;
        let part1_off = if self.dstate.version == 0 { 4 } else { 3 };

        validate!(src.len() > part1_off + part2_off);
        let mut bc      = BoolCoder::new(&src[part1_off..][..part2_off])?;
        let mut bc_main = BoolCoder::new(&src[part1_off + part2_off..])?;
        if self.dstate.is_intra {
            let width  = bc.read_bits(12) as usize;
            let height = bc.read_bits(12) as usize;
            let _scalev = bc.read_bits(2);
            let _scaleh = bc.read_bits(2);
            validate!((width > 0) && (height > 0));
            self.set_dimensions(width, height);

            self.dstate.reset();

            if !self.shuf.has_refs() {
                return Err(DecoderError::MissingReference);

        self.read_features(&mut bc)?;

        let y_ac_q  = bc.read_bits(7) as usize;
        let y_dc_q  = if bc.read_bool() { bc.read_bits(7) as usize } else { y_ac_q };
        let y2_dc_q = if bc.read_bool() { bc.read_bits(7) as usize } else { y_ac_q };
        let y2_ac_q = if bc.read_bool() { bc.read_bits(7) as usize } else { y_ac_q };
        let uv_dc_q = if bc.read_bool() { bc.read_bits(7) as usize } else { y_ac_q };
        let uv_ac_q = if bc.read_bool() { bc.read_bits(7) as usize } else { y_ac_q };
        self.set_qmat(y_dc_q, y_ac_q, y2_dc_q, y2_ac_q, uv_dc_q, uv_ac_q);

        let update_gf = if self.dstate.is_intra { true } else { bc.read_bool() };

        let mut has_fading_feature = true;
        let mut keep_probs = true;
        if self.dstate.version != 0 {
            keep_probs = bc.read_bool();
            if self.dstate.is_intra {
                has_fading_feature = true;
                has_fading_feature = bc.read_bool();

        if has_fading_feature {
            self.dstate.fading = bc.read_bool();
            if self.dstate.fading {
                self.dstate.fade_alpha = bc.read_sbits(8) as u16;
                self.dstate.fade_beta  = bc.read_sbits(8) as u16;
                if let Some(pframe) = self.shuf.get_last() {
                    let mut fframe = supp.pool_u8.get_free().unwrap();
                    let mut dframe = NASimpleVideoFrame::from_video_buf(&mut fframe).unwrap();
                    fade_frame(pframe, &mut dframe, self.dstate.fade_alpha, self.dstate.fade_beta);
                    self.shuf.add_frame(fframe);
            self.dstate.fading = false;

        if self.dstate.version == 0 {
            self.dstate.lf_simple = bc.read_bool();
                self.scan[i] = DEFAULT_SCAN_ORDER[bc.read_bits(4) as usize];

        if self.dstate.version != 0 {
            self.dstate.lf_simple = bc.read_bool();
            self.dstate.lf_simple = false;

        self.dstate.loop_filter_level = bc.read_bits(6) as u8;
        self.dstate.loop_sharpness    = bc.read_bits(3) as u8;

        self.read_dct_coef_prob_upd(&mut bc)?;

        if !self.dstate.is_intra {
            self.dstate.prob_intra_pred = bc.read_byte();
            self.dstate.prob_last_pred  = bc.read_byte();
                    self.dstate.kf_ymode_prob[i] = bc.read_byte();
                    self.dstate.kf_uvmode_prob[i] = bc.read_byte();
            self.read_mv_prob_upd(&mut bc)?;
            self.tmp_scan.copy_from_slice(&self.scan);

        let vinfo = NAVideoInfo::new(self.width, self.height, false, YUV420_FORMAT);
        let ret = supp.pool_u8.get_free();
            return Err(DecoderError::AllocError);
        let mut buf = ret.unwrap();
        if buf.get_info() != vinfo {
            supp.pool_u8.reset();
            supp.pool_u8.prealloc_video(vinfo, 4)?;
            let ret = supp.pool_u8.get_free();
                return Err(DecoderError::AllocError);
        let mut dframe = NASimpleVideoFrame::from_video_buf(&mut buf).unwrap();

        self.pcache.reset();
        self.dstate.pdc_pred_val   = 0;
        self.dstate.pdc_pred_count = 0;
        let mut use_last = true;
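        // Main macroblock loop: modes and motion vectors come from the header
        // partition (bc), coefficients from the second partition (bc_main);
        // reconstruction happens immediately, loop filtering in a second pass.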
        for mb_y in 0..self.mb_h {
            for mb_x in 0..self.mb_w {
                self.decode_mb_features(&mut bc, mb_x, mb_y)?;
                self.dstate.has_y2 = true;
                if self.dstate.is_intra {
                    let ymode = bc.read_tree(KF_Y_MODE_TREE, KF_Y_MODE_TREE_PROBS);
                    if ymode == PredMode::BPred {
                        self.dstate.has_y2 = false;
                        let mut iidx = mb_x * 4 + mb_y * 4 * self.ymode_stride;
                                let top_mode = if (y > 0) || (mb_y > 0) {
                                        self.ymodes[iidx + x - self.ymode_stride]
                                let left_mode = if (x > 0) || (mb_x > 0) {
                                        self.ymodes[iidx + x - 1]
                                let top_idx  = top_mode.to_b_index();
                                let left_idx = left_mode.to_b_index();
                                let bmode = bc.read_tree(B_MODE_TREE, &KF_B_MODE_TREE_PROBS[top_idx][left_idx]);
                                self.ymodes[iidx + x] = bmode;
                            iidx += self.ymode_stride;
                        self.fill_ymode(mb_x, mb_y, ymode.to_b_mode());
                    let uvmode = bc.read_tree(UV_MODE_TREE, KF_UV_MODE_TREE_PROBS);
                    self.mb_info[mb_idx].mb_type = VPMBType::Intra;
                    self.mb_info[mb_idx].ymode   = ymode;
                    self.mb_info[mb_idx].uvmode  = uvmode;
                } else if !bc.read_prob(self.dstate.prob_intra_pred) {
                    let ymode = bc.read_tree(Y_MODE_TREE, &self.dstate.kf_ymode_prob);
                    if ymode == PredMode::BPred {
                        self.dstate.has_y2 = false;
                        let mut iidx = mb_x * 4 + mb_y * 4 * self.ymode_stride;
                                let bmode = bc.read_tree(B_MODE_TREE, B_MODE_TREE_PROBS);
                                self.ymodes[iidx + x] = bmode;
                            iidx += self.ymode_stride;
                        self.fill_ymode(mb_x, mb_y, PredMode::Inter);
                    let uvmode = bc.read_tree(UV_MODE_TREE, &self.dstate.kf_uvmode_prob);
                    self.mb_info[mb_idx].mb_type = VPMBType::Intra;
                    self.mb_info[mb_idx].ymode   = ymode;
                    self.mb_info[mb_idx].uvmode  = uvmode;
                    self.fill_mv(mb_x, mb_y, ZERO_MV);
                    use_last = !bc.read_prob(self.dstate.prob_last_pred);

                    let (mvprobs, nearest_mv, near_mv, pred_mv) = self.find_mv_pred(mb_x, mb_y);
                    let mbtype = bc.read_tree(MV_REF_TREE, &mvprobs);
                        VPMBType::InterNearest => {
                            self.fill_mv(mb_x, mb_y, nearest_mv);
                        VPMBType::InterNear => {
                            self.fill_mv(mb_x, mb_y, near_mv);
                        VPMBType::InterNoMV => {
                            self.fill_mv(mb_x, mb_y, ZERO_MV);
                        VPMBType::InterMV => {
                            let dmy = decode_mv_component(&mut bc, &self.dstate.mv_probs[0]);
                            let dmx = decode_mv_component(&mut bc, &self.dstate.mv_probs[1]);
                            let new_mv = pred_mv + MV{ x: dmx, y: dmy };
                            self.fill_mv(mb_x, mb_y, new_mv);
                        VPMBType::InterFourMV => {
                            self.do_split_mv(&mut bc, mb_x, mb_y, pred_mv)?;
                        _ => unreachable!(),
                    self.fill_ymode(mb_x, mb_y, PredMode::Inter);
                    self.mb_info[mb_idx].mb_type = mbtype;
                    self.mb_info[mb_idx].ymode   = PredMode::Inter;
                    self.mb_info[mb_idx].uvmode  = PredMode::Inter;
                self.decode_residue(&mut bc_main, mb_x, mb_idx);
                match self.mb_info[mb_idx].mb_type {
                    VPMBType::Intra => {
                        self.recon_intra_mb(&mut dframe, mb_x, mb_y)?;
                        self.recon_inter_mb(&mut dframe, mb_x, mb_y, use_last);
                if let Some(loop_str) = self.dstate.force_loop_str {
                    self.mb_info[mb_idx].loop_str = loop_str;
                    self.mb_info[mb_idx].loop_str = self.dstate.loop_filter_level;
                self.mb_info[mb_idx].upd_gf = self.dstate.force_gf_update;
            self.pcache.update_row();

        for mb_y in 0..self.mb_h {
            for mb_x in 0..self.mb_w {
                let loop_str = self.mb_info[mb_idx].loop_str;
                self.loop_filter_mb(&mut dframe, mb_x, mb_y, loop_str);
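        // Partial golden frame update: when the golden frame is not replaced
        // wholesale, macroblocks flagged by feature 2 are copied from the
        // freshly decoded frame into a copy of the current golden frame.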
        if !update_gf && self.dstate.features[2].is_some() {
            let gf = self.shuf.get_golden().unwrap();
            let mut new_gf = supp.pool_u8.get_copy(&gf).unwrap();
            let dframe = NASimpleVideoFrame::from_video_buf(&mut new_gf).unwrap();

            let mut mc_buf = self.mc_buf.get_data_mut().unwrap();
            for mb_y in 0..self.mb_h {
                for mb_x in 0..self.mb_w {
                    if self.mb_info[mb_idx].upd_gf {
                        mc_block16x16(dframe.data, dframe.offset[0] + mb_x * 16 + mb_y * 16 * dframe.stride[0], dframe.stride[0], mb_x * 16, mb_y * 16, 0, 0, buf.clone(), 0, &mut mc_buf);
                        mc_block8x8(dframe.data, dframe.offset[1] + mb_x * 8 + mb_y * 8 * dframe.stride[1], dframe.stride[1], mb_x * 8, mb_y * 8, 0, 0, buf.clone(), 1, &mut mc_buf);
                        mc_block8x8(dframe.data, dframe.offset[2] + mb_x * 8 + mb_y * 8 * dframe.stride[2], dframe.stride[2], mb_x * 8, mb_y * 8, 0, 0, buf.clone(), 2, &mut mc_buf);

            self.shuf.add_golden_frame(new_gf);

            self.scan.copy_from_slice(&self.tmp_scan);

            self.shuf.add_golden_frame(buf.clone());

        self.shuf.add_frame(buf.clone());

        let mut frm = NAFrame::new_from_pkt(pkt, self.info.clone(), NABufferType::Video(buf));
        frm.set_keyframe(self.dstate.is_intra);
        frm.set_frame_type(if self.dstate.is_intra { FrameType::I } else { FrameType::P });

    fn flush(&mut self) {
pub fn get_decoder() -> Box<dyn NADecoder + Send> {
    Box::new(VP7Decoder::new())
    use nihav_core::codecs::RegisteredDecoders;
    use nihav_core::demuxers::RegisteredDemuxers;
    use nihav_core::test::dec_video::*;
    use crate::codecs::duck_register_all_codecs;
    use nihav_commonfmt::demuxers::generic_register_all_demuxers;

        let mut dmx_reg = RegisteredDemuxers::new();
        generic_register_all_demuxers(&mut dmx_reg);
        let mut dec_reg = RegisteredDecoders::new();
        duck_register_all_codecs(&mut dec_reg);

        //let file = "assets/Duck/potter-40.vp7";
        //let file = "assets/Duck/potter-500.vp7";
        //let file = "assets/Duck/starsky-700.vp7";
        //let file = "assets/Duck/taking-700.vp7";
        //let file = "assets/Duck/troy-700.vp7";
        let file = "assets/Duck/interlaced_blit_pitch.avi";
        //let file = "assets/Duck/vp7.avi";
        test_file_decoding("avi", file, Some(12), true, false, None/*Some("vp7")*/, &dmx_reg, &dec_reg);
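// Default coefficient scan order plus the fixed decoding trees (prediction
// modes, features, DCT tokens, MV modes and splits) used by read_tree above;
// the commented-out DEFAULT_ZIGZAG table is kept for reference.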
/*const DEFAULT_ZIGZAG: [usize; 16] = [

const DEFAULT_SCAN_ORDER: [usize; 16] = [

const Y_MODE_TREE: &[VPTreeDef<PredMode>] = &[
    VPTreeDef::Value(PredMode::DCPred), VPTreeDef::Index(2),
    VPTreeDef::Index(4),                VPTreeDef::Index(6),
    VPTreeDef::Value(PredMode::VPred),  VPTreeDef::Value(PredMode::HPred),
    VPTreeDef::Value(PredMode::TMPred), VPTreeDef::Value(PredMode::BPred),

const KF_Y_MODE_TREE: &[VPTreeDef<PredMode>] = &[
    VPTreeDef::Value(PredMode::BPred),  VPTreeDef::Index(2),
    VPTreeDef::Index(4),                VPTreeDef::Index(6),
    VPTreeDef::Value(PredMode::DCPred), VPTreeDef::Value(PredMode::VPred),
    VPTreeDef::Value(PredMode::HPred),  VPTreeDef::Value(PredMode::TMPred),

const UV_MODE_TREE: &[VPTreeDef<PredMode>] = &[
    VPTreeDef::Value(PredMode::DCPred), VPTreeDef::Index(2),
    VPTreeDef::Value(PredMode::VPred),  VPTreeDef::Index(4),
    VPTreeDef::Value(PredMode::HPred),  VPTreeDef::Value(PredMode::TMPred)

const B_MODE_TREE: &[VPTreeDef<PredMode>] = &[
    VPTreeDef::Value(PredMode::DCPred), VPTreeDef::Index(2),
    VPTreeDef::Value(PredMode::TMPred), VPTreeDef::Index(4),
    VPTreeDef::Value(PredMode::VPred),  VPTreeDef::Index(6),
    VPTreeDef::Index(8),                VPTreeDef::Index(12),
    VPTreeDef::Value(PredMode::HPred),  VPTreeDef::Index(10),
    VPTreeDef::Value(PredMode::RDPred), VPTreeDef::Value(PredMode::VRPred),
    VPTreeDef::Value(PredMode::LDPred), VPTreeDef::Index(14),
    VPTreeDef::Value(PredMode::VLPred), VPTreeDef::Index(16),
    VPTreeDef::Value(PredMode::HDPred), VPTreeDef::Value(PredMode::HUPred)

const FEATURE_TREE: &[VPTreeDef<usize>] = &[
    VPTreeDef::Index(2), VPTreeDef::Index(4),
    VPTreeDef::Value(0), VPTreeDef::Value(1),
    VPTreeDef::Value(2), VPTreeDef::Value(3)

const COEF_TREE: &[VPTreeDef<DCTToken>] = &[
    VPTreeDef::Value(DCTToken::EOB),   VPTreeDef::Index(2),
    VPTreeDef::Value(DCTToken::Zero),  VPTreeDef::Index(4),
    VPTreeDef::Value(DCTToken::One),   VPTreeDef::Index(6),
    VPTreeDef::Index(8),               VPTreeDef::Index(12),
    VPTreeDef::Value(DCTToken::Two),   VPTreeDef::Index(10),
    VPTreeDef::Value(DCTToken::Three), VPTreeDef::Value(DCTToken::Four),
    VPTreeDef::Index(14),              VPTreeDef::Index(16),
    VPTreeDef::Value(DCTToken::Cat1),  VPTreeDef::Value(DCTToken::Cat2),
    VPTreeDef::Index(18),              VPTreeDef::Index(20),
    VPTreeDef::Value(DCTToken::Cat3),  VPTreeDef::Value(DCTToken::Cat4),
    VPTreeDef::Value(DCTToken::Cat5),  VPTreeDef::Value(DCTToken::Cat6)

const MV_REF_TREE: &[VPTreeDef<VPMBType>] = &[
    VPTreeDef::Value(VPMBType::InterNoMV),    VPTreeDef::Index(2),
    VPTreeDef::Value(VPMBType::InterNearest), VPTreeDef::Index(4),
    VPTreeDef::Value(VPMBType::InterNear),    VPTreeDef::Index(6),
    VPTreeDef::Value(VPMBType::InterMV),      VPTreeDef::Value(VPMBType::InterFourMV)

const SMALL_MV_TREE: &[VPTreeDef<i16>] = &[
    VPTreeDef::Index(2),  VPTreeDef::Index(8),
    VPTreeDef::Index(4),  VPTreeDef::Index(6),
    VPTreeDef::Value(0),  VPTreeDef::Value(1),
    VPTreeDef::Value(2),  VPTreeDef::Value(3),
    VPTreeDef::Index(10), VPTreeDef::Index(12),
    VPTreeDef::Value(4),  VPTreeDef::Value(5),
    VPTreeDef::Value(6),  VPTreeDef::Value(7)

const MV_SPLIT_MODE_TREE: &[VPTreeDef<MVSplitMode>] = &[
    VPTreeDef::Value(MVSplitMode::Sixteenths), VPTreeDef::Index(2),
    VPTreeDef::Value(MVSplitMode::Quarters),   VPTreeDef::Index(4),
    VPTreeDef::Value(MVSplitMode::TopBottom),  VPTreeDef::Value(MVSplitMode::LeftRight)

const SUB_MV_REF_TREE: &[VPTreeDef<SubMVRef>] = &[
    VPTreeDef::Value(SubMVRef::Left),  VPTreeDef::Index(2),
    VPTreeDef::Value(SubMVRef::Above), VPTreeDef::Index(4),
    VPTreeDef::Value(SubMVRef::Zero),  VPTreeDef::Value(SubMVRef::New)