1 use nihav_core::codecs::*;
2 use nihav_core::io::byteio::*;
3 use nihav_core::data::GenericCache;
4 use super::vpcommon::*;
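// VP7 decoder: parses the bool-coded frame header and macroblock data,
// reconstructs intra and inter macroblocks (including the interlaced pitch
// modes) and applies the in-loop deblocking filter.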
8 enum VPTreeDef<T: Copy> {
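// Symbols coded with VP7's binary trees are decoded by walking the tree: at
// each node one bool is read with that node's probability, then either a leaf
// value is returned or decoding continues at the indexed node pair.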
14 fn read_tree<T:Copy>(&mut self, tree_def: &[VPTreeDef<T>], tree_prob: &[u8]) -> T;
17 impl<'a> VPTreeReader for BoolCoder<'a> {
18 fn read_tree<T:Copy>(&mut self, tree_def: &[VPTreeDef<T>], tree_prob: &[u8]) -> T {
22 let bit = self.read_prob(tree_prob[idx >> 1]);
23 match tree_def[idx + (bit as usize)] {
24 VPTreeDef::Value(v) => return v,
25 VPTreeDef::Index(ix) => { idx = ix as usize; },
32 #[derive(Clone,Copy,PartialEq,Debug)]
40 // sub-block (4x4) prediction modes
51 impl Default for PredMode {
52 fn default() -> Self { PredMode::DCPred }
56 fn to_b_mode(self) -> Self {
57 if self == PredMode::DCPred {
63 fn to_b_index(self) -> usize {
65 PredMode::DCPred => 0,
66 PredMode::TMPred => 1,
69 PredMode::LDPred => 4,
70 PredMode::RDPred => 5,
71 PredMode::VRPred => 6,
72 PredMode::VLPred => 7,
73 PredMode::HDPred => 8,
74 PredMode::HUPred => 9,
80 const PITCH_MODE_NORMAL: u8 = 0;
81 const PITCH_MODE_FOUR: u8 = 1;
82 const PITCH_MODE_X2: u8 = 2;
83 const PITCH_MODE_X4: u8 = 3;
85 #[derive(Clone,Copy,Default)]
92 #[derive(Clone,Copy,PartialEq)]
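// Expands a DCT token into a coefficient value: tokens One..Four only need a
// sign bit, category tokens read extra magnitude bits with the VP56 "add"
// probabilities and add them to the category base value before the sign bit.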
108 fn expand_token(bc: &mut BoolCoder, token: DCTToken) -> i16 {
111 DCTToken::Zero => return 0,
112 DCTToken::One => return if bc.read_bool() { -1 } else { 1 },
113 DCTToken::Two => return if bc.read_bool() { -2 } else { 2 },
114 DCTToken::Three => return if bc.read_bool() { -3 } else { 3 },
115 DCTToken::Four => return if bc.read_bool() { -4 } else { 4 },
116 DCTToken::Cat1 => cat = 0,
117 DCTToken::Cat2 => cat = 1,
118 DCTToken::Cat3 => cat = 2,
119 DCTToken::Cat4 => cat = 3,
120 DCTToken::Cat5 => cat = 4,
121 DCTToken::Cat6 => cat = 5,
125 let add_probs = &VP56_COEF_ADD_PROBS[cat];
126 for prob in add_probs.iter() {
127 if *prob == 128 { break; }
128 add = (add << 1) | (bc.read_prob(*prob) as i16);
130 let sign = bc.read_bool();
131 let level = VP56_COEF_BASE[cat] + add;
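// Parameters shared while decoding the subblocks of one macroblock:
// coefficient probabilities, scan order and dequantisation matrix.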
139 struct SBParams<'a> {
140 coef_probs: &'a [[[[u8; 11]; 3]; 8]; 4],
141 scan: &'a [usize; 16],
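// Decodes one 4x4 block of coefficients: each position reads a token from the
// coefficient tree using probabilities selected by block type, coefficient
// band and the previous token's context, expands it and stores the
// dequantised value at the scan position (luma blocks whose DC is carried by
// the Y2 block start at position 1). Returns 1 if the block has any non-zero
// coefficient so neighbouring blocks can use it as context.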
145 fn decode_subblock(bc: &mut BoolCoder, coeffs: &mut [i16; 16], ctype: usize, pctx: u8, sbparams: &SBParams) -> u8 {
146 const COEF_BANDS: [usize; 16] = [ 0, 1, 2, 3, 6, 4, 5, 6, 6, 6, 6, 6, 6, 6, 6, 7 ];
149 let start = if ctype != 0 { 0 } else { 1 };
151 let mut cval = pctx as usize;
152 for idx in start..16 {
153 let probs = &sbparams.coef_probs[ctype][COEF_BANDS[idx]][cval];
154 let tok = bc.read_tree(COEF_TREE, probs);
155 if tok == DCTToken::EOB { break; }
156 let level = expand_token(bc, tok);
157 coeffs[sbparams.scan[idx]] = level.wrapping_mul(sbparams.qmat[idx]);
158 cval = level.abs().min(2) as usize;
161 if has_nz > 0 { 1 } else { 0 }
164 #[derive(Clone,Copy,Default)]
174 struct DecoderState {
175 features: [Option<MBFeature>; 4],
182 loop_filter_level: u8,
188 kf_ymode_prob: [u8; 4],
189 kf_uvmode_prob: [u8; 3],
194 coef_probs: [[[[u8; 11]; 3]; 8]; 4],
195 mv_probs: [[u8; 17]; 2],
197 force_quant: Option<u8>,
198 force_loop_str: Option<u8>,
199 force_gf_update: bool,
200 force_pitch: Option<u8>,
203 pdc_pred_val: [i16; 2],
204 pdc_pred_count: [usize; 2],
206 ipred_ctx_y: IPredContext,
207 ipred_ctx_u: IPredContext,
208 ipred_ctx_v: IPredContext,
212 fn reset(&mut self) {
213 self.kf_ymode_prob.copy_from_slice(Y_MODE_TREE_PROBS);
214 self.kf_uvmode_prob.copy_from_slice(UV_MODE_TREE_PROBS);
215 self.coef_probs.copy_from_slice(&DEFAULT_DCT_PROBS);
216 self.mv_probs.copy_from_slice(&DEFAULT_MV_PROBS);
220 #[derive(Clone,Copy,Debug,PartialEq)]
228 #[derive(Clone,Copy,Debug,PartialEq)]
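// Decodes one motion vector component: small magnitudes come from a tree,
// large ones are coded bit by bit in LONG_VECTOR_ORDER (bit 3 is handled
// separately depending on the higher bits); a trailing sign bit is read for
// non-zero values.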
236 fn decode_mv_component(bc: &mut BoolCoder, probs: &[u8; 17]) -> i16 {
237 const LONG_VECTOR_ORDER: [usize; 7] = [ 0, 1, 2, 7, 6, 5, 4 ];
239 let val = if !bc.read_prob(probs[0]) {
240 bc.read_tree(SMALL_MV_TREE, &probs[2..9])
242 let raw_probs = &probs[9..];
244 for ord in LONG_VECTOR_ORDER.iter() {
245 raw |= (bc.read_prob(raw_probs[*ord]) as i16) << *ord;
247 if (raw & 0xF0) != 0 {
248 raw |= (bc.read_prob(raw_probs[3]) as i16) << 3;
254 if (val == 0) || !bc.read_prob(probs[1]) {
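// Per-plane caches of "this block had non-zero coefficients" flags for the
// row above and the column to the left; they provide the context used when
// decoding the coefficients of neighbouring blocks.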
262 y_pred: GenericCache<u8>,
263 u_pred: GenericCache<u8>,
264 v_pred: GenericCache<u8>,
265 y2_pred: GenericCache<u8>,
266 y_pred_left: [u8; 4],
267 u_pred_left: [u8; 2],
268 v_pred_left: [u8; 2],
275 y_pred: GenericCache::new(1, 1, 0),
276 u_pred: GenericCache::new(1, 1, 0),
277 v_pred: GenericCache::new(1, 1, 0),
278 y2_pred: GenericCache::new(1, 1, 0),
285 fn resize(&mut self, mb_w: usize) {
286 self.y_pred = GenericCache::new(4, mb_w * 4 + 1, 0);
287 self.u_pred = GenericCache::new(2, mb_w * 2 + 1, 0);
288 self.v_pred = GenericCache::new(2, mb_w * 2 + 1, 0);
289 self.y2_pred = GenericCache::new(1, mb_w + 1, 0);
291 fn reset(&mut self) {
295 self.y2_pred.reset();
296 self.y_pred_left = [0; 4];
297 self.u_pred_left = [0; 2];
298 self.v_pred_left = [0; 2];
299 self.y2_pred_left = 0;
301 fn update_row(&mut self) {
302 self.y_pred.update_row();
303 self.u_pred.update_row();
304 self.v_pred.update_row();
305 self.y2_pred.update_row();
310 info: NACodecInfoRef,
317 mb_info: Vec<MBInfo>,
321 ymodes: Vec<PredMode>,
323 uvmodes: Vec<PredMode>,
324 uvmode_stride: usize,
326 dstate: DecoderState,
329 coeffs: [[i16; 16]; 25],
331 qmat: [[[i16; 16]; 3]; 5],
333 mc_buf: NAVideoBufferRef<u8>,
335 tmp_scan: [usize; 16],
340 let vt = alloc_video_buffer(NAVideoInfo::new(128, 128, false, YUV420_FORMAT), 4).unwrap();
341 let mut scan = [0; 16];
342 scan.copy_from_slice(&DEFAULT_SCAN_ORDER);
343 let mc_buf = vt.get_vbuf().unwrap();
345 info: NACodecInfoRef::default(),
347 shuf: VPShuffler::new(),
361 dstate: DecoderState::default(),
362 pcache: PredCache::new(),
364 coeffs: [[0; 16]; 25],
367 qmat: [[[0; 16]; 3]; 5],
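// Recomputes macroblock dimensions and resizes all per-frame arrays
// (macroblock info, motion vectors, prediction modes and the prediction
// cache) when the coded frame size changes.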
372 fn set_dimensions(&mut self, width: usize, height: usize) {
373 if (width == self.width) && (height == self.height) {
377 self.height = height;
378 self.mb_w = (self.width + 15) >> 4;
379 self.mb_h = (self.height + 15) >> 4;
380 self.mb_info.resize(self.mb_w * self.mb_h, MBInfo::default());
381 self.mv_stride = self.mb_w * 4;
382 self.mvs.resize(self.mv_stride * self.mb_h * 4, ZERO_MV);
384 self.ymode_stride = self.mb_w * 4;
385 self.uvmode_stride = self.mb_w;
386 self.ymodes.resize(self.ymode_stride * self.mb_h * 4, PredMode::default());
387 self.uvmodes.resize(self.uvmode_stride * self.mb_h, PredMode::default());
389 self.pcache.resize(self.mb_w);
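// Parses the macroblock feature section of the frame header: for every
// present feature (quantiser override, loop filter strength override, golden
// frame update, pitch mode) it reads the presence probability, the selector
// tree probabilities and the per-feature default values.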
391 fn read_features(&mut self, bc: &mut BoolCoder) -> DecoderResult<()> {
392 for (i, feat) in self.dstate.features.iter_mut().enumerate() {
394 let mut feature = MBFeature::default();
395 feature.present_prob = bc.read_byte();
396 for tp in feature.tree_probs.iter_mut() {
398 *tp = bc.read_byte();
404 let fbits = match i {
407 _ => if self.dstate.version == 0 { 8 } else { 5 },
409 for dval in feature.def_val.iter_mut() {
411 *dval = bc.read_bits(fbits) as u8;
417 *feat = Some(feature);
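// Reads conditional updates for the DCT coefficient probabilities; each entry
// is replaced only when a bit coded with the matching DCT_UPDATE_PROBS value
// says so.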
424 fn read_dct_coef_prob_upd(&mut self, bc: &mut BoolCoder) -> DecoderResult<()> {
429 if bc.read_prob(DCT_UPDATE_PROBS[i][j][k][l]) {
430 self.dstate.coef_probs[i][j][k][l] = bc.read_byte();
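// Reads conditional updates for the motion vector probabilities, gated by
// MV_UPDATE_PROBS in the same way as the coefficient probabilities.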
438 fn read_mv_prob_upd(&mut self, bc: &mut BoolCoder) -> DecoderResult<()> {
441 if bc.read_prob(MV_UPDATE_PROBS[comp][i]) {
442 self.dstate.mv_probs[comp][i] = bc.read_probability();
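// Resets the per-macroblock overrides and, for every enabled feature, decodes
// whether it applies to the current macroblock and which of its values to use.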
448 fn decode_mb_features(&mut self, bc: &mut BoolCoder, _mb_x: usize, _mb_y: usize) -> DecoderResult<()> {
449 self.dstate.force_quant = None;
450 self.dstate.force_loop_str = None;
451 self.dstate.force_gf_update = false;
452 self.dstate.force_pitch = None;
453 for (i, feat) in self.dstate.features.iter().enumerate() {
454 if let Some(feat) = feat {
455 let present = bc.read_prob(feat.present_prob);
457 let ftype_idx = bc.read_tree(FEATURE_TREE, &feat.tree_probs);
458 let val = feat.def_val[ftype_idx];
460 0 => self.dstate.force_quant = Some(ftype_idx as u8),
461 1 => self.dstate.force_loop_str = Some(val),
462 2 => self.dstate.force_gf_update = true,
463 _ => self.dstate.force_pitch = Some(val),
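// Decodes and dequantises all coefficient blocks of one macroblock (the
// optional Y2 block plus 16 luma and 8 chroma subblocks), updating the
// non-zero contexts, predicting the Y2 DC for inter macroblocks and finally
// applying the inverse transforms.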
470 fn decode_residue(&mut self, bc: &mut BoolCoder, mb_x: usize, mb_idx: usize, use_last: bool) {
471 let qmat_idx = if let Some(idx) = self.dstate.force_quant { (idx as usize) + 1 } else { 0 };
472 let mut sbparams = SBParams {
473 scan: &DEFAULT_SCAN_ORDER,
474 qmat: &self.qmat[qmat_idx][2],
475 coef_probs: &self.dstate.coef_probs,
477 let mut has_ac = [false; 25];
479 if self.dstate.has_y2 {
480 let pred = &self.pcache.y2_pred;
481 let pidx = pred.xpos + mb_x;
482 let pctx = self.pcache.y2_pred_left + pred.data[pidx - pred.stride];
484 let has_nz = decode_subblock(bc, &mut self.coeffs[24], 1, pctx, &sbparams);
485 self.pcache.y2_pred.data[pidx] = has_nz;
486 self.pcache.y2_pred_left = has_nz;
487 has_ac[24] = has_nz > 0;
491 let pred = &mut self.pcache.y2_pred;
492 let pidx = pred.xpos + mb_x;
493 pred.data[pidx] = pred.data[pidx - pred.stride];
497 sbparams.scan = &self.scan;
498 sbparams.qmat = &self.qmat[qmat_idx][0];
502 let pred = &self.pcache.y_pred;
503 let pidx = pred.xpos + mb_x * 4 + bx + by * pred.stride;
504 let pctx = self.pcache.y_pred_left[by] + pred.data[pidx - pred.stride];
506 let has_nz = decode_subblock(bc, &mut self.coeffs[i], ytype, pctx, &sbparams);
507 self.pcache.y_pred.data[pidx] = has_nz;
508 self.pcache.y_pred_left[by] = has_nz;
509 has_ac[i] = has_nz > 0;
511 sbparams.qmat = &self.qmat[qmat_idx][1];
514 let by = (i >> 1) & 1;
515 let pred = &self.pcache.u_pred;
516 let pidx = pred.xpos + mb_x * 2 + bx + by * pred.stride;
517 let pctx = self.pcache.u_pred_left[by] + pred.data[pidx - pred.stride];
519 let has_nz = decode_subblock(bc, &mut self.coeffs[i], 2, pctx, &sbparams);
520 self.pcache.u_pred.data[pidx] = has_nz;
521 self.pcache.u_pred_left[by] = has_nz;
522 has_ac[i] = has_nz > 0;
526 let by = (i >> 1) & 1;
527 let pred = &self.pcache.v_pred;
528 let pidx = pred.xpos + mb_x * 2 + bx + by * pred.stride;
529 let pctx = self.pcache.v_pred_left[by] + pred.data[pidx - pred.stride];
531 let has_nz = decode_subblock(bc, &mut self.coeffs[i], 2, pctx, &sbparams);
532 self.pcache.v_pred.data[pidx] = has_nz;
533 self.pcache.v_pred_left[by] = has_nz;
534 has_ac[i] = has_nz > 0;
537 if self.dstate.has_y2 {
538 let y2block = &mut self.coeffs[24];
539 if self.mb_info[mb_idx].mb_type != VPMBType::Intra {
540 let mut dc = y2block[0];
541 let pdc_idx = if use_last { 0 } else { 1 };
542 let pval = self.dstate.pdc_pred_val[pdc_idx];
544 if self.dstate.pdc_pred_count[pdc_idx] > 3 {
548 if (pval == 0) || (dc == 0) || ((pval ^ dc) < 0) {
549 self.dstate.pdc_pred_count[pdc_idx] = 0;
550 } else if dc == pval {
551 self.dstate.pdc_pred_count[pdc_idx] += 1;
553 self.dstate.pdc_pred_val[pdc_idx] = dc;
557 } else if y2block[0] != 0 {
561 self.coeffs[i][0] = self.coeffs[24][i];
566 idct4x4(&mut self.coeffs[i]);
567 } else if self.coeffs[i][0] != 0 {
568 idct4x4_dc(&mut self.coeffs[i]);
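// Builds the dequantisation tables: slot 0 comes from the frame header
// quantisers, slots 1..=4 from the quantiser-override feature values; within
// a slot, index 0 is luma, 1 is chroma and 2 is the Y2 block, each with a
// separate DC entry at position 0.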
573 fn set_qmat(&mut self, y_dc_q: usize, y_ac_q: usize, y2_dc_q: usize, y2_ac_q: usize, uv_dc_q: usize, uv_ac_q: usize) {
574 self.qmat[0][0][0] = Y_DC_QUANTS[y_dc_q];
576 self.qmat[0][0][i] = Y_AC_QUANTS[y_ac_q];
578 self.qmat[0][1][0] = UV_DC_QUANTS[uv_dc_q];
580 self.qmat[0][1][i] = UV_AC_QUANTS[uv_ac_q];
582 self.qmat[0][2][0] = Y2_DC_QUANTS[y2_dc_q];
584 self.qmat[0][2][i] = Y2_AC_QUANTS[y2_ac_q];
586 if let Some(ref feat) = self.dstate.features[0] {
588 let q = feat.def_val[j] as usize;
589 self.qmat[j + 1][0][0] = Y_DC_QUANTS[q];
591 self.qmat[j + 1][0][i] = Y_AC_QUANTS[q];
593 self.qmat[j + 1][1][0] = UV_DC_QUANTS[q];
595 self.qmat[j + 1][1][i] = UV_AC_QUANTS[q];
597 self.qmat[j + 1][2][0] = Y2_DC_QUANTS[q];
599 self.qmat[j + 1][2][i] = Y2_AC_QUANTS[q];
604 fn fill_ymode(&mut self, mb_x: usize, mb_y: usize, ymode: PredMode) {
605 let mut iidx = mb_x * 4 + mb_y * 4 * self.ymode_stride;
608 self.ymodes[iidx + x] = ymode;
610 iidx += self.ymode_stride;
613 fn fill_mv(&mut self, mb_x: usize, mb_y: usize, mv: MV) {
614 let mut iidx = mb_x * 4 + mb_y * 4 * self.mv_stride;
617 self.mvs[iidx + x] = mv;
619 iidx += self.mv_stride;
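// Collects motion vector candidates from up to twelve neighbouring blocks
// (CAND_POS holds row/column offsets, weights and block indices), accumulates
// weights per candidate class and returns the derived mode probabilities
// together with the nearest, near and predicted vectors.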
622 fn find_mv_pred(&self, mb_x: usize, mb_y: usize) -> ([u8; 4], MV, MV, MV) {
623 const CAND_POS: [(i8, i8, u8, u8); 12] = [
624 (-1, 0, 8, 12), ( 0, -1, 8, 3),
625 (-1, -1, 2, 15), (-1, 1, 2, 12),
626 (-2, 0, 2, 12), ( 0, -2, 2, 3),
627 (-1, -2, 1, 15), (-2, -1, 1, 15),
628 (-2, 1, 1, 12), (-1, 2, 1, 12),
629 (-2, -2, 1, 15), (-2, 2, 1, 12)
632 let mut nearest_mv = ZERO_MV;
633 let mut near_mv = ZERO_MV;
635 let mut ct: [u8; 4] = [0; 4];
637 let start = if self.dstate.version == 0 { 1 } else { 0 };
638 let mvwrap = (self.mb_w as isize) + 1;
639 for (yoff, xoff, weight, blk_no) in CAND_POS.iter() {
640 let cx = (mb_x as isize) + (*xoff as isize);
641 let cy = (mb_y as isize) + (*yoff as isize);
642 let mvpos = cx + cy * mvwrap;
643 if (mvpos < start) || ((mvpos % mvwrap) == (mvwrap - 1)) {
647 let cx = (mvpos % mvwrap) as usize;
648 let cy = (mvpos / mvwrap) as usize;
649 let bx = (*blk_no as usize) & 3;
650 let by = (*blk_no as usize) >> 2;
651 let blk_pos = cx * 4 + bx + (cy * 4 + by) * self.mv_stride;
652 let mv = self.mvs[blk_pos];
658 if (nearest_mv == ZERO_MV) || (nearest_mv == mv) {
661 } else if near_mv == ZERO_MV {
665 idx = if mv == near_mv { 2 } else { 3 };
669 let pred_mv = if ct[1] > ct[2] {
670 if ct[1] >= ct[0] { nearest_mv } else { ZERO_MV }
672 if ct[2] >= ct[0] { near_mv } else { ZERO_MV }
675 let mvprobs = [INTER_MODE_PROBS[ct[0] as usize][0],
676 INTER_MODE_PROBS[ct[1] as usize][1],
677 INTER_MODE_PROBS[ct[2] as usize][2],
678 INTER_MODE_PROBS[ct[2] as usize][3]];
680 (mvprobs, nearest_mv, near_mv, pred_mv)
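// Decodes one sub-vector for a split macroblock: the sub-MV reference selects
// the left neighbour, the neighbour above, the zero vector or a newly coded
// delta added to the predicted vector.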
682 fn get_split_mv(&self, bc: &mut BoolCoder, mb_x: usize, mb_y: usize, bx: usize, by: usize, pred_mv: MV) -> MV {
683 let mode = bc.read_tree(SUB_MV_REF_TREE, &SUB_MV_REF_PROBS);
684 let mvidx = mb_x * 4 + bx + (mb_y * 4 + by) * self.mv_stride;
687 if (mb_x > 0) || (bx > 0) {
694 if (mb_y > 0) || (by > 0) {
695 self.mvs[mvidx - self.mv_stride]
700 SubMVRef::Zero => ZERO_MV,
702 let dmy = decode_mv_component(bc, &self.dstate.mv_probs[0]);
703 let dmx = decode_mv_component(bc, &self.dstate.mv_probs[1]);
704 pred_mv + MV{ x: dmx, y: dmy }
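// Reads the MV split mode (top/bottom, left/right, quarters or sixteenths)
// and fills the 4x4 grid of block vectors accordingly.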
708 fn do_split_mv(&mut self, bc: &mut BoolCoder, mb_x: usize, mb_y: usize, pred_mv: MV) -> DecoderResult<()> {
709 let split_mode = bc.read_tree(MV_SPLIT_MODE_TREE, &MV_SPLIT_MODE_PROBS);
710 let mut mvidx = mb_x * 4 + mb_y * 4 * self.mv_stride;
712 MVSplitMode::TopBottom => {
713 let top_mv = self.get_split_mv(bc, mb_x, mb_y, 0, 0, pred_mv);
715 for x in 0..4 { self.mvs[mvidx + x] = top_mv; }
716 mvidx += self.mv_stride;
718 let bot_mv = self.get_split_mv(bc, mb_x, mb_y, 0, 2, pred_mv);
720 for x in 0..4 { self.mvs[mvidx + x] = bot_mv; }
721 mvidx += self.mv_stride;
724 MVSplitMode::LeftRight => {
725 let left_mv = self.get_split_mv(bc, mb_x, mb_y, 0, 0, pred_mv);
726 self.mvs[mvidx + 1] = left_mv;
727 let right_mv = self.get_split_mv(bc, mb_x, mb_y, 2, 0, pred_mv);
729 self.mvs[mvidx + 0] = left_mv;
730 self.mvs[mvidx + 1] = left_mv;
731 self.mvs[mvidx + 2] = right_mv;
732 self.mvs[mvidx + 3] = right_mv;
733 mvidx += self.mv_stride;
736 MVSplitMode::Quarters => {
737 for y in (0..4).step_by(2) {
738 for x in (0..4).step_by(2) {
739 self.mvs[mvidx + x] = self.get_split_mv(bc, mb_x, mb_y, x, y, pred_mv);
740 self.mvs[mvidx + x + 1] = self.mvs[mvidx + x];
743 self.mvs[mvidx + x + self.mv_stride] = self.mvs[mvidx + x];
745 mvidx += self.mv_stride * 2;
748 MVSplitMode::Sixteenths => {
751 self.mvs[mvidx + x] = self.get_split_mv(bc, mb_x, mb_y, x, y, pred_mv);
753 mvidx += self.mv_stride;
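// Adds the decoded residue to the reconstructed macroblock; the pitch mode
// selects the normal 4x4 layout, 16x1 strips or the interlaced layouts with
// doubled or quadrupled row pitch.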
760 fn add_residue(&self, dframe: &mut NASimpleVideoFrame<u8>, mb_x: usize, mb_y: usize, do_luma: bool, pitch_mode: u8) {
762 let ydst = &mut dframe.data[dframe.offset[0]..];
763 let ystride = dframe.stride[0];
764 let mut yoff = mb_x * 16 + mb_y * 16 * ystride;
766 PITCH_MODE_NORMAL => {
769 add_coeffs4x4(ydst, yoff + x * 4, ystride, &self.coeffs[x + y * 4]);
776 add_coeffs16x1(ydst, yoff, &self.coeffs[y]);
783 add_coeffs4x4(ydst, yoff + x * 4, ystride * 2, &self.coeffs[x + y * 4]);
787 yoff -= 15 * ystride;
790 add_coeffs4x4(ydst, yoff + x * 4, ystride * 2, &self.coeffs[x + y * 4]);
798 add_coeffs4x4(ydst, yoff + x * 4, ystride * 4, &self.coeffs[x + y * 4]);
806 let dst = &mut dframe.data[0..];
807 let mut uoff = dframe.offset[1] + mb_x * 8 + mb_y * 8 * dframe.stride[1];
808 let ustride = dframe.stride[1];
809 let mut voff = dframe.offset[2] + mb_x * 8 + mb_y * 8 * dframe.stride[2];
810 let vstride = dframe.stride[2];
811 if (pitch_mode == PITCH_MODE_NORMAL) || (pitch_mode == PITCH_MODE_FOUR) {
814 add_coeffs4x4(dst, uoff + x * 4, ustride, &self.coeffs[16 + x + y * 2]);
815 add_coeffs4x4(dst, voff + x * 4, vstride, &self.coeffs[20 + x + y * 2]);
823 add_coeffs4x4(dst, uoff + x * 4, ustride * 2, &self.coeffs[16 + x + y * 2]);
824 add_coeffs4x4(dst, voff + x * 4, vstride * 2, &self.coeffs[20 + x + y * 2]);
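// Reconstructs an intra macroblock: whole-block 16x16 luma prediction, or
// per-block 4x4 prediction for BPred (with the interlaced coefficient
// reshuffle and top-right sample bookkeeping), followed by 8x8 chroma
// prediction and residue addition.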
831 fn recon_intra_mb(&mut self, dframe: &mut NASimpleVideoFrame<u8>, mb_x: usize, mb_y: usize) -> DecoderResult<()> {
832 let pitch = self.dstate.force_pitch.unwrap_or(0);
833 let pitch_mode = (pitch >> 3) & 3;
835 let mb_idx = mb_x + mb_y * self.mb_w;
836 let has_top = mb_y > 0;
837 let has_left = mb_x > 0;
838 let ydst = &mut dframe.data[dframe.offset[0]..];
839 let ystride = dframe.stride[0];
840 let mut yoff = mb_x * 16 + mb_y * 16 * ystride;
841 let ipred_ctx_y = &mut self.dstate.ipred_ctx_y;
842 ipred_ctx_y.has_top = has_top;
843 ipred_ctx_y.has_left = has_left;
844 let is_normal = self.mb_info[mb_idx].ymode != PredMode::BPred;
846 ipred_ctx_y.fill(ydst, yoff, ystride, 16, 16);
847 match self.mb_info[mb_idx].ymode {
848 PredMode::DCPred => IPred16x16::ipred_dc(ydst, yoff, ystride, ipred_ctx_y),
849 PredMode::HPred => IPred16x16::ipred_h (ydst, yoff, ystride, ipred_ctx_y),
850 PredMode::VPred => IPred16x16::ipred_v (ydst, yoff, ystride, ipred_ctx_y),
851 PredMode::TMPred => IPred16x16::ipred_tm(ydst, yoff, ystride, ipred_ctx_y),
855 validate!((pitch_mode == PITCH_MODE_NORMAL) || (pitch_mode == PITCH_MODE_X2));
856 let mut iidx = mb_x * 4 + mb_y * 4 * self.ymode_stride;
857 let mut tr_save = [0x80u8; 16];
858 if pitch_mode == PITCH_MODE_X2 {
859 // reorganise coefficient data for interlaced case
860 for y in (0..4).step_by(2) {
862 let mut tmpblock = [0i16; 16 * 2];
863 let eidx = x + y * 4;
864 let oidx = x + y * 4 + 4;
867 tmpblock[i * 8 + 0 + j] = self.coeffs[eidx][i * 4 + j];
868 tmpblock[i * 8 + 4 + j] = self.coeffs[oidx][i * 4 + j];
871 self.coeffs[eidx].copy_from_slice(&tmpblock[0..16]);
872 self.coeffs[oidx].copy_from_slice(&tmpblock[16..32]);
876 let tr_edge = if has_top { ydst[yoff - ystride + 15] } else { 0x80 };
879 ipred_ctx_y.has_left = has_left || x > 0;
880 let bmode = self.ymodes[iidx + x];
881 let cur_yoff = yoff + x * 4;
882 let has_tr = ipred_ctx_y.has_top && ((x < 3) || ((y == 0) && (mb_x < self.mb_w - 1)));
883 let has_dl = ipred_ctx_y.has_left && (x == 0) && (y < 3);
884 ipred_ctx_y.fill(ydst, cur_yoff, ystride,
885 if has_tr { 8 } else { 4 },
886 if has_dl { 8 } else { 4 });
889 ipred_ctx_y.top[i + 4] = tr_save[x * 4 + i];
893 tr_save[x * 4 + i] = ipred_ctx_y.top[i + 4];
896 if (mb_x == self.mb_w - 1) && has_top && (x == 3) {
898 ipred_ctx_y.top[i + 4] = tr_edge;
902 PredMode::DCPred => IPred4x4::ipred_dc(ydst, cur_yoff, ystride, ipred_ctx_y),
903 PredMode::TMPred => IPred4x4::ipred_tm(ydst, cur_yoff, ystride, ipred_ctx_y),
904 PredMode::HPred => IPred4x4::ipred_he(ydst, cur_yoff, ystride, ipred_ctx_y),
905 PredMode::VPred => IPred4x4::ipred_ve(ydst, cur_yoff, ystride, ipred_ctx_y),
906 PredMode::LDPred => IPred4x4::ipred_ld(ydst, cur_yoff, ystride, ipred_ctx_y),
907 PredMode::RDPred => IPred4x4::ipred_rd(ydst, cur_yoff, ystride, ipred_ctx_y),
908 PredMode::VRPred => IPred4x4::ipred_vr(ydst, cur_yoff, ystride, ipred_ctx_y),
909 PredMode::VLPred => IPred4x4::ipred_vl(ydst, cur_yoff, ystride, ipred_ctx_y),
910 PredMode::HDPred => IPred4x4::ipred_hd(ydst, cur_yoff, ystride, ipred_ctx_y),
911 PredMode::HUPred => IPred4x4::ipred_hu(ydst, cur_yoff, ystride, ipred_ctx_y),
914 add_coeffs4x4(ydst, cur_yoff, ystride, &self.coeffs[x + y * 4]);
916 ipred_ctx_y.has_top = true;
918 iidx += self.ymode_stride;
921 let dst = &mut dframe.data[0..];
922 let uoff = dframe.offset[1] + mb_x * 8 + mb_y * 8 * dframe.stride[1];
923 let ustride = dframe.stride[1];
924 let voff = dframe.offset[2] + mb_x * 8 + mb_y * 8 * dframe.stride[2];
925 let vstride = dframe.stride[2];
926 let ipred_ctx_u = &mut self.dstate.ipred_ctx_u;
927 let ipred_ctx_v = &mut self.dstate.ipred_ctx_v;
928 ipred_ctx_u.has_top = has_top;
929 ipred_ctx_v.has_top = has_top;
930 ipred_ctx_u.has_left = has_left;
931 ipred_ctx_v.has_left = has_left;
932 ipred_ctx_u.fill(dst, uoff, ustride, 8, 8);
933 ipred_ctx_v.fill(dst, voff, vstride, 8, 8);
934 match self.mb_info[mb_idx].uvmode {
935 PredMode::DCPred => {
936 IPred8x8::ipred_dc(dst, uoff, ustride, ipred_ctx_u);
937 IPred8x8::ipred_dc(dst, voff, vstride, ipred_ctx_v);
940 IPred8x8::ipred_h(dst, uoff, ustride, ipred_ctx_u);
941 IPred8x8::ipred_h(dst, voff, vstride, ipred_ctx_v);
944 IPred8x8::ipred_v(dst, uoff, ustride, ipred_ctx_u);
945 IPred8x8::ipred_v(dst, voff, vstride, ipred_ctx_v);
947 PredMode::TMPred => {
948 IPred8x8::ipred_tm(dst, uoff, ustride, ipred_ctx_u);
949 IPred8x8::ipred_tm(dst, voff, vstride, ipred_ctx_v);
953 self.add_residue(dframe, mb_x, mb_y, is_normal, pitch_mode);
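// Reconstructs an inter macroblock: motion compensation from the last or the
// golden frame, with one vector per macroblock or per-block vectors in
// four-MV mode (chroma vectors are derived by averaging the corresponding
// luma vectors); the special pitch modes go through mc_block_special and the
// residue is added afterwards.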
956 fn recon_inter_mb(&mut self, dframe: &mut NASimpleVideoFrame<u8>, mb_x: usize, mb_y: usize, use_last: bool) {
957 let pitch = self.dstate.force_pitch.unwrap_or(0);
958 let pitch_dmode = (pitch >> 3) & 3;
959 let pitch_smode = pitch & 7;
961 let refframe = (if use_last { self.shuf.get_last() } else { self.shuf.get_golden() }).unwrap();
962 let single_mv = self.mb_info[mb_x + mb_y * self.mb_w].mb_type != VPMBType::InterFourMV;
963 let mut iidx = mb_x * 4 + mb_y * 4 * self.mv_stride;
964 let mut mc_buf = self.mc_buf.get_data_mut().unwrap();
966 let dst = &mut dframe.data[0..];
967 let ystride = dframe.stride[0];
968 let mut yoff = dframe.offset[0] + mb_x * 16 + mb_y * 16 * ystride;
969 if pitch_smode == 0 {
971 mc_block16x16(dst, yoff, ystride, mb_x * 16, mb_y * 16,
972 self.mvs[iidx].x * 2, self.mvs[iidx].y * 2, refframe.clone(), 0, &mut mc_buf);
976 mc_block4x4(dst, yoff + x * 4, ystride, mb_x * 16 + x * 4, mb_y * 16 + y * 4,
977 self.mvs[iidx + x].x * 2, self.mvs[iidx + x].y * 2, refframe.clone(), 0, &mut mc_buf);
980 iidx += self.mv_stride;
985 mc_block_special(dst, yoff, ystride, mb_x * 16, mb_y * 16,
986 self.mvs[iidx].x * 2, self.mvs[iidx].y * 2,
987 refframe.clone(), 0, &mut mc_buf, 16, pitch_smode);
991 mc_block_special(dst, yoff + x * 4, ystride,
992 mb_x * 16 + x * 4, mb_y * 16 + y * 4,
993 self.mvs[iidx + x].x * 2, self.mvs[iidx + x].y * 2,
994 refframe.clone(), 0, &mut mc_buf, 4, pitch_smode);
997 iidx += self.mv_stride;
1002 let mut iidx = mb_x * 4 + mb_y * 4 * self.mv_stride;
1003 let mut uoff = dframe.offset[1] + mb_x * 8 + mb_y * 8 * dframe.stride[1];
1004 let ustride = dframe.stride[1];
1005 let mut voff = dframe.offset[2] + mb_x * 8 + mb_y * 8 * dframe.stride[2];
1006 let vstride = dframe.stride[2];
1008 let chroma_mv = self.mvs[iidx];
1010 if pitch_smode == 0 {
1011 mc_block8x8(dst, uoff, ustride, mb_x * 8, mb_y * 8, chroma_mv.x, chroma_mv.y, refframe.clone(), 1, &mut mc_buf);
1012 mc_block8x8(dst, voff, vstride, mb_x * 8, mb_y * 8, chroma_mv.x, chroma_mv.y, refframe.clone(), 2, &mut mc_buf);
1014 mc_block_special(dst, uoff, ustride, mb_x * 8, mb_y * 8, chroma_mv.x, chroma_mv.y,
1015 refframe.clone(), 1, &mut mc_buf, 8, pitch_smode);
1016 mc_block_special(dst, voff, vstride, mb_x * 8, mb_y * 8, chroma_mv.x, chroma_mv.y,
1017 refframe.clone(), 2, &mut mc_buf, 8, pitch_smode);
1022 let mut chroma_mv = self.mvs[iidx + x * 2] + self.mvs[iidx + x * 2 + 1]
1023 + self.mvs[iidx + x * 2 + self.mv_stride]
1024 + self.mvs[iidx + x * 2 + self.mv_stride + 1];
1025 if chroma_mv.x < 0 {
1030 if chroma_mv.y < 0 {
1038 if pitch_smode == 0 {
1039 mc_block4x4(dst, uoff + x * 4, ustride, mb_x * 8 + x * 4, mb_y * 8 + y * 4,
1040 chroma_mv.x, chroma_mv.y, refframe.clone(), 1, &mut mc_buf);
1041 mc_block4x4(dst, voff + x * 4, vstride, mb_x * 8 + x * 4, mb_y * 8 + y * 4,
1042 chroma_mv.x, chroma_mv.y, refframe.clone(), 2, &mut mc_buf);
1044 mc_block_special(dst, uoff + x * 4, ustride, mb_x * 8 + x * 4, mb_y * 8 + y * 4,
1045 chroma_mv.x, chroma_mv.y, refframe.clone(), 1, &mut mc_buf,
1047 mc_block_special(dst, voff + x * 4, vstride, mb_x * 8 + x * 4, mb_y * 8 + y * 4,
1048 chroma_mv.x, chroma_mv.y, refframe.clone(), 2, &mut mc_buf,
1052 uoff += ustride * 4;
1053 voff += vstride * 4;
1054 iidx += 2 * self.mv_stride;
1057 self.add_residue(dframe, mb_x, mb_y, true, pitch_dmode);
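// Applies the loop filter to one macroblock: edge filtering on the macroblock
// borders and inner filtering on the 4-pixel block borders, using the simple
// or normal filter with thresholds derived from the loop strength and
// sharpness settings.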
1059 fn loop_filter_mb(&mut self, dframe: &mut NASimpleVideoFrame<u8>, mb_x: usize, mb_y: usize, loop_str: u8) {
1060 const HIGH_EDGE_VAR_THR: [[u8; 64]; 2] = [
1062 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1,
1063 1, 1, 1, 1, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
1064 2, 2, 2, 2, 2, 2, 2, 2, 3, 3, 3, 3, 3, 3, 3, 3,
1065 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3
1067 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1,
1068 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
1069 1, 1, 1, 1, 1, 1, 1, 1, 2, 2, 2, 2, 2, 2, 2, 2,
1070 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2
1073 let edge_thr = (loop_str as i16) + 2;
1074 let luma_thr = loop_str as i16;
1075 let chroma_thr = (loop_str as i16) * 2;
1076 let inner_thr = if self.dstate.loop_sharpness == 0 {
1079 let bound1 = (9 - self.dstate.loop_sharpness) as i16;
1080 let shift = (self.dstate.loop_sharpness + 3) >> 2;
1081 ((loop_str as i16) >> shift).min(bound1)
1083 let hev_thr = HIGH_EDGE_VAR_THR[if self.dstate.is_intra { 1 } else { 0 }][loop_str as usize] as i16;
1085 let ystride = dframe.stride[0];
1086 let ustride = dframe.stride[1];
1087 let vstride = dframe.stride[2];
1088 let ypos = dframe.offset[0] + mb_x * 16 + mb_y * 16 * ystride;
1089 let upos = dframe.offset[1] + mb_x * 8 + mb_y * 8 * ustride;
1090 let vpos = dframe.offset[2] + mb_x * 8 + mb_y * 8 * vstride;
1092 let (loop_edge, loop_inner) = if self.dstate.lf_simple {
1093 (simple_loop_filter as LoopFilterFunc, simple_loop_filter as LoopFilterFunc)
1095 (normal_loop_filter_edge as LoopFilterFunc, normal_loop_filter_inner as LoopFilterFunc)
1099 loop_edge(dframe.data, ypos, 1, ystride, 16, edge_thr, inner_thr, hev_thr);
1100 loop_edge(dframe.data, upos, 1, ustride, 8, edge_thr, inner_thr, hev_thr);
1101 loop_edge(dframe.data, vpos, 1, vstride, 8, edge_thr, inner_thr, hev_thr);
1104 loop_edge(dframe.data, ypos, ystride, 1, 16, edge_thr, inner_thr, hev_thr);
1105 loop_edge(dframe.data, upos, ustride, 1, 8, edge_thr, inner_thr, hev_thr);
1106 loop_edge(dframe.data, vpos, vstride, 1, 8, edge_thr, inner_thr, hev_thr);
1110 loop_inner(dframe.data, ypos + y * 4 * ystride, ystride, 1, 16, luma_thr, inner_thr, hev_thr);
1112 loop_inner(dframe.data, upos + 4 * ustride, ustride, 1, 8, chroma_thr, inner_thr, hev_thr);
1113 loop_inner(dframe.data, vpos + 4 * vstride, vstride, 1, 8, chroma_thr, inner_thr, hev_thr);
1116 loop_inner(dframe.data, ypos + x * 4, 1, ystride, 16, luma_thr, inner_thr, hev_thr);
1118 loop_inner(dframe.data, upos + 4, 1, ustride, 8, chroma_thr, inner_thr, hev_thr);
1119 loop_inner(dframe.data, vpos + 4, 1, vstride, 8, chroma_thr, inner_thr, hev_thr);
1123 impl NADecoder for VP7Decoder {
1124 fn init(&mut self, supp: &mut NADecoderSupport, info: NACodecInfoRef) -> DecoderResult<()> {
1125 if let NACodecTypeInfo::Video(vinfo) = info.get_properties() {
1126 let fmt = YUV420_FORMAT;
1127 let myvinfo = NAVideoInfo::new(vinfo.get_width(), vinfo.get_height(), false, fmt);
1128 let myinfo = NACodecTypeInfo::Video(myvinfo.clone());
1129 self.info = NACodecInfo::new_ref(info.get_name(), myinfo, info.get_extradata()).into_ref();
1131 supp.pool_u8.set_dec_bufs(4);
1132 supp.pool_u8.prealloc_video(NAVideoInfo::new(myvinfo.get_width(), myvinfo.get_height(), false, vinfo.get_format()), 4)?;
1133 self.set_dimensions(myvinfo.get_width(), myvinfo.get_height());
1136 Err(DecoderError::InvalidData)
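// Decodes one frame: parse the frame tag and the two bool-coded partitions
// (header, modes and motion vectors in the first, coefficients in the
// second), reconstruct all macroblocks, run the loop filter and update the
// reference frames.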
1139 fn decode(&mut self, supp: &mut NADecoderSupport, pkt: &NAPacket) -> DecoderResult<NAFrameRef> {
1140 let src = pkt.get_buffer();
1142 validate!(src.len() > 4);
1144 let frame_tag = read_u24le(src.as_slice())?;
1145 self.dstate.is_intra = (frame_tag & 1) == 0;
1146 self.dstate.version = ((frame_tag >> 1) & 7) as u8;
1147 let part2_off = (frame_tag >> 4) as usize;
1148 let part1_off = if self.dstate.version == 0 { 4 } else { 3 };
1150 validate!(src.len() > part1_off + part2_off);
1151 let mut bc = BoolCoder::new(&src[part1_off..][..part2_off])?;
1152 let mut bc_main = BoolCoder::new(&src[part1_off + part2_off..])?;
1153 if self.dstate.is_intra {
1154 let width = bc.read_bits(12) as usize;
1155 let height = bc.read_bits(12) as usize;
1156 let _scalev = bc.read_bits(2);
1157 let _scaleh = bc.read_bits(2);
1158 validate!((width > 0) && (height > 0));
1159 self.set_dimensions(width, height);
1161 self.dstate.reset();
1162 self.scan.copy_from_slice(&DEFAULT_SCAN_ORDER);
1164 if !self.shuf.has_refs() {
1165 return Err(DecoderError::MissingReference);
1169 self.read_features(&mut bc)?;
1171 let y_ac_q = bc.read_bits(7) as usize;
1172 let y_dc_q = if bc.read_bool() { bc.read_bits(7) as usize } else { y_ac_q };
1173 let y2_dc_q = if bc.read_bool() { bc.read_bits(7) as usize } else { y_ac_q };
1174 let y2_ac_q = if bc.read_bool() { bc.read_bits(7) as usize } else { y_ac_q };
1175 let uv_dc_q = if bc.read_bool() { bc.read_bits(7) as usize } else { y_ac_q };
1176 let uv_ac_q = if bc.read_bool() { bc.read_bits(7) as usize } else { y_ac_q };
1177 self.set_qmat(y_dc_q, y_ac_q, y2_dc_q, y2_ac_q, uv_dc_q, uv_ac_q);
1179 let update_gf = if self.dstate.is_intra { true } else { bc.read_bool() };
1181 let mut has_fading_feature = true;
1182 let mut keep_probs = true;
1183 if self.dstate.version != 0 {
1184 keep_probs = bc.read_bool();
1185 if self.dstate.is_intra {
1186 has_fading_feature = true;
1188 has_fading_feature = bc.read_bool();
1192 if has_fading_feature {
1193 self.dstate.fading = bc.read_bool();
1194 if self.dstate.fading {
1195 self.dstate.fade_alpha = bc.read_sbits(8) as u16;
1196 self.dstate.fade_beta = bc.read_sbits(8) as u16;
1197 if let Some(pframe) = self.shuf.get_last() {
1198 let mut fframe = supp.pool_u8.get_free().unwrap();
1199 let mut dframe = NASimpleVideoFrame::from_video_buf(&mut fframe).unwrap();
1200 fade_frame(pframe, &mut dframe, self.dstate.fade_alpha, self.dstate.fade_beta);
1201 self.shuf.add_frame(fframe);
1205 self.dstate.fading = false;
1208 if self.dstate.version == 0 {
1209 self.dstate.lf_simple = bc.read_bool();
1214 self.scan[i] = DEFAULT_SCAN_ORDER[bc.read_bits(4) as usize];
1218 if self.dstate.version != 0 {
1219 self.dstate.lf_simple = bc.read_bool();
1221 self.dstate.lf_simple = false;
1224 self.dstate.loop_filter_level = bc.read_bits(6) as u8;
1225 self.dstate.loop_sharpness = bc.read_bits(3) as u8;
1227 self.read_dct_coef_prob_upd(&mut bc)?;
1229 if !self.dstate.is_intra {
1230 self.dstate.prob_intra_pred = bc.read_byte();
1231 self.dstate.prob_last_pred = bc.read_byte();
1234 self.dstate.kf_ymode_prob[i] = bc.read_byte();
1239 self.dstate.kf_uvmode_prob[i] = bc.read_byte();
1242 self.read_mv_prob_upd(&mut bc)?;
1245 self.tmp_scan.copy_from_slice(&self.scan);
1248 let vinfo = NAVideoInfo::new(self.width, self.height, false, YUV420_FORMAT);
1249 let ret = supp.pool_u8.get_free();
1251 return Err(DecoderError::AllocError);
1253 let mut buf = ret.unwrap();
1254 if buf.get_info() != vinfo {
1256 supp.pool_u8.reset();
1257 supp.pool_u8.prealloc_video(vinfo, 4)?;
1258 let ret = supp.pool_u8.get_free();
1260 return Err(DecoderError::AllocError);
1264 let mut dframe = NASimpleVideoFrame::from_video_buf(&mut buf).unwrap();
1267 self.pcache.reset();
1268 if self.dstate.is_intra || (self.dstate.version > 0) {
1269 self.dstate.pdc_pred_val = [0; 2];
1270 self.dstate.pdc_pred_count = [0; 2];
1272 let mut use_last = true;
1273 for mb_y in 0..self.mb_h {
1274 for mb_x in 0..self.mb_w {
1275 self.decode_mb_features(&mut bc, mb_x, mb_y)?;
1276 self.dstate.has_y2 = true;
1277 if self.dstate.is_intra {
1278 let ymode = bc.read_tree(KF_Y_MODE_TREE, KF_Y_MODE_TREE_PROBS);
1279 if ymode == PredMode::BPred {
1280 self.dstate.has_y2 = false;
1281 let mut iidx = mb_x * 4 + mb_y * 4 * self.ymode_stride;
1284 let top_mode = if (y > 0) || (mb_y > 0) {
1285 self.ymodes[iidx + x - self.ymode_stride]
1289 let left_mode = if (x > 0) || (mb_x > 0) {
1290 self.ymodes[iidx + x - 1]
1294 let top_idx = top_mode.to_b_index();
1295 let left_idx = left_mode.to_b_index();
1296 let bmode = bc.read_tree(B_MODE_TREE, &KF_B_MODE_TREE_PROBS[top_idx][left_idx]);
1297 self.ymodes[iidx + x] = bmode;
1299 iidx += self.ymode_stride;
1302 self.fill_ymode(mb_x, mb_y, ymode.to_b_mode());
1304 let uvmode = bc.read_tree(UV_MODE_TREE, KF_UV_MODE_TREE_PROBS);
1305 self.mb_info[mb_idx].mb_type = VPMBType::Intra;
1306 self.mb_info[mb_idx].ymode = ymode;
1307 self.mb_info[mb_idx].uvmode = uvmode;
1308 } else if !bc.read_prob(self.dstate.prob_intra_pred) {
1309 let ymode = bc.read_tree(Y_MODE_TREE, &self.dstate.kf_ymode_prob);
1310 if ymode == PredMode::BPred {
1311 self.dstate.has_y2 = false;
1312 let mut iidx = mb_x * 4 + mb_y * 4 * self.ymode_stride;
1315 let bmode = bc.read_tree(B_MODE_TREE, B_MODE_TREE_PROBS);
1316 self.ymodes[iidx + x] = bmode;
1318 iidx += self.ymode_stride;
1321 self.fill_ymode(mb_x, mb_y, PredMode::Inter);
1323 let uvmode = bc.read_tree(UV_MODE_TREE, &self.dstate.kf_uvmode_prob);
1324 self.mb_info[mb_idx].mb_type = VPMBType::Intra;
1325 self.mb_info[mb_idx].ymode = ymode;
1326 self.mb_info[mb_idx].uvmode = uvmode;
1327 self.fill_mv(mb_x, mb_y, ZERO_MV);
1329 use_last = !bc.read_prob(self.dstate.prob_last_pred);
1331 let (mvprobs, nearest_mv, near_mv, pred_mv) = self.find_mv_pred(mb_x, mb_y);
1332 let mbtype = bc.read_tree(MV_REF_TREE, &mvprobs);
1335 VPMBType::InterNearest => {
1336 self.fill_mv(mb_x, mb_y, nearest_mv);
1338 VPMBType::InterNear => {
1339 self.fill_mv(mb_x, mb_y, near_mv);
1341 VPMBType::InterNoMV => {
1342 self.fill_mv(mb_x, mb_y, ZERO_MV);
1344 VPMBType::InterMV => {
1345 let dmy = decode_mv_component(&mut bc, &self.dstate.mv_probs[0]);
1346 let dmx = decode_mv_component(&mut bc, &self.dstate.mv_probs[1]);
1347 let new_mv = pred_mv + MV{ x: dmx, y: dmy };
1348 self.fill_mv(mb_x, mb_y, new_mv);
1350 VPMBType::InterFourMV => {
1351 self.do_split_mv(&mut bc, mb_x, mb_y, pred_mv)?;
1353 _ => unreachable!(),
1356 self.fill_ymode(mb_x, mb_y, PredMode::Inter);
1357 self.mb_info[mb_idx].mb_type = mbtype;
1358 self.mb_info[mb_idx].ymode = PredMode::Inter;
1359 self.mb_info[mb_idx].uvmode = PredMode::Inter;
1361 self.decode_residue(&mut bc_main, mb_x, mb_idx, use_last);
1362 match self.mb_info[mb_idx].mb_type {
1363 VPMBType::Intra => {
1364 self.recon_intra_mb(&mut dframe, mb_x, mb_y)?;
1367 self.recon_inter_mb(&mut dframe, mb_x, mb_y, use_last);
1370 if let Some(loop_str) = self.dstate.force_loop_str {
1371 self.mb_info[mb_idx].loop_str = loop_str;
1373 self.mb_info[mb_idx].loop_str = self.dstate.loop_filter_level;
1375 self.mb_info[mb_idx].upd_gf = self.dstate.force_gf_update;
1378 self.pcache.update_row();
1381 for mb_y in 0..self.mb_h {
1382 for mb_x in 0..self.mb_w {
1383 let loop_str = self.mb_info[mb_idx].loop_str;
1384 self.loop_filter_mb(&mut dframe, mb_x, mb_y, loop_str);
1388 if !update_gf && self.dstate.features[2].is_some() {
1389 let gf = self.shuf.get_golden().unwrap();
1390 let mut new_gf = supp.pool_u8.get_copy(&gf).unwrap();
1391 let dframe = NASimpleVideoFrame::from_video_buf(&mut new_gf).unwrap();
1393 let mut mc_buf = self.mc_buf.get_data_mut().unwrap();
1394 for mb_y in 0..self.mb_h {
1395 for mb_x in 0..self.mb_w {
1396 if self.mb_info[mb_idx].upd_gf {
1397 mc_block16x16(dframe.data, dframe.offset[0] + mb_x * 16 + mb_y * 16 * dframe.stride[0], dframe.stride[0], mb_x * 16, mb_y * 16, 0, 0, buf.clone(), 0, &mut mc_buf);
1398 mc_block8x8(dframe.data, dframe.offset[1] + mb_x * 8 + mb_y * 8 * dframe.stride[1], dframe.stride[1], mb_x * 8, mb_y * 8, 0, 0, buf.clone(), 1, &mut mc_buf);
1399 mc_block8x8(dframe.data, dframe.offset[2] + mb_x * 8 + mb_y * 8 * dframe.stride[2], dframe.stride[2], mb_x * 8, mb_y * 8, 0, 0, buf.clone(), 2, &mut mc_buf);
1404 self.shuf.add_golden_frame(new_gf);
1408 self.scan.copy_from_slice(&self.tmp_scan);
1411 self.shuf.add_golden_frame(buf.clone());
1413 self.shuf.add_frame(buf.clone());
1415 let mut frm = NAFrame::new_from_pkt(pkt, self.info.clone(), NABufferType::Video(buf));
1416 frm.set_keyframe(self.dstate.is_intra);
1417 frm.set_frame_type(if self.dstate.is_intra { FrameType::I } else { FrameType::P });
1420 fn flush(&mut self) {
1425 pub fn get_decoder() -> Box<dyn NADecoder + Send> {
1426 Box::new(VP7Decoder::new())
1431 use nihav_core::codecs::RegisteredDecoders;
1432 use nihav_core::demuxers::RegisteredDemuxers;
1433 use nihav_core::test::dec_video::*;
1434 use crate::duck_register_all_codecs;
1435 use nihav_commonfmt::generic_register_all_demuxers;
1439 let mut dmx_reg = RegisteredDemuxers::new();
1440 generic_register_all_demuxers(&mut dmx_reg);
1441 let mut dec_reg = RegisteredDecoders::new();
1442 duck_register_all_codecs(&mut dec_reg);
1444 test_decoding("avi", "vp7", "assets/Duck/interlaced_blit_pitch.avi", Some(12), &dmx_reg,
1445 &dec_reg, ExpectedTestResult::MD5Frames(vec![
1446 [0xb79fb6f8, 0xed51ac9e, 0x9e423456, 0xc0918e7f],
1447 [0xbf8d1274, 0x83515e15, 0x8c0887de, 0xfbfd05d3],
1448 [0x8ad00466, 0x80b6cbfb, 0x54de408e, 0x9efbc05e],
1449 [0x144122c5, 0x6897b553, 0x93474d29, 0x1a1274ec],
1450 [0x06ff5d07, 0x55825d38, 0x072b0a78, 0xfcb5020f],
1451 [0xfd01591b, 0xc42113e7, 0xc5a5550f, 0xb30f3b02],
1452 [0x155e0d6e, 0x96d75e06, 0x9bd7ce87, 0xacf868e1],
1453 [0xfd79103a, 0x695d21d3, 0xfeacb5b4, 0x1d869d08],
1454 [0xf4bcfeac, 0x0d2c305c, 0x11416c96, 0x626a5ef6],
1455 [0x3579b66c, 0x0a7d7dc0, 0xe80b0395, 0xf6a70661],
1456 [0x5773768c, 0x813442e9, 0x4dd6f793, 0xb10fe55f],
1457 [0xcaaf0ddb, 0x65c2410e, 0x95da5bba, 0x3b90128e],
1458 [0x74773773, 0xe1dbadeb, 0x57aaf64b, 0x9c21e3c7]]));
1462 /*const DEFAULT_ZIGZAG: [usize; 16] = [
1468 const DEFAULT_SCAN_ORDER: [usize; 16] = [
1475 const Y_MODE_TREE: &[VPTreeDef<PredMode>] = &[
1476 VPTreeDef::Value(PredMode::DCPred), VPTreeDef::Index(2),
1477 VPTreeDef::Index(4), VPTreeDef::Index(6),
1478 VPTreeDef::Value(PredMode::VPred), VPTreeDef::Value(PredMode::HPred),
1479 VPTreeDef::Value(PredMode::TMPred), VPTreeDef::Value(PredMode::BPred),
1481 const KF_Y_MODE_TREE: &[VPTreeDef<PredMode>] = &[
1482 VPTreeDef::Value(PredMode::BPred), VPTreeDef::Index(2),
1483 VPTreeDef::Index(4), VPTreeDef::Index(6),
1484 VPTreeDef::Value(PredMode::DCPred), VPTreeDef::Value(PredMode::VPred),
1485 VPTreeDef::Value(PredMode::HPred), VPTreeDef::Value(PredMode::TMPred),
1487 const UV_MODE_TREE: &[VPTreeDef<PredMode>] = &[
1488 VPTreeDef::Value(PredMode::DCPred), VPTreeDef::Index(2),
1489 VPTreeDef::Value(PredMode::VPred), VPTreeDef::Index(4),
1490 VPTreeDef::Value(PredMode::HPred), VPTreeDef::Value(PredMode::TMPred)
1492 const B_MODE_TREE: &[VPTreeDef<PredMode>] = &[
1493 VPTreeDef::Value(PredMode::DCPred), VPTreeDef::Index(2),
1494 VPTreeDef::Value(PredMode::TMPred), VPTreeDef::Index(4),
1495 VPTreeDef::Value(PredMode::VPred), VPTreeDef::Index(6),
1496 VPTreeDef::Index(8), VPTreeDef::Index(12),
1497 VPTreeDef::Value(PredMode::HPred), VPTreeDef::Index(10),
1498 VPTreeDef::Value(PredMode::RDPred), VPTreeDef::Value(PredMode::VRPred),
1499 VPTreeDef::Value(PredMode::LDPred), VPTreeDef::Index(14),
1500 VPTreeDef::Value(PredMode::VLPred), VPTreeDef::Index(16),
1501 VPTreeDef::Value(PredMode::HDPred), VPTreeDef::Value(PredMode::HUPred)
1504 const FEATURE_TREE: &[VPTreeDef<usize>] = &[
1505 VPTreeDef::Index(2), VPTreeDef::Index(4),
1506 VPTreeDef::Value(0), VPTreeDef::Value(1),
1507 VPTreeDef::Value(2), VPTreeDef::Value(3)
1510 const COEF_TREE: &[VPTreeDef<DCTToken>] = &[
1511 VPTreeDef::Value(DCTToken::EOB), VPTreeDef::Index(2),
1512 VPTreeDef::Value(DCTToken::Zero), VPTreeDef::Index(4),
1513 VPTreeDef::Value(DCTToken::One), VPTreeDef::Index(6),
1514 VPTreeDef::Index(8), VPTreeDef::Index(12),
1515 VPTreeDef::Value(DCTToken::Two), VPTreeDef::Index(10),
1516 VPTreeDef::Value(DCTToken::Three), VPTreeDef::Value(DCTToken::Four),
1517 VPTreeDef::Index(14), VPTreeDef::Index(16),
1518 VPTreeDef::Value(DCTToken::Cat1), VPTreeDef::Value(DCTToken::Cat2),
1519 VPTreeDef::Index(18), VPTreeDef::Index(20),
1520 VPTreeDef::Value(DCTToken::Cat3), VPTreeDef::Value(DCTToken::Cat4),
1521 VPTreeDef::Value(DCTToken::Cat5), VPTreeDef::Value(DCTToken::Cat6)
1524 const MV_REF_TREE: &[VPTreeDef<VPMBType>] = &[
1525 VPTreeDef::Value(VPMBType::InterNoMV), VPTreeDef::Index(2),
1526 VPTreeDef::Value(VPMBType::InterNearest), VPTreeDef::Index(4),
1527 VPTreeDef::Value(VPMBType::InterNear), VPTreeDef::Index(6),
1528 VPTreeDef::Value(VPMBType::InterMV), VPTreeDef::Value(VPMBType::InterFourMV)
1530 const SMALL_MV_TREE: &[VPTreeDef<i16>] = &[
1531 VPTreeDef::Index(2), VPTreeDef::Index(8),
1532 VPTreeDef::Index(4), VPTreeDef::Index(6),
1533 VPTreeDef::Value(0), VPTreeDef::Value(1),
1534 VPTreeDef::Value(2), VPTreeDef::Value(3),
1535 VPTreeDef::Index(10), VPTreeDef::Index(12),
1536 VPTreeDef::Value(4), VPTreeDef::Value(5),
1537 VPTreeDef::Value(6), VPTreeDef::Value(7)
1539 const MV_SPLIT_MODE_TREE: &[VPTreeDef<MVSplitMode>] = &[
1540 VPTreeDef::Value(MVSplitMode::Sixteenths), VPTreeDef::Index(2),
1541 VPTreeDef::Value(MVSplitMode::Quarters), VPTreeDef::Index(4),
1542 VPTreeDef::Value(MVSplitMode::TopBottom), VPTreeDef::Value(MVSplitMode::LeftRight)
1544 const SUB_MV_REF_TREE: &[VPTreeDef<SubMVRef>] = &[
1545 VPTreeDef::Value(SubMVRef::Left), VPTreeDef::Index(2),
1546 VPTreeDef::Value(SubMVRef::Above), VPTreeDef::Index(4),
1547 VPTreeDef::Value(SubMVRef::Zero), VPTreeDef::Value(SubMVRef::New)