--- /dev/null
+use nihav_core::codecs::*;
+use nihav_core::io::byteio::*;
+use nihav_core::data::GenericCache;
+use super::vpcommon::*;
+use super::vp7data::*;
+use super::vp7dsp::*;
+
+/// Node of a static binary decoding tree: either a jump to another node pair
+/// (`Index` holds the offset of the next pair in the tree table) or a decoded
+/// leaf value.
+enum VPTreeDef<T: Copy> {
+ Index(u8),
+ Value(T),
+}
+
+/// Decoding of a value from a boolean coder driven by a tree table plus a
+/// per-branch probability table.
+trait VPTreeReader {
+ fn read_tree<T:Copy>(&mut self, tree_def: &[VPTreeDef<T>], tree_prob: &[u8]) -> T;
+}
+
+impl<'a> VPTreeReader for BoolCoder<'a> {
+    /// Walks the implicit binary tree: each decoded bit picks the left or
+    /// right entry of the current node pair; `Index` nodes continue the walk,
+    /// a `Value` leaf ends it.
+    fn read_tree<T:Copy>(&mut self, tree_def: &[VPTreeDef<T>], tree_prob: &[u8]) -> T {
+        let mut pos = 0usize;
+        loop {
+            // one probability entry covers a pair of tree nodes
+            let branch = self.read_prob(tree_prob[pos >> 1]) as usize;
+            match tree_def[pos + branch] {
+                VPTreeDef::Index(next) => pos = next as usize,
+                VPTreeDef::Value(leaf) => break leaf,
+            }
+        }
+    }
+}
+
+/// Intra prediction modes; the first five are macroblock-level modes,
+/// the middle group are 4x4 sub-block modes, `Inter` marks a non-intra MB.
+#[repr(u8)]
+#[derive(Clone,Copy,PartialEq,Debug)]
+enum PredMode {
+ DCPred,
+ HPred,
+ VPred,
+ TMPred,
+ BPred,
+
+ //sub-block prediction modes
+ LDPred,
+ RDPred,
+ VRPred,
+ VLPred,
+ HDPred,
+ HUPred,
+
+ Inter,
+}
+
+// DC prediction is the neutral default used when mode arrays are (re)initialised.
+impl Default for PredMode {
+ fn default() -> Self { PredMode::DCPred }
+}
+
+impl PredMode {
+    /// Maps a macroblock-level mode onto the sub-block mode set:
+    /// DC prediction stays DC, every other mode becomes TrueMotion.
+    fn to_b_mode(self) -> Self {
+        match self {
+            PredMode::DCPred => PredMode::DCPred,
+            _                => PredMode::TMPred,
+        }
+    }
+    /// Returns the table index corresponding to a sub-block prediction mode.
+    /// Panics on modes that have no sub-block equivalent (`BPred`, `Inter`).
+    fn to_b_index(self) -> usize {
+        match self {
+            PredMode::DCPred => 0,
+            PredMode::TMPred => 1,
+            PredMode::VPred  => 2,
+            PredMode::HPred  => 3,
+            PredMode::LDPred => 4,
+            PredMode::RDPred => 5,
+            PredMode::VRPred => 6,
+            PredMode::VLPred => 7,
+            PredMode::HDPred => 8,
+            PredMode::HUPred => 9,
+            _ => unreachable!(),
+        }
+    }
+}
+
+// Pitch (scanning/interlacing) modes extracted from the per-MB pitch feature.
+const PITCH_MODE_NORMAL: u8 = 0;
+const PITCH_MODE_FOUR: u8 = 1;
+const PITCH_MODE_X2: u8 = 2;
+const PITCH_MODE_X4: u8 = 3;
+
+/// Per-macroblock feature description read from the frame header:
+/// presence probability, tree probabilities for selecting one of the
+/// (up to four) values, and the values themselves.
+#[derive(Clone,Copy,Default)]
+struct MBFeature {
+ present_prob: u8,
+ tree_probs: [u8; 3],
+ def_val: [u8; 4],
+}
+
+/// DCT coefficient token: small magnitudes are coded directly (Zero..Four),
+/// larger ones select a category with extra magnitude bits (Cat1..Cat6),
+/// EOB terminates the block.
+#[derive(Clone,Copy,PartialEq)]
+enum DCTToken {
+ Zero,
+ One,
+ Two,
+ Three,
+ Four,
+ Cat1,
+ Cat2,
+ Cat3,
+ Cat4,
+ Cat5,
+ Cat6,
+ EOB,
+}
+
+/// Converts a DCT token into a signed coefficient value, reading any extra
+/// magnitude bits and the sign bit from the boolean coder.
+/// Must not be called with `DCTToken::EOB`.
+fn expand_token(bc: &mut BoolCoder, token: DCTToken) -> i16 {
+    // Small tokens encode the magnitude directly; only a sign bit follows.
+    let cat = match token {
+        DCTToken::Zero  => return 0,
+        DCTToken::One   => return if bc.read_bool() { -1 } else { 1 },
+        DCTToken::Two   => return if bc.read_bool() { -2 } else { 2 },
+        DCTToken::Three => return if bc.read_bool() { -3 } else { 3 },
+        DCTToken::Four  => return if bc.read_bool() { -4 } else { 4 },
+        DCTToken::Cat1  => 0,
+        DCTToken::Cat2  => 1,
+        DCTToken::Cat3  => 2,
+        DCTToken::Cat4  => 3,
+        DCTToken::Cat5  => 4,
+        DCTToken::Cat6  => 5,
+        DCTToken::EOB   => unreachable!(),
+    };
+    // Category tokens carry extra magnitude bits, MSB first, until the
+    // probability table hits its 128 terminator entry.
+    let mut add = 0i16;
+    for &prob in VP56_COEF_ADD_PROBS[cat].iter() {
+        if prob == 128 { break; }
+        add = (add << 1) | (bc.read_prob(prob) as i16);
+    }
+    let level = VP56_COEF_BASE[cat] + add;
+    // sign bit is read last, after the magnitude
+    if bc.read_bool() { -level } else { level }
+}
+
+/// Parameters shared by all sub-block decodes of one macroblock:
+/// coefficient probabilities, scan order and dequantisation table.
+struct SBParams<'a> {
+ coef_probs: &'a [[[[u8; 11]; 3]; 8]; 4],
+ scan: &'a [usize; 16],
+ qmat: &'a [i16; 16],
+}
+
+/// Decodes one 4x4 coefficient block of type `ctype` with neighbour context
+/// `pctx` (0..=2), writing dequantised coefficients in scan order.
+/// Returns 1 if any non-zero coefficient was decoded, 0 otherwise
+/// (used as the "has coefficients" context for neighbouring blocks).
+fn decode_subblock<'a>(bc: &mut BoolCoder, coeffs: &mut [i16; 16], ctype: usize, pctx: u8, sbparams: &SBParams) -> u8 {
+ const COEF_BANDS: [usize; 16] = [ 0, 1, 2, 3, 6, 4, 5, 6, 6, 6, 6, 6, 6, 6, 6, 7 ];
+
+ let mut has_nz = 0;
+ // ctype 0 (luma without separate DC) skips coefficient 0 — its DC comes from the Y2 block
+ let start = if ctype != 0 { 0 } else { 1 };
+ *coeffs = [0; 16];
+ let mut cval = pctx as usize;
+ for idx in start..16 {
+ let probs = &sbparams.coef_probs[ctype][COEF_BANDS[idx]][cval];
+ let tok = bc.read_tree(COEF_TREE, probs);
+ if tok == DCTToken::EOB { break; }
+ let level = expand_token(bc, tok);
+ // dequantise while storing; qmat is indexed by scan position
+ coeffs[sbparams.scan[idx]] = level.wrapping_mul(sbparams.qmat[idx]);
+ // context for the next coefficient: 0, 1 or "2 and more"
+ cval = level.abs().min(2) as usize;
+ has_nz |= cval;
+ }
+ if has_nz > 0 { 1 } else { 0 }
+}
+
+/// Per-macroblock decoding results kept for reconstruction and loop filtering.
+#[derive(Clone,Copy,Default)]
+struct MBInfo {
+ mb_type: VPMBType,
+ ymode: PredMode,
+ uvmode: PredMode,
+ loop_str: u8,
+ upd_gf: bool,
+}
+
+/// All mutable decoder state carried between frames and macroblocks:
+/// header flags, probability models and prediction contexts.
+#[derive(Default)]
+struct DecoderState {
+ features: [Option<MBFeature>; 4],
+
+ fading: bool,
+ fade_alpha: u16,
+ fade_beta: u16,
+
+ lf_simple: bool,
+ loop_filter_level: u8,
+ loop_sharpness: u8,
+
+ is_intra: bool,
+ version: u8,
+
+ kf_ymode_prob: [u8; 4],
+ kf_uvmode_prob: [u8; 3],
+
+ prob_intra_pred: u8,
+ prob_last_pred: u8,
+
+ coef_probs: [[[[u8; 11]; 3]; 8]; 4],
+ mv_probs: [[u8; 17]; 2],
+
+ // per-MB overrides set by the feature decoder
+ force_quant: Option<u8>,
+ force_loop_str: Option<u8>,
+ force_gf_update: bool,
+ force_pitch: Option<u8>,
+
+ // Y2 (second-order DC) block state, including the rolling DC predictor
+ has_y2: bool,
+ pdc_pred_val: i16,
+ pdc_pred_count: usize,
+
+ ipred_ctx_y: IPredContext,
+ ipred_ctx_u: IPredContext,
+ ipred_ctx_v: IPredContext,
+}
+
+impl DecoderState {
+ // Restores the default probability models (done on intra frames).
+ fn reset(&mut self) {
+ self.kf_ymode_prob.copy_from_slice(Y_MODE_TREE_PROBS);
+ self.kf_uvmode_prob.copy_from_slice(UV_MODE_TREE_PROBS);
+ self.coef_probs.copy_from_slice(&DEFAULT_DCT_PROBS);
+ self.mv_probs.copy_from_slice(&DEFAULT_MV_PROBS);
+ }
+}
+
+/// How a macroblock's motion vector field is partitioned in four-MV mode.
+#[derive(Clone,Copy,Debug,PartialEq)]
+enum MVSplitMode {
+ TopBottom,
+ LeftRight,
+ Quarters,
+ Sixteenths,
+}
+
+/// Source of one sub-block motion vector in split-MV mode.
+#[derive(Clone,Copy,Debug,PartialEq)]
+enum SubMVRef {
+ Left,
+ Above,
+ New,
+ Zero,
+}
+
+/// Decodes one motion vector component (x or y).
+/// probs[0] selects short/long coding, probs[1] is the sign probability,
+/// probs[2..9] drive the small-value tree, probs[9..] the long-value bits.
+fn decode_mv_component(bc: &mut BoolCoder, probs: &[u8; 17]) -> i16 {
+ // long values are read bit 0,1,2 then 7..4; bit 3 is handled specially below
+ const LONG_VECTOR_ORDER: [usize; 7] = [ 0, 1, 2, 7, 6, 5, 4 ];
+
+ let val = if !bc.read_prob(probs[0]) {
+ bc.read_tree(SMALL_MV_TREE, &probs[2..9])
+ } else {
+ let raw_probs = &probs[9..];
+ let mut raw = 0;
+ for ord in LONG_VECTOR_ORDER.iter() {
+ raw |= (bc.read_prob(raw_probs[*ord]) as i16) << *ord;
+ }
+ // bit 3 is coded only when some higher bit is set, otherwise it is implicitly 1
+ if (raw & 0xF0) != 0 {
+ raw |= (bc.read_prob(raw_probs[3]) as i16) << 3;
+ } else {
+ raw |= 1 << 3;
+ }
+ raw
+ };
+ // sign bit is read only for non-zero values (short-circuit)
+ if (val == 0) || !bc.read_prob(probs[1]) {
+ val
+ } else {
+ -val
+ }
+}
+
+/// Row caches of "block has non-zero coefficients" flags plus the values for
+/// the blocks to the left of the current macroblock; used as coefficient
+/// decoding contexts.
+struct PredCache {
+ y_pred: GenericCache<u8>,
+ u_pred: GenericCache<u8>,
+ v_pred: GenericCache<u8>,
+ y2_pred: GenericCache<u8>,
+ y_pred_left: [u8; 4],
+ u_pred_left: [u8; 2],
+ v_pred_left: [u8; 2],
+ y2_pred_left: u8,
+}
+
+impl PredCache {
+    /// Creates the caches with placeholder 1x1 geometry;
+    /// `resize()` must be called once the frame width is known.
+    fn new() -> Self {
+        let empty = || GenericCache::new(1, 1, 0);
+        Self {
+            y_pred:       empty(),
+            u_pred:       empty(),
+            v_pred:       empty(),
+            y2_pred:      empty(),
+            y_pred_left:  [0; 4],
+            u_pred_left:  [0; 2],
+            v_pred_left:  [0; 2],
+            y2_pred_left: 0,
+        }
+    }
+    /// Reallocates the caches for a frame `mb_w` macroblocks wide:
+    /// 4 luma / 2 chroma / 1 Y2 entries per macroblock plus one edge column.
+    fn resize(&mut self, mb_w: usize) {
+        self.y_pred  = GenericCache::new(4, mb_w * 4 + 1, 0);
+        self.u_pred  = GenericCache::new(2, mb_w * 2 + 1, 0);
+        self.v_pred  = GenericCache::new(2, mb_w * 2 + 1, 0);
+        self.y2_pred = GenericCache::new(1, mb_w     + 1, 0);
+    }
+    /// Clears all cached contexts (start-of-frame reset).
+    fn reset(&mut self) {
+        let caches: [&mut GenericCache<u8>; 4] =
+            [&mut self.y_pred, &mut self.u_pred, &mut self.v_pred, &mut self.y2_pred];
+        for cache in caches.iter_mut() {
+            cache.reset();
+        }
+        self.y_pred_left  = [0; 4];
+        self.u_pred_left  = [0; 2];
+        self.v_pred_left  = [0; 2];
+        self.y2_pred_left = 0;
+    }
+    /// Rolls the caches over to the next macroblock row.
+    fn update_row(&mut self) {
+        let caches: [&mut GenericCache<u8>; 4] =
+            [&mut self.y_pred, &mut self.u_pred, &mut self.v_pred, &mut self.y2_pred];
+        for cache in caches.iter_mut() {
+            cache.update_row();
+        }
+    }
+}
+
+/// VP7 decoder instance: frame geometry, per-MB metadata arrays,
+/// probability/prediction state and scratch buffers.
+struct VP7Decoder {
+ info: NACodecInfoRef,
+
+ shuf: VPShuffler,
+ width: usize,
+ height: usize,
+ mb_w: usize,
+ mb_h: usize,
+ mb_info: Vec<MBInfo>,
+ // motion vectors on a 4x4-block grid, mv_stride entries per row
+ mvs: Vec<MV>,
+ mv_stride: usize,
+
+ // per-4x4-block luma modes and per-MB chroma modes
+ ymodes: Vec<PredMode>,
+ ymode_stride: usize,
+ uvmodes: Vec<PredMode>,
+ uvmode_stride: usize,
+
+ dstate: DecoderState,
+ pcache: PredCache,
+
+ // 16 luma + 4 U + 4 V + 1 Y2 coefficient blocks of the current MB
+ coeffs: [[i16; 16]; 25],
+ scan: [usize; 16],
+ // qmat[0] is the frame-level set; qmat[1..=4] are feature-override sets
+ qmat: [[[i16; 16]; 3]; 5],
+
+ mc_buf: NAVideoBufferRef<u8>,
+
+ tmp_scan: [usize; 16],
+}
+
+impl VP7Decoder {
+ // Creates a decoder with zeroed geometry; a scratch 128x128 buffer is
+ // allocated up front for motion compensation edge emulation.
+ fn new() -> Self {
+ let vt = alloc_video_buffer(NAVideoInfo::new(128, 128, false, YUV420_FORMAT), 4).unwrap();
+ let mut scan = [0; 16];
+ scan.copy_from_slice(&DEFAULT_SCAN_ORDER);
+ let mc_buf = vt.get_vbuf().unwrap();
+ Self {
+ info: NACodecInfoRef::default(),
+
+ shuf: VPShuffler::new(),
+ width: 0,
+ height: 0,
+ mb_w: 0,
+ mb_h: 0,
+ mb_info: Vec::new(),
+ mvs: Vec::new(),
+ mv_stride: 0,
+
+ ymodes: Vec::new(),
+ ymode_stride: 0,
+ uvmodes: Vec::new(),
+ uvmode_stride: 0,
+
+ dstate: DecoderState::default(),
+ pcache: PredCache::new(),
+
+ coeffs: [[0; 16]; 25],
+ scan,
+ tmp_scan: [0; 16],
+ qmat: [[[0; 16]; 3]; 5],
+
+ mc_buf,
+ }
+ }
+ // (Re)allocates all per-frame arrays when the coded dimensions change;
+ // a no-op when the dimensions stay the same.
+ fn set_dimensions(&mut self, width: usize, height: usize) {
+ if (width == self.width) && (height == self.height) {
+ return;
+ }
+ self.width = width;
+ self.height = height;
+ self.mb_w = (self.width + 15) >> 4;
+ self.mb_h = (self.height + 15) >> 4;
+ self.mb_info.resize(self.mb_w * self.mb_h, MBInfo::default());
+ self.mv_stride = self.mb_w * 4;
+ self.mvs.resize(self.mv_stride * self.mb_h * 4, ZERO_MV);
+
+ self.ymode_stride = self.mb_w * 4;
+ self.uvmode_stride = self.mb_w;
+ self.ymodes.resize(self.ymode_stride * self.mb_h * 4, PredMode::default());
+ self.uvmodes.resize(self.uvmode_stride * self.mb_h, PredMode::default());
+
+ self.pcache.resize(self.mb_w);
+ }
+ // Reads the four optional per-MB feature descriptions from the frame header:
+ // presence probability, selection-tree probabilities and (except for the
+ // golden-frame-update feature, i == 2) the feature values.
+ fn read_features(&mut self, bc: &mut BoolCoder) -> DecoderResult<()> {
+ for (i, feat) in self.dstate.features.iter_mut().enumerate() {
+ if bc.read_bool() {
+ let mut feature = MBFeature::default();
+ feature.present_prob = bc.read_byte();
+ for tp in feature.tree_probs.iter_mut() {
+ if bc.read_bool() {
+ *tp = bc.read_byte();
+ } else {
+ *tp = 255;
+ }
+ }
+ if i != 2 {
+ // value widths: quant index 7 bits, loop strength 6 bits,
+ // pitch 8 bits for version 0 else 5 bits
+ let fbits = match i {
+ 0 => 7,
+ 1 => 6,
+ _ => if self.dstate.version == 0 { 8 } else { 5 },
+ };
+ for dval in feature.def_val.iter_mut() {
+ if bc.read_bool() {
+ *dval = bc.read_bits(fbits) as u8;
+ } else {
+ *dval = 0;
+ }
+ }
+ }
+ *feat = Some(feature);
+ } else {
+ *feat = None;
+ }
+ }
+ Ok(())
+ }
+    /// Reads coefficient probability updates: for each entry, a flag coded
+    /// with the matching `DCT_UPDATE_PROBS` probability tells whether a new
+    /// 8-bit probability follows.
+    fn read_dct_coef_prob_upd(&mut self, bc: &mut BoolCoder) -> DecoderResult<()> {
+        for (ctype, upd0) in self.dstate.coef_probs.iter_mut().zip(DCT_UPDATE_PROBS.iter()) {
+            for (band, upd1) in ctype.iter_mut().zip(upd0.iter()) {
+                for (ctx, upd2) in band.iter_mut().zip(upd1.iter()) {
+                    for (prob, &upd_prob) in ctx.iter_mut().zip(upd2.iter()) {
+                        if bc.read_prob(upd_prob) {
+                            *prob = bc.read_byte();
+                        }
+                    }
+                }
+            }
+        }
+        Ok(())
+    }
+    /// Reads motion vector probability updates for both components;
+    /// each update is gated by the matching `MV_UPDATE_PROBS` entry.
+    fn read_mv_prob_upd(&mut self, bc: &mut BoolCoder) -> DecoderResult<()> {
+        for (comp_probs, upd) in self.dstate.mv_probs.iter_mut().zip(MV_UPDATE_PROBS.iter()) {
+            for (prob, &upd_prob) in comp_probs.iter_mut().zip(upd.iter()) {
+                if bc.read_prob(upd_prob) {
+                    *prob = bc.read_probability();
+                }
+            }
+        }
+        Ok(())
+    }
+ // Reads the per-MB feature bits and translates them into the force_*
+ // overrides used by residue decoding and reconstruction.
+ fn decode_mb_features(&mut self, bc: &mut BoolCoder, _mb_x: usize, _mb_y: usize) -> DecoderResult<()> {
+ self.dstate.force_quant = None;
+ self.dstate.force_loop_str = None;
+ self.dstate.force_gf_update = false;
+ self.dstate.force_pitch = None;
+ for (i, feat) in self.dstate.features.iter().enumerate() {
+ if let Some(feat) = feat {
+ let present = bc.read_prob(feat.present_prob);
+ if present {
+ let ftype_idx = bc.read_tree(FEATURE_TREE, &feat.tree_probs);
+ let val = feat.def_val[ftype_idx];
+ match i {
+ // feature 0 stores the *index* (it selects one of the qmat override sets)
+ 0 => self.dstate.force_quant = Some(ftype_idx as u8),
+ 1 => self.dstate.force_loop_str = Some(val),
+ 2 => self.dstate.force_gf_update = true,
+ _ => self.dstate.force_pitch = Some(val),
+ };
+ }
+ }
+ }
+ Ok(())
+ }
+ // Decodes all coefficient blocks of one macroblock (Y2, 16 luma, 4+4 chroma),
+ // updates the has-coefficients contexts, applies the VP7 Y2 DC predictor and
+ // runs the inverse transforms.
+ fn decode_residue(&mut self, bc: &mut BoolCoder, mb_x: usize, mb_idx: usize) {
+ // qmat[0] is the frame-level set, qmat[1..] the per-feature overrides
+ let qmat_idx = if let Some(idx) = self.dstate.force_quant { (idx as usize) + 1 } else { 0 };
+ let mut sbparams = SBParams {
+ scan: &DEFAULT_SCAN_ORDER,
+ qmat: &self.qmat[qmat_idx][2],
+ coef_probs: &self.dstate.coef_probs,
+ };
+ let mut has_ac = [false; 25];
+ let ytype;
+ if self.dstate.has_y2 {
+ let pred = &self.pcache.y2_pred;
+ let pidx = pred.xpos + mb_x;
+ // context = left flag + top flag
+ let pctx = self.pcache.y2_pred_left + pred.data[pidx - pred.stride];
+
+ let has_nz = decode_subblock(bc, &mut self.coeffs[24], 1, pctx, &sbparams);
+ self.pcache.y2_pred.data[pidx] = has_nz;
+ self.pcache.y2_pred_left = has_nz;
+ has_ac[24] = has_nz > 0;
+
+ ytype = 0;
+ } else {
+ // no Y2 block: propagate the context from above instead
+ let pred = &mut self.pcache.y2_pred;
+ let pidx = pred.xpos + mb_x;
+ pred.data[pidx] = pred.data[pidx - pred.stride];
+
+ ytype = 3;
+ }
+ sbparams.scan = &self.scan;
+ sbparams.qmat = &self.qmat[qmat_idx][0];
+ for i in 0..16 {
+ let bx = i & 3;
+ let by = i >> 2;
+ let pred = &self.pcache.y_pred;
+ let pidx = pred.xpos + mb_x * 4 + bx + by * pred.stride;
+ let pctx = self.pcache.y_pred_left[by] + pred.data[pidx - pred.stride];
+
+ let has_nz = decode_subblock(bc, &mut self.coeffs[i], ytype, pctx, &sbparams);
+ self.pcache.y_pred.data[pidx] = has_nz;
+ self.pcache.y_pred_left[by] = has_nz;
+ has_ac[i] = has_nz > 0;
+ }
+ sbparams.qmat = &self.qmat[qmat_idx][1];
+ for i in 16..20 {
+ let bx = i & 1;
+ let by = (i >> 1) & 1;
+ let pred = &self.pcache.u_pred;
+ let pidx = pred.xpos + mb_x * 2 + bx + by * pred.stride;
+ let pctx = self.pcache.u_pred_left[by] + pred.data[pidx - pred.stride];
+
+ let has_nz = decode_subblock(bc, &mut self.coeffs[i], 2, pctx, &sbparams);
+ self.pcache.u_pred.data[pidx] = has_nz;
+ self.pcache.u_pred_left[by] = has_nz;
+ has_ac[i] = has_nz > 0;
+ }
+ for i in 20..24 {
+ let bx = i & 1;
+ let by = (i >> 1) & 1;
+ let pred = &self.pcache.v_pred;
+ let pidx = pred.xpos + mb_x * 2 + bx + by * pred.stride;
+ let pctx = self.pcache.v_pred_left[by] + pred.data[pidx - pred.stride];
+
+ let has_nz = decode_subblock(bc, &mut self.coeffs[i], 2, pctx, &sbparams);
+ self.pcache.v_pred.data[pidx] = has_nz;
+ self.pcache.v_pred_left[by] = has_nz;
+ has_ac[i] = has_nz > 0;
+ }
+
+ if self.dstate.has_y2 {
+ let y2block = &mut self.coeffs[24];
+ // rolling inter DC prediction: after the predictor has been stable
+ // for more than 3 blocks the DC is coded as a delta against it
+ if self.mb_info[mb_idx].mb_type != VPMBType::Intra {
+ let mut dc = y2block[0];
+ let pval = self.dstate.pdc_pred_val;
+ if self.dstate.pdc_pred_count > 3 {
+ dc += pval;
+ y2block[0] = dc;
+ }
+ // reset the predictor on zero or sign change, count stable repeats
+ if (pval == 0) || (dc == 0) || ((pval ^ dc) < 0) {
+ self.dstate.pdc_pred_val = dc;
+ self.dstate.pdc_pred_count = 0;
+ } else if dc == pval {
+ self.dstate.pdc_pred_count += 1;
+ }
+ }
+ if has_ac[24] {
+ idct4x4(y2block);
+ } else {
+ idct4x4_dc(y2block);
+ }
+ // scatter the transformed Y2 values as the DC of each luma block
+ for i in 0..16 {
+ self.coeffs[i][0] = self.coeffs[24][i];
+ }
+ }
+ for i in 0..24 {
+ if has_ac[i] {
+ idct4x4(&mut self.coeffs[i]);
+ } else {
+ idct4x4_dc(&mut self.coeffs[i]);
+ }
+ }
+ }
+
+    /// Builds the dequantisation tables: entry 0 holds the DC quantiser,
+    /// entries 1..16 the AC quantiser. Set 0 uses the frame-level indices;
+    /// sets 1..=4 are derived from the quantiser-override feature values.
+    fn set_qmat(&mut self, y_dc_q: usize, y_ac_q: usize, y2_dc_q: usize, y2_ac_q: usize, uv_dc_q: usize, uv_ac_q: usize) {
+        // helper: one table = DC value followed by 15 AC values
+        fn fill(dst: &mut [i16; 16], dc: i16, ac: i16) {
+            dst[0] = dc;
+            for el in dst[1..].iter_mut() {
+                *el = ac;
+            }
+        }
+        fill(&mut self.qmat[0][0], Y_DC_QUANTS[y_dc_q],   Y_AC_QUANTS[y_ac_q]);
+        fill(&mut self.qmat[0][1], UV_DC_QUANTS[uv_dc_q], UV_AC_QUANTS[uv_ac_q]);
+        fill(&mut self.qmat[0][2], Y2_DC_QUANTS[y2_dc_q], Y2_AC_QUANTS[y2_ac_q]);
+        if let Some(ref feat) = self.dstate.features[0] {
+            for (j, &dval) in feat.def_val.iter().enumerate() {
+                let q = dval as usize;
+                fill(&mut self.qmat[j + 1][0], Y_DC_QUANTS[q],  Y_AC_QUANTS[q]);
+                fill(&mut self.qmat[j + 1][1], UV_DC_QUANTS[q], UV_AC_QUANTS[q]);
+                fill(&mut self.qmat[j + 1][2], Y2_DC_QUANTS[q], Y2_AC_QUANTS[q]);
+            }
+        }
+    }
+    /// Stores `ymode` in all 16 sub-block mode cells of macroblock (mb_x, mb_y).
+    fn fill_ymode(&mut self, mb_x: usize, mb_y: usize, ymode: PredMode) {
+        let base = mb_x * 4 + mb_y * 4 * self.ymode_stride;
+        for row in 0..4 {
+            let off = base + row * self.ymode_stride;
+            for cell in self.ymodes[off..off + 4].iter_mut() {
+                *cell = ymode;
+            }
+        }
+    }
+    /// Fills the whole 4x4 motion vector grid of macroblock (mb_x, mb_y) with `mv`.
+    fn fill_mv(&mut self, mb_x: usize, mb_y: usize, mv: MV) {
+        let mut iidx = mb_x * 4 + mb_y * 4 * self.mv_stride;
+        for _ in 0..4 {
+            for x in 0..4 {
+                self.mvs[iidx + x] = mv;
+            }
+            // consistency fix: advance by self.mv_stride (the stride used in the
+            // index computation above) instead of the equivalent `self.mb_w * 4`;
+            // set_dimensions() keeps them equal, but mv_stride states the intent
+            // and matches fill_ymode()'s use of ymode_stride.
+            iidx += self.mv_stride;
+        }
+    }
+    /// Surveys already-decoded neighbour MVs and returns, in order:
+    /// the per-mode probabilities, the "nearest" MV, the "near" MV and the
+    /// predicted MV used as the base for newly coded vectors.
+    fn find_mv_pred(&self, mb_x: usize, mb_y: usize) -> ([u8; 4], MV, MV, MV) {
+        // (row offset, column offset, weight, sub-block index) of each candidate
+        const CAND_POS: [(i8, i8, u8, u8); 12] = [
+            (-1,  0, 8, 12), ( 0, -1, 8,  3),
+            (-1, -1, 2, 15), (-1,  1, 2, 12),
+            (-2,  0, 2, 12), ( 0, -2, 2,  3),
+            (-1, -2, 1, 15), (-2, -1, 1, 15),
+            (-2,  1, 1, 12), (-1,  2, 1, 12),
+            (-2, -2, 1, 15), (-2,  2, 1, 12)
+        ];
+
+        let mut nearest_mv = ZERO_MV;
+        let mut near_mv = ZERO_MV;
+
+        // ct[0] = zero/unavailable, ct[1] = nearest, ct[2] = near, ct[3] = other
+        let mut ct: [u8; 4] = [0; 4];
+
+        let start = if self.dstate.version == 0 { 1 } else { 0 };
+        let mvwrap = (self.mb_w as isize) + 1;
+        for (yoff, xoff, weight, blk_no) in CAND_POS.iter() {
+            let cx = (mb_x as isize) + (*xoff as isize);
+            let cy = (mb_y as isize) + (*yoff as isize);
+            let mvpos = cx + cy * mvwrap;
+            // out-of-frame candidates count towards the zero-MV context
+            if (mvpos < start) || ((mvpos % mvwrap) == (mvwrap - 1)) {
+                ct[0] += weight;
+                continue;
+            }
+            let cx = (mvpos % mvwrap) as usize;
+            let cy = (mvpos / mvwrap) as usize;
+            let bx = (*blk_no as usize) & 3;
+            let by = (*blk_no as usize) >> 2;
+            let blk_pos = cx * 4 + bx + (cy * 4 + by) * self.mv_stride;
+            let mv = self.mvs[blk_pos];
+            if mv == ZERO_MV {
+                ct[0] += weight;
+                continue;
+            }
+            let idx;
+            if (nearest_mv == ZERO_MV) || (nearest_mv == mv) {
+                nearest_mv = mv;
+                idx = 1;
+            } else if near_mv == ZERO_MV {
+                near_mv = mv;
+                idx = 2;
+            } else {
+                idx = if mv == near_mv { 2 } else { 3 };
+            }
+            ct[idx] += weight;
+        }
+        let pred_mv = if ct[1] > ct[2] {
+            if ct[1] >= ct[0] { nearest_mv } else { ZERO_MV }
+        } else {
+            if ct[2] >= ct[0] { near_mv } else { ZERO_MV }
+        };
+
+        // bugfix: the fourth mode probability was looked up with ct[2]; it must
+        // be conditioned on the "other MV" count ct[3], which was accumulated
+        // above but otherwise never used.
+        let mvprobs = [INTER_MODE_PROBS[ct[0] as usize][0],
+                       INTER_MODE_PROBS[ct[1] as usize][1],
+                       INTER_MODE_PROBS[ct[2] as usize][2],
+                       INTER_MODE_PROBS[ct[3] as usize][3]];
+
+        (mvprobs, nearest_mv, near_mv, pred_mv)
+    }
+ // Decodes one sub-block MV in split mode: reuse the left or above
+ // neighbour, zero, or a newly coded delta against pred_mv.
+ fn get_split_mv(&self, bc: &mut BoolCoder, mb_x: usize, mb_y: usize, bx: usize, by: usize, pred_mv: MV) -> MV {
+ let mode = bc.read_tree(SUB_MV_REF_TREE, &SUB_MV_REF_PROBS);
+ let mvidx = mb_x * 4 + bx + (mb_y * 4 + by) * self.mv_stride;
+ match mode {
+ SubMVRef::Left => {
+ if (mb_x > 0) || (bx > 0) {
+ self.mvs[mvidx - 1]
+ } else {
+ ZERO_MV
+ }
+ },
+ SubMVRef::Above => {
+ if (mb_y > 0) || (by > 0) {
+ self.mvs[mvidx - self.mv_stride]
+ } else {
+ ZERO_MV
+ }
+ },
+ SubMVRef::Zero => ZERO_MV,
+ SubMVRef::New => {
+ // y component is coded before x
+ let dmy = decode_mv_component(bc, &self.dstate.mv_probs[0]);
+ let dmx = decode_mv_component(bc, &self.dstate.mv_probs[1]);
+ pred_mv + MV{ x: dmx, y: dmy }
+ },
+ }
+ }
+ // Decodes the split-MV partition mode and fills the MB's 4x4 MV grid.
+ fn do_split_mv(&mut self, bc: &mut BoolCoder, mb_x: usize, mb_y: usize, pred_mv: MV) -> DecoderResult<()> {
+ let split_mode = bc.read_tree(MV_SPLIT_MODE_TREE, &MV_SPLIT_MODE_PROBS);
+ let mut mvidx = mb_x * 4 + mb_y * 4 * self.mv_stride;
+ match split_mode {
+ MVSplitMode::TopBottom => {
+ let top_mv = self.get_split_mv(bc, mb_x, mb_y, 0, 0, pred_mv);
+ for _ in 0..2 {
+ for x in 0..4 { self.mvs[mvidx + x] = top_mv; }
+ mvidx += self.mv_stride;
+ }
+ let bot_mv = self.get_split_mv(bc, mb_x, mb_y, 0, 2, pred_mv);
+ for _ in 2..4 {
+ for x in 0..4 { self.mvs[mvidx + x] = bot_mv; }
+ mvidx += self.mv_stride;
+ }
+ },
+ MVSplitMode::LeftRight => {
+ let left_mv = self.get_split_mv(bc, mb_x, mb_y, 0, 0, pred_mv);
+ // store left_mv before decoding the right half so that the
+ // SubMVRef::Left lookup for block (2,0) sees it at mvidx + 1
+ self.mvs[mvidx + 1] = left_mv;
+ let right_mv = self.get_split_mv(bc, mb_x, mb_y, 2, 0, pred_mv);
+ for _ in 0..4 {
+ self.mvs[mvidx + 0] = left_mv;
+ self.mvs[mvidx + 1] = left_mv;
+ self.mvs[mvidx + 2] = right_mv;
+ self.mvs[mvidx + 3] = right_mv;
+ mvidx += self.mv_stride;
+ }
+ },
+ MVSplitMode::Quarters => {
+ // one MV per 8x8 quarter, replicated into its 2x2 cell
+ for y in (0..4).step_by(2) {
+ for x in (0..4).step_by(2) {
+ self.mvs[mvidx + x] = self.get_split_mv(bc, mb_x, mb_y, x, y, pred_mv);
+ self.mvs[mvidx + x + 1] = self.mvs[mvidx + x];
+ }
+ for x in 0..4 {
+ self.mvs[mvidx + x + self.mv_stride] = self.mvs[mvidx + x];
+ }
+ mvidx += self.mv_stride * 2;
+ }
+ },
+ MVSplitMode::Sixteenths => {
+ for y in 0..4 {
+ for x in 0..4 {
+ self.mvs[mvidx + x] = self.get_split_mv(bc, mb_x, mb_y, x, y, pred_mv);
+ }
+ mvidx += self.mv_stride;
+ }
+ },
+ };
+ Ok(())
+ }
+
+ // Adds the decoded residue to the predicted macroblock; the pitch mode
+ // selects how luma coefficient blocks map onto picture lines
+ // (normal 4x4 grid, 16x1 rows, or interlaced-by-2/by-4 layouts).
+ fn add_residue(&self, dframe: &mut NASimpleVideoFrame<u8>, mb_x: usize, mb_y: usize, do_luma: bool, pitch_mode: u8) {
+ if do_luma {
+ let ydst = &mut dframe.data[dframe.offset[0]..];
+ let ystride = dframe.stride[0];
+ let mut yoff = mb_x * 16 + mb_y * 16 * ystride;
+ match pitch_mode {
+ PITCH_MODE_NORMAL => {
+ for y in 0..4 {
+ for x in 0..4 {
+ add_coeffs4x4(ydst, yoff + x * 4, ystride, &self.coeffs[x + y * 4]);
+ }
+ yoff += 4 * ystride;
+ }
+ },
+ PITCH_MODE_FOUR => {
+ // each coefficient block is one 16x1 picture row
+ for y in 0..16 {
+ add_coeffs16x1(ydst, yoff, &self.coeffs[y]);
+ yoff += ystride;
+ }
+ },
+ PITCH_MODE_X2 => {
+ // even field rows first...
+ for y in 0..2 {
+ for x in 0..4 {
+ add_coeffs4x4(ydst, yoff + x * 4, ystride * 2, &self.coeffs[x + y * 4]);
+ }
+ yoff += 8 * ystride;
+ }
+ // ...then rewind to line 1 for the odd field
+ yoff -= 15 * ystride;
+ for y in 2..4 {
+ for x in 0..4 {
+ add_coeffs4x4(ydst, yoff + x * 4, ystride * 2, &self.coeffs[x + y * 4]);
+ }
+ yoff += 8 * ystride;
+ }
+ },
+ PITCH_MODE_X4 => {
+ for y in 0..4 {
+ for x in 0..4 {
+ add_coeffs4x4(ydst, yoff + x * 4, ystride * 4, &self.coeffs[x + y * 4]);
+ }
+ yoff += ystride;
+ }
+ },
+ _ => unreachable!(),
+ };
+ }
+ let dst = &mut dframe.data[0..];
+ let mut uoff = dframe.offset[1] + mb_x * 8 + mb_y * 8 * dframe.stride[1];
+ let ustride = dframe.stride[1];
+ let mut voff = dframe.offset[2] + mb_x * 8 + mb_y * 8 * dframe.stride[2];
+ let vstride = dframe.stride[2];
+ if (pitch_mode == PITCH_MODE_NORMAL) || (pitch_mode == PITCH_MODE_FOUR) {
+ for y in 0..2 {
+ for x in 0..2 {
+ add_coeffs4x4(dst, uoff + x * 4, ustride, &self.coeffs[16 + x + y * 2]);
+ add_coeffs4x4(dst, voff + x * 4, vstride, &self.coeffs[20 + x + y * 2]);
+ }
+ uoff += ustride * 4;
+ voff += vstride * 4;
+ }
+ } else {
+ // interlaced chroma: doubled stride, consecutive start lines
+ for y in 0..2 {
+ for x in 0..2 {
+ add_coeffs4x4(dst, uoff + x * 4, ustride * 2, &self.coeffs[16 + x + y * 2]);
+ add_coeffs4x4(dst, voff + x * 4, vstride * 2, &self.coeffs[20 + x + y * 2]);
+ }
+ uoff += ustride;
+ voff += vstride;
+ }
+ }
+ }
+    /// Reconstructs an intra macroblock: runs the selected 16x16 or per-4x4
+    /// luma prediction plus the 8x8 chroma prediction, then adds the residue.
+    fn recon_intra_mb(&mut self, dframe: &mut NASimpleVideoFrame<u8>, mb_x: usize, mb_y: usize) -> DecoderResult<()> {
+        let pitch = self.dstate.force_pitch.unwrap_or(0);
+        let pitch_mode = (pitch >> 3) & 3;
+
+        let mb_idx = mb_x + mb_y * self.mb_w;
+        let has_top = mb_y > 0;
+        let has_left = mb_x > 0;
+        let ydst = &mut dframe.data[dframe.offset[0]..];
+        let ystride = dframe.stride[0];
+        let mut yoff = mb_x * 16 + mb_y * 16 * ystride;
+        let ipred_ctx_y = &mut self.dstate.ipred_ctx_y;
+        ipred_ctx_y.has_top = has_top;
+        ipred_ctx_y.has_left = has_left;
+        let is_normal = self.mb_info[mb_idx].ymode != PredMode::BPred;
+        if is_normal {
+            ipred_ctx_y.fill(ydst, yoff, ystride, 16, 16);
+            match self.mb_info[mb_idx].ymode {
+                PredMode::DCPred => IPred16x16::ipred_dc(ydst, yoff, ystride, ipred_ctx_y),
+                PredMode::HPred  => IPred16x16::ipred_h (ydst, yoff, ystride, ipred_ctx_y),
+                PredMode::VPred  => IPred16x16::ipred_v (ydst, yoff, ystride, ipred_ctx_y),
+                PredMode::TMPred => IPred16x16::ipred_tm(ydst, yoff, ystride, ipred_ctx_y),
+                _ => unreachable!(),
+            };
+        } else {
+            validate!((pitch_mode == PITCH_MODE_NORMAL) || (pitch_mode == PITCH_MODE_X2));
+            let mut iidx = mb_x * 4 + mb_y * 4 * self.ymode_stride;
+            let mut tr_save = [0x80u8; 16];
+            if pitch_mode == PITCH_MODE_X2 {
+                // reorganise coefficient data for interlaced case:
+                // interleave rows of each even/odd block pair
+                for y in (0..4).step_by(2) {
+                    for x in 0..4 {
+                        let mut tmpblock = [0i16; 16 * 2];
+                        let eidx = x + y * 4;
+                        let oidx = x + y * 4 + 4;
+                        for i in 0..4 {
+                            for j in 0..4 {
+                                tmpblock[i * 8 + 0 + j] = self.coeffs[eidx][i * 4 + j];
+                                tmpblock[i * 8 + 4 + j] = self.coeffs[oidx][i * 4 + j];
+                            }
+                        }
+                        self.coeffs[eidx].copy_from_slice(&tmpblock[0..16]);
+                        self.coeffs[oidx].copy_from_slice(&tmpblock[16..32]);
+                    }
+                }
+            }
+            for y in 0..4 {
+                for x in 0..4 {
+                    ipred_ctx_y.has_left = has_left || x > 0;
+                    let bmode = self.ymodes[iidx + x];
+                    let cur_yoff = yoff + x * 4;
+                    // top-right pixels exist inside the MB for x < 3; for the
+                    // top row they come from the MB to the right, so they are
+                    // present only when this is not the last MB column.
+                    // bugfix: this tested `mb_y < self.mb_w - 1`, comparing the
+                    // row index against the width in MBs; the correct horizontal
+                    // position is mb_x.
+                    let has_tr = has_top && ((x < 3) || ((y == 0) && (mb_x < self.mb_w - 1)));
+                    let has_dl = ipred_ctx_y.has_left && (y < 3);
+                    ipred_ctx_y.fill(ydst, cur_yoff, ystride,
+                                     if has_tr { 8 } else { 4 },
+                                     if has_dl { 8 } else { 4 });
+                    if !has_tr {
+                        // reuse the pixels saved from the block row above
+                        for i in 0..4 {
+                            ipred_ctx_y.top[i + 4] = tr_save[x * 4 + i];
+                        }
+                    } else {
+                        for i in 0..4 {
+                            tr_save[x * 4 + i] = ipred_ctx_y.top[i + 4];
+                        }
+                    }
+                    match bmode {
+                        PredMode::DCPred => IPred4x4::ipred_dc(ydst, cur_yoff, ystride, ipred_ctx_y),
+                        PredMode::TMPred => IPred4x4::ipred_tm(ydst, cur_yoff, ystride, ipred_ctx_y),
+                        PredMode::HPred  => IPred4x4::ipred_he(ydst, cur_yoff, ystride, ipred_ctx_y),
+                        PredMode::VPred  => IPred4x4::ipred_ve(ydst, cur_yoff, ystride, ipred_ctx_y),
+                        PredMode::LDPred => IPred4x4::ipred_ld(ydst, cur_yoff, ystride, ipred_ctx_y),
+                        PredMode::RDPred => IPred4x4::ipred_rd(ydst, cur_yoff, ystride, ipred_ctx_y),
+                        PredMode::VRPred => IPred4x4::ipred_vr(ydst, cur_yoff, ystride, ipred_ctx_y),
+                        PredMode::VLPred => IPred4x4::ipred_vl(ydst, cur_yoff, ystride, ipred_ctx_y),
+                        PredMode::HDPred => IPred4x4::ipred_hd(ydst, cur_yoff, ystride, ipred_ctx_y),
+                        PredMode::HUPred => IPred4x4::ipred_hu(ydst, cur_yoff, ystride, ipred_ctx_y),
+                        _ => unreachable!(),
+                    };
+                    // B-mode residue is applied per block, before the next
+                    // block predicts from these reconstructed pixels
+                    add_coeffs4x4(ydst, cur_yoff, ystride, &self.coeffs[x + y * 4]);
+                }
+                ipred_ctx_y.has_top = true;
+                yoff += 4 * ystride;
+                iidx += self.ymode_stride;
+            }
+        }
+        let dst = &mut dframe.data[0..];
+        let uoff = dframe.offset[1] + mb_x * 8 + mb_y * 8 * dframe.stride[1];
+        let ustride = dframe.stride[1];
+        let voff = dframe.offset[2] + mb_x * 8 + mb_y * 8 * dframe.stride[2];
+        let vstride = dframe.stride[2];
+        let ipred_ctx_u = &mut self.dstate.ipred_ctx_u;
+        let ipred_ctx_v = &mut self.dstate.ipred_ctx_v;
+        ipred_ctx_u.has_top = has_top;
+        ipred_ctx_v.has_top = has_top;
+        ipred_ctx_u.has_left = has_left;
+        ipred_ctx_v.has_left = has_left;
+        ipred_ctx_u.fill(dst, uoff, ustride, 8, 8);
+        ipred_ctx_v.fill(dst, voff, vstride, 8, 8);
+        match self.mb_info[mb_idx].uvmode {
+            PredMode::DCPred => {
+                IPred8x8::ipred_dc(dst, uoff, ustride, ipred_ctx_u);
+                IPred8x8::ipred_dc(dst, voff, vstride, ipred_ctx_v);
+            },
+            PredMode::HPred => {
+                IPred8x8::ipred_h(dst, uoff, ustride, ipred_ctx_u);
+                IPred8x8::ipred_h(dst, voff, vstride, ipred_ctx_v);
+            },
+            PredMode::VPred => {
+                IPred8x8::ipred_v(dst, uoff, ustride, ipred_ctx_u);
+                IPred8x8::ipred_v(dst, voff, vstride, ipred_ctx_v);
+            },
+            PredMode::TMPred => {
+                IPred8x8::ipred_tm(dst, uoff, ustride, ipred_ctx_u);
+                IPred8x8::ipred_tm(dst, voff, vstride, ipred_ctx_v);
+            },
+            _ => unreachable!(),
+        };
+        // in B-mode the luma residue was already applied above
+        self.add_residue(dframe, mb_x, mb_y, is_normal, pitch_mode);
+        Ok(())
+    }
+    /// Reconstructs an inter macroblock: motion-compensates luma (one MV or
+    /// per-4x4 MVs) and chroma (averaged MVs in four-MV mode), then adds the
+    /// residue. `use_last` selects the last frame as reference, otherwise golden.
+    fn recon_inter_mb(&mut self, dframe: &mut NASimpleVideoFrame<u8>, mb_x: usize, mb_y: usize, use_last: bool) {
+        let pitch = self.dstate.force_pitch.unwrap_or(0);
+        let pitch_dmode = (pitch >> 3) & 3;
+        let pitch_smode = pitch & 7;
+
+        let refframe = (if use_last { self.shuf.get_last() } else { self.shuf.get_golden() }).unwrap();
+        let single_mv = self.mb_info[mb_x + mb_y * self.mb_w].mb_type != VPMBType::InterFourMV;
+        let mut iidx = mb_x * 4 + mb_y * 4 * self.mv_stride;
+        let mut mc_buf = self.mc_buf.get_data_mut().unwrap();
+
+        let dst = &mut dframe.data[0..];
+        let ystride = dframe.stride[0];
+        let mut yoff = dframe.offset[0] + mb_x * 16 + mb_y * 16 * ystride;
+        // luma: MVs are in half-pel units, doubled to quarter-pel for MC
+        if pitch_smode == 0 {
+            if single_mv {
+                mc_block16x16(dst, yoff, ystride, mb_x * 16, mb_y * 16,
+                              self.mvs[iidx].x * 2, self.mvs[iidx].y * 2, refframe.clone(), 0, &mut mc_buf);
+            } else {
+                for y in 0..4 {
+                    for x in 0..4 {
+                        mc_block4x4(dst, yoff + x * 4, ystride, mb_x * 16 + x * 4, mb_y * 16 + y * 4,
+                                    self.mvs[iidx + x].x * 2, self.mvs[iidx + x].y * 2, refframe.clone(), 0, &mut mc_buf);
+                    }
+                    yoff += 4 * ystride;
+                    iidx += self.mv_stride;
+                }
+            }
+        } else {
+            if single_mv {
+                mc_block_special(dst, yoff, ystride, mb_x * 16, mb_y * 16,
+                                 self.mvs[iidx].x * 2, self.mvs[iidx].y * 2,
+                                 refframe.clone(), 0, &mut mc_buf, 16, pitch_smode);
+            } else {
+                for y in 0..4 {
+                    for x in 0..4 {
+                        mc_block_special(dst, yoff + x * 4, ystride,
+                                         mb_x * 16 + x * 4, mb_y * 16 + y * 4,
+                                         self.mvs[iidx + x].x * 2, self.mvs[iidx + x].y * 2,
+                                         refframe.clone(), 0, &mut mc_buf, 4, pitch_smode);
+                    }
+                    yoff += 4 * ystride;
+                    iidx += self.mv_stride;
+                }
+            }
+        }
+
+        let mut iidx = mb_x * 4 + mb_y * 4 * self.mv_stride;
+        let mut uoff = dframe.offset[1] + mb_x * 8 + mb_y * 8 * dframe.stride[1];
+        let ustride = dframe.stride[1];
+        let mut voff = dframe.offset[2] + mb_x * 8 + mb_y * 8 * dframe.stride[2];
+        let vstride = dframe.stride[2];
+        if single_mv {
+            let chroma_mv = self.mvs[iidx];
+
+            if pitch_smode == 0 {
+                mc_block8x8(dst, uoff, ustride, mb_x * 8, mb_y * 8, chroma_mv.x, chroma_mv.y, refframe.clone(), 1, &mut mc_buf);
+                mc_block8x8(dst, voff, vstride, mb_x * 8, mb_y * 8, chroma_mv.x, chroma_mv.y, refframe.clone(), 2, &mut mc_buf);
+            } else {
+                mc_block_special(dst, uoff, ustride, mb_x * 8, mb_y * 8, chroma_mv.x, chroma_mv.y,
+                                 refframe.clone(), 1, &mut mc_buf, 8, pitch_smode);
+                mc_block_special(dst, voff, vstride, mb_x * 8, mb_y * 8, chroma_mv.x, chroma_mv.y,
+                                 refframe.clone(), 2, &mut mc_buf, 8, pitch_smode);
+            }
+        } else {
+            for y in 0..2 {
+                for x in 0..2 {
+                    // average the four luma MVs covering this 4x4 chroma block.
+                    // bugfix: the fetch must be offset by x * 2 — the old code
+                    // read self.mvs[iidx..] for both x values, so the right
+                    // half used the left half's MVs.
+                    let mut chroma_mv = self.mvs[iidx + x * 2] + self.mvs[iidx + x * 2 + 1]
+                                      + self.mvs[iidx + x * 2 + self.mv_stride]
+                                      + self.mvs[iidx + x * 2 + self.mv_stride + 1];
+                    chroma_mv.x /= 4;
+                    chroma_mv.y /= 4;
+
+                    // bugfix: destination offsets need `+ x * 4` to match the
+                    // source position; previously both x iterations wrote into
+                    // the same destination block.
+                    if pitch_smode == 0 {
+                        mc_block4x4(dst, uoff + x * 4, ustride, mb_x * 8 + x * 4, mb_y * 8 + y * 4,
+                                    chroma_mv.x, chroma_mv.y, refframe.clone(), 1, &mut mc_buf);
+                        mc_block4x4(dst, voff + x * 4, vstride, mb_x * 8 + x * 4, mb_y * 8 + y * 4,
+                                    chroma_mv.x, chroma_mv.y, refframe.clone(), 2, &mut mc_buf);
+                    } else {
+                        mc_block_special(dst, uoff + x * 4, ustride, mb_x * 8 + x * 4, mb_y * 8 + y * 4,
+                                         chroma_mv.x, chroma_mv.y, refframe.clone(), 1, &mut mc_buf,
+                                         4, pitch_smode);
+                        mc_block_special(dst, voff + x * 4, vstride, mb_x * 8 + x * 4, mb_y * 8 + y * 4,
+                                         chroma_mv.x, chroma_mv.y, refframe.clone(), 2, &mut mc_buf,
+                                         4, pitch_smode);
+                    }
+                }
+                uoff += ustride * 4;
+                voff += vstride * 4;
+                iidx += 2 * self.mv_stride;
+            }
+        }
+        self.add_residue(dframe, mb_x, mb_y, true, pitch_dmode);
+    }
+ // Applies the in-loop deblocking filter to one macroblock: MB edges first
+ // (left, then top), then the three inner luma edges and the middle chroma
+ // edges, in both directions.
+ fn loop_filter_mb(&mut self, dframe: &mut NASimpleVideoFrame<u8>, mb_x: usize, mb_y: usize, loop_str: u8) {
+ // high-edge-variance thresholds, [1] for intra frames, [0] for inter
+ const HIGH_EDGE_VAR_THR: [[u8; 64]; 2] = [
+ [
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1,
+ 1, 1, 1, 1, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
+ 2, 2, 2, 2, 2, 2, 2, 2, 3, 3, 3, 3, 3, 3, 3, 3,
+ 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3
+ ], [
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1,
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1, 1, 2, 2, 2, 2, 2, 2, 2, 2,
+ 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2
+ ]];
+
+ let edge_thr = (loop_str as i16) + 2;
+ let luma_thr = loop_str as i16;
+ let chroma_thr = (loop_str as i16) * 2;
+ // inner threshold is attenuated by the sharpness setting
+ let inner_thr = if self.dstate.loop_sharpness == 0 {
+ loop_str as i16
+ } else {
+ let bound1 = (9 - self.dstate.loop_sharpness) as i16;
+ let shift = (self.dstate.loop_sharpness + 3) >> 2;
+ ((loop_str as i16) >> shift).min(bound1)
+ };
+ let hev_thr = HIGH_EDGE_VAR_THR[if self.dstate.is_intra { 1 } else { 0 }][loop_str as usize] as i16;
+
+ let ystride = dframe.stride[0];
+ let ustride = dframe.stride[1];
+ let vstride = dframe.stride[2];
+ let ypos = dframe.offset[0] + mb_x * 16 + mb_y * 16 * ystride;
+ let upos = dframe.offset[1] + mb_x * 8 + mb_y * 8 * ustride;
+ let vpos = dframe.offset[2] + mb_x * 8 + mb_y * 8 * vstride;
+
+ let (loop_edge, loop_inner) = if self.dstate.lf_simple {
+ (simple_loop_filter as LoopFilterFunc, simple_loop_filter as LoopFilterFunc)
+ } else {
+ (normal_loop_filter_edge as LoopFilterFunc, normal_loop_filter_inner as LoopFilterFunc)
+ };
+
+ // vertical MB edge (step 1 along a row)
+ if mb_x > 0 {
+ loop_edge(dframe.data, ypos, 1, ystride, 16, edge_thr, inner_thr, hev_thr);
+ loop_edge(dframe.data, upos, 1, ustride, 8, edge_thr, inner_thr, hev_thr);
+ loop_edge(dframe.data, vpos, 1, vstride, 8, edge_thr, inner_thr, hev_thr);
+ }
+ // horizontal MB edge
+ if mb_y > 0 {
+ loop_edge(dframe.data, ypos, ystride, 1, 16, edge_thr, inner_thr, hev_thr);
+ loop_edge(dframe.data, upos, ustride, 1, 8, edge_thr, inner_thr, hev_thr);
+ loop_edge(dframe.data, vpos, vstride, 1, 8, edge_thr, inner_thr, hev_thr);
+ }
+
+ for y in 1..4 {
+ loop_inner(dframe.data, ypos + y * 4 * ystride, ystride, 1, 16, luma_thr, inner_thr, hev_thr);
+ }
+ loop_inner(dframe.data, upos + 4 * ustride, ustride, 1, 8, chroma_thr, inner_thr, hev_thr);
+ loop_inner(dframe.data, vpos + 4 * vstride, vstride, 1, 8, chroma_thr, inner_thr, hev_thr);
+
+ for x in 1..4 {
+ loop_inner(dframe.data, ypos + x * 4, 1, ystride, 16, luma_thr, inner_thr, hev_thr);
+ }
+ loop_inner(dframe.data, upos + 4, 1, ustride, 8, chroma_thr, inner_thr, hev_thr);
+ loop_inner(dframe.data, vpos + 4, 1, vstride, 8, chroma_thr, inner_thr, hev_thr);
+ }
+}
+
+impl NADecoder for VP7Decoder {
+ // Sets up the decoder for the given stream: forces YUV420 output,
+ // preallocates the frame pool and sizes the internal arrays.
+ fn init(&mut self, supp: &mut NADecoderSupport, info: NACodecInfoRef) -> DecoderResult<()> {
+ if let NACodecTypeInfo::Video(vinfo) = info.get_properties() {
+ let fmt = YUV420_FORMAT;
+ let myvinfo = NAVideoInfo::new(vinfo.get_width(), vinfo.get_height(), false, fmt);
+ let myinfo = NACodecTypeInfo::Video(myvinfo.clone());
+ self.info = NACodecInfo::new_ref(info.get_name(), myinfo, info.get_extradata()).into_ref();
+
+ supp.pool_u8.set_dec_bufs(4);
+ // NOTE(review): the pool is preallocated with the *source* format
+ // (vinfo.get_format()) rather than the forced YUV420 `fmt` used for
+ // myvinfo above — looks intentional only if the two always match;
+ // TODO confirm.
+ supp.pool_u8.prealloc_video(NAVideoInfo::new(myvinfo.get_width(), myvinfo.get_height(), false, vinfo.get_format()), 4)?;
+ self.set_dimensions(myvinfo.get_width(), myvinfo.get_height());
+ Ok(())
+ } else {
+ Err(DecoderError::InvalidData)
+ }
+ }
+ fn decode(&mut self, supp: &mut NADecoderSupport, pkt: &NAPacket) -> DecoderResult<NAFrameRef> {
+ let src = pkt.get_buffer();
+
+ validate!(src.len() > 4);
+
+ let frame_tag = read_u24le(src.as_slice())?;
+ self.dstate.is_intra = (frame_tag & 1) == 0;
+ self.dstate.version = ((frame_tag >> 1) & 7) as u8;
+ let part2_off = (frame_tag >> 4) as usize;
+ let part1_off = if self.dstate.version == 0 { 4 } else { 3 };
+
+ validate!(src.len() > part1_off + part2_off);
+ let mut bc = BoolCoder::new(&src[part1_off..][..part2_off])?;
+ let mut bc_main = BoolCoder::new(&src[part1_off + part2_off..])?;
+ if self.dstate.is_intra {
+ let width = bc.read_bits(12) as usize;
+ let height = bc.read_bits(12) as usize;
+ let _scalev = bc.read_bits(2);
+ let _scaleh = bc.read_bits(2);
+ validate!((width > 0) && (height > 0));
+ self.set_dimensions(width, height);
+
+ self.dstate.reset();
+ }
+
+ self.read_features(&mut bc)?;
+
+ let y_ac_q = bc.read_bits(7) as usize;
+ let y_dc_q = if bc.read_bool() { bc.read_bits(7) as usize } else { y_ac_q };
+ let y2_dc_q = if bc.read_bool() { bc.read_bits(7) as usize } else { y_ac_q };
+ let y2_ac_q = if bc.read_bool() { bc.read_bits(7) as usize } else { y_ac_q };
+ let uv_dc_q = if bc.read_bool() { bc.read_bits(7) as usize } else { y_ac_q };
+ let uv_ac_q = if bc.read_bool() { bc.read_bits(7) as usize } else { y_ac_q };
+ self.set_qmat(y_dc_q, y_ac_q, y2_dc_q, y2_ac_q, uv_dc_q, uv_ac_q);
+
+ let update_gf = if self.dstate.is_intra { true } else { bc.read_bool() };
+
+ let mut has_fading_feature = true;
+ let mut keep_probs = true;
+ if self.dstate.version != 0 {
+ keep_probs = bc.read_bool();
+ if self.dstate.is_intra {
+ has_fading_feature = true;
+ } else {
+ has_fading_feature = bc.read_bool();
+ }
+ }
+
+ if has_fading_feature {
+ self.dstate.fading = bc.read_bool();
+ if self.dstate.fading {
+ self.dstate.fade_alpha = bc.read_sbits(8) as u16;
+ self.dstate.fade_beta = bc.read_sbits(8) as u16;
+ if let Some(pframe) = self.shuf.get_last() {
+ let mut fframe = supp.pool_u8.get_free().unwrap();
+ let mut dframe = NASimpleVideoFrame::from_video_buf(&mut fframe).unwrap();
+ fade_frame(pframe, &mut dframe, self.dstate.fade_alpha, self.dstate.fade_beta);
+ self.shuf.add_frame(fframe);
+ }
+ }
+ } else {
+ self.dstate.fading = false;
+ }
+
+ if self.dstate.version == 0 {
+ self.dstate.lf_simple = bc.read_bool();
+ }
+
+ if bc.read_bool() {
+ for i in 1..16 {
+ self.scan[i] = DEFAULT_SCAN_ORDER[bc.read_bits(4) as usize];
+ }
+ }
+
+ if self.dstate.version != 0 {
+ self.dstate.lf_simple = bc.read_bool();
+ } else {
+ self.dstate.lf_simple = false;
+ }
+
+ self.dstate.loop_filter_level = bc.read_bits(6) as u8;
+ self.dstate.loop_sharpness = bc.read_bits(3) as u8;
+
+ self.read_dct_coef_prob_upd(&mut bc)?;
+
+ if !self.dstate.is_intra {
+ self.dstate.prob_intra_pred = bc.read_byte();
+ self.dstate.prob_last_pred = bc.read_byte();
+ if bc.read_bool() {
+ for i in 0..4 {
+ self.dstate.kf_ymode_prob[i] = bc.read_byte();
+ }
+ }
+ if bc.read_bool() {
+ for i in 0..3 {
+ self.dstate.kf_uvmode_prob[i] = bc.read_byte();
+ }
+ }
+ self.read_mv_prob_upd(&mut bc)?;
+ }
+ if !keep_probs {
+ self.tmp_scan.copy_from_slice(&self.scan);
+ }
+
+ let vinfo = NAVideoInfo::new(self.width, self.height, false, YUV420_FORMAT);
+ let ret = supp.pool_u8.get_free();
+ if ret.is_none() {
+ return Err(DecoderError::AllocError);
+ }
+ let mut buf = ret.unwrap();
+ if buf.get_info() != vinfo {
+ self.shuf.clear();
+ supp.pool_u8.reset();
+ supp.pool_u8.prealloc_video(vinfo, 4)?;
+ let ret = supp.pool_u8.get_free();
+ if ret.is_none() {
+ return Err(DecoderError::AllocError);
+ }
+ buf = ret.unwrap();
+ }
+ let mut dframe = NASimpleVideoFrame::from_video_buf(&mut buf).unwrap();
+
+ let mut mb_idx = 0;
+ self.pcache.reset();
+ self.dstate.pdc_pred_val = 0;
+ self.dstate.pdc_pred_count = 0;
+ let mut use_last = true;
+ for mb_y in 0..self.mb_h {
+ for mb_x in 0..self.mb_w {
+ self.decode_mb_features(&mut bc, mb_x, mb_y)?;
+ self.dstate.has_y2 = true;
+ if self.dstate.is_intra {
+ let ymode = bc.read_tree(KF_Y_MODE_TREE, KF_Y_MODE_TREE_PROBS);
+ if ymode == PredMode::BPred {
+ self.dstate.has_y2 = false;
+ let mut iidx = mb_x * 4 + mb_y * 4 * self.ymode_stride;
+ for y in 0..4 {
+ for x in 0..4 {
+ let top_mode = if (y > 0) || (mb_y > 0) {
+ self.ymodes[iidx + x - self.ymode_stride]
+ } else {
+ PredMode::DCPred
+ };
+ let left_mode = if (x > 0) || (mb_x > 0) {
+ self.ymodes[iidx + x - 1]
+ } else {
+ PredMode::DCPred
+ };
+ let top_idx = top_mode.to_b_index();
+ let left_idx = left_mode.to_b_index();
+ let bmode = bc.read_tree(B_MODE_TREE, &KF_B_MODE_TREE_PROBS[top_idx][left_idx]);
+ self.ymodes[iidx + x] = bmode;
+ }
+ iidx += self.ymode_stride;
+ }
+ } else {
+ self.fill_ymode(mb_x, mb_y, ymode.to_b_mode());
+ }
+ let uvmode = bc.read_tree(UV_MODE_TREE, KF_UV_MODE_TREE_PROBS);
+ self.mb_info[mb_idx].mb_type = VPMBType::Intra;
+ self.mb_info[mb_idx].ymode = ymode;
+ self.mb_info[mb_idx].uvmode = uvmode;
+ } else if !bc.read_prob(self.dstate.prob_intra_pred) {
+ let ymode = bc.read_tree(Y_MODE_TREE, &self.dstate.kf_ymode_prob);
+ if ymode == PredMode::BPred {
+ self.dstate.has_y2 = false;
+ let mut iidx = mb_x * 4 + mb_y * 4 * self.ymode_stride;
+ for _y in 0..4 {
+ for x in 0..4 {
+ let bmode = bc.read_tree(B_MODE_TREE, B_MODE_TREE_PROBS);
+ self.ymodes[iidx + x] = bmode;
+ }
+ iidx += self.ymode_stride;
+ }
+ } else {
+ self.fill_ymode(mb_x, mb_y, PredMode::Inter);
+ }
+ let uvmode = bc.read_tree(UV_MODE_TREE, &self.dstate.kf_uvmode_prob);
+ self.mb_info[mb_idx].mb_type = VPMBType::Intra;
+ self.mb_info[mb_idx].ymode = ymode;
+ self.mb_info[mb_idx].uvmode = uvmode;
+ self.fill_mv(mb_x, mb_y, ZERO_MV);
+ } else {
+ use_last = !bc.read_prob(self.dstate.prob_last_pred);
+
+ let (mvprobs, nearest_mv, near_mv, pred_mv) = self.find_mv_pred(mb_x, mb_y);
+ let mbtype = bc.read_tree(MV_REF_TREE, &mvprobs);
+
+ match mbtype {
+ VPMBType::InterNearest => {
+ self.fill_mv(mb_x, mb_y, nearest_mv);
+ },
+ VPMBType::InterNear => {
+ self.fill_mv(mb_x, mb_y, near_mv);
+ },
+ VPMBType::InterNoMV => {
+ self.fill_mv(mb_x, mb_y, ZERO_MV);
+ },
+ VPMBType::InterMV => {
+ let dmy = decode_mv_component(&mut bc, &self.dstate.mv_probs[0]);
+ let dmx = decode_mv_component(&mut bc, &self.dstate.mv_probs[1]);
+ let new_mv = pred_mv + MV{ x: dmx, y: dmy };
+ self.fill_mv(mb_x, mb_y, new_mv);
+ },
+ VPMBType::InterFourMV => {
+ self.do_split_mv(&mut bc, mb_x, mb_y, pred_mv)?;
+ },
+ _ => unreachable!(),
+ };
+
+ self.fill_ymode(mb_x, mb_y, PredMode::Inter);
+ self.mb_info[mb_idx].mb_type = mbtype;
+ self.mb_info[mb_idx].ymode = PredMode::Inter;
+ self.mb_info[mb_idx].uvmode = PredMode::Inter;
+ }
+ self.decode_residue(&mut bc_main, mb_x, mb_idx);
+ match self.mb_info[mb_idx].mb_type {
+ VPMBType::Intra => {
+ self.recon_intra_mb(&mut dframe, mb_x, mb_y)?;
+ },
+ _ => {
+ self.recon_inter_mb(&mut dframe, mb_x, mb_y, use_last);
+ },
+ }
+ if let Some(loop_str) = self.dstate.force_loop_str {
+ self.mb_info[mb_idx].loop_str = loop_str;
+ } else {
+ self.mb_info[mb_idx].loop_str = self.dstate.loop_filter_level;
+ }
+ self.mb_info[mb_idx].upd_gf = self.dstate.force_gf_update;
+ mb_idx += 1;
+ }
+ self.pcache.update_row();
+ }
+ let mut mb_idx = 0;
+ for mb_y in 0..self.mb_h {
+ for mb_x in 0..self.mb_w {
+ let loop_str = self.mb_info[mb_idx].loop_str;
+ self.loop_filter_mb(&mut dframe, mb_x, mb_y, loop_str);
+ mb_idx += 1;
+ }
+ }
+ if !update_gf && self.dstate.features[2].is_some() {
+ let gf = self.shuf.get_golden().unwrap();
+ let mut new_gf = supp.pool_u8.get_copy(&gf).unwrap();
+ let dframe = NASimpleVideoFrame::from_video_buf(&mut new_gf).unwrap();
+ let mut mb_idx = 0;
+ let mut mc_buf = self.mc_buf.get_data_mut().unwrap();
+ for mb_y in 0..self.mb_h {
+ for mb_x in 0..self.mb_w {
+ if self.mb_info[mb_idx].upd_gf {
+ mc_block16x16(dframe.data, dframe.offset[0] + mb_x * 16 + mb_y * 16 * dframe.stride[0], dframe.stride[0], mb_x * 16, mb_y * 16, 0, 0, buf.clone(), 0, &mut mc_buf);
+ mc_block8x8(dframe.data, dframe.offset[1] + mb_x * 8 + mb_y * 8 * dframe.stride[1], dframe.stride[1], mb_x * 8, mb_y * 8, 0, 0, buf.clone(), 1, &mut mc_buf);
+ mc_block8x8(dframe.data, dframe.offset[2] + mb_x * 8 + mb_y * 8 * dframe.stride[2], dframe.stride[2], mb_x * 8, mb_y * 8, 0, 0, buf.clone(), 2, &mut mc_buf);
+ }
+ mb_idx += 1;
+ }
+ }
+ self.shuf.add_golden_frame(new_gf);
+ }
+
+ if !keep_probs {
+ self.scan.copy_from_slice(&self.tmp_scan);
+ }
+ if update_gf {
+ self.shuf.add_golden_frame(buf.clone());
+ }
+ self.shuf.add_frame(buf.clone());
+
+ let mut frm = NAFrame::new_from_pkt(pkt, self.info.clone(), NABufferType::Video(buf));
+ frm.set_keyframe(self.dstate.is_intra);
+ frm.set_frame_type(if self.dstate.is_intra { FrameType::I } else { FrameType::P });
+ Ok(frm.into_ref())
+ }
+}
+
+pub fn get_decoder() -> Box<NADecoder> {
+ Box::new(VP7Decoder::new())
+}
+
#[cfg(test)]
mod test {
    use nihav_core::codecs::RegisteredDecoders;
    use nihav_core::demuxers::RegisteredDemuxers;
    use nihav_core::test::dec_video::*;
    use crate::codecs::duck_register_all_codecs;
    use nihav_commonfmt::demuxers::generic_register_all_demuxers;

    /// Decodes the first frames of a local VP7 sample via the generic
    /// AVI demuxer and this crate's decoders.
    #[test]
    fn test_vp7() {
        // Register every demuxer and decoder available to the test.
        let mut demuxers = RegisteredDemuxers::new();
        generic_register_all_demuxers(&mut demuxers);
        let mut decoders = RegisteredDecoders::new();
        duck_register_all_codecs(&mut decoders);

        // Other local samples useful for manual testing:
        //let file = "assets/Duck/potter-40.vp7";
        //let file = "assets/Duck/potter-500.vp7";
        //let file = "assets/Duck/starsky-700.vp7";
        //let file = "assets/Duck/taking-700.vp7";
        //let file = "assets/Duck/troy-700.vp7";
        //let file = "assets/Duck/vp7.avi";
        let file = "assets/Duck/interlaced_blit_pitch.avi";
        test_file_decoding("avi", file, Some(12), true, false, None/*Some("vp7")*/, &demuxers, &decoders);
    }
}
+
/* Conventional JPEG-style zigzag order, kept for reference only;
   VP7 uses DEFAULT_SCAN_ORDER below, optionally overridden per frame.
const DEFAULT_ZIGZAG: [usize; 16] = [
     0,  1,  5,  6,
     2,  4,  7, 12,
     3,  8, 11, 13,
     9, 10, 14, 15
];*/
/// Default DCT coefficient scan order; decode() may override entries 1..16
/// per frame from the bitstream, picking values from this table.
const DEFAULT_SCAN_ORDER: [usize; 16] = [
     0,  1,  4,  8,
     5,  2,  3,  6,
     9, 12, 13, 10,
     7, 11, 14, 15
];
+
// Flattened binary trees as read by VPTreeReader::read_tree(): each node is
// two consecutive entries (chosen by the decoded bit), `Index` jumps to
// another node's first entry, `Value` terminates; the probability for a node
// is tree_prob[entry_index / 2].

/// 16x16 luma prediction mode tree for intra macroblocks in inter frames.
const Y_MODE_TREE: &[VPTreeDef<PredMode>] = &[
    VPTreeDef::Value(PredMode::DCPred), VPTreeDef::Index(2),
    VPTreeDef::Index(4),                VPTreeDef::Index(6),
    VPTreeDef::Value(PredMode::VPred),  VPTreeDef::Value(PredMode::HPred),
    VPTreeDef::Value(PredMode::TMPred), VPTreeDef::Value(PredMode::BPred),
];
/// 16x16 luma prediction mode tree for keyframe macroblocks.
const KF_Y_MODE_TREE: &[VPTreeDef<PredMode>] = &[
    VPTreeDef::Value(PredMode::BPred),  VPTreeDef::Index(2),
    VPTreeDef::Index(4),                VPTreeDef::Index(6),
    VPTreeDef::Value(PredMode::DCPred), VPTreeDef::Value(PredMode::VPred),
    VPTreeDef::Value(PredMode::HPred),  VPTreeDef::Value(PredMode::TMPred),
];
/// Chroma prediction mode tree (shared by keyframes and inter frames).
const UV_MODE_TREE: &[VPTreeDef<PredMode>] = &[
    VPTreeDef::Value(PredMode::DCPred), VPTreeDef::Index(2),
    VPTreeDef::Value(PredMode::VPred),  VPTreeDef::Index(4),
    VPTreeDef::Value(PredMode::HPred),  VPTreeDef::Value(PredMode::TMPred)
];
/// 4x4 subblock prediction mode tree (BPred macroblocks).
const B_MODE_TREE: &[VPTreeDef<PredMode>] = &[
    VPTreeDef::Value(PredMode::DCPred), VPTreeDef::Index(2),
    VPTreeDef::Value(PredMode::TMPred), VPTreeDef::Index(4),
    VPTreeDef::Value(PredMode::VPred),  VPTreeDef::Index(6),
    VPTreeDef::Index(8),                VPTreeDef::Index(12),
    VPTreeDef::Value(PredMode::HPred),  VPTreeDef::Index(10),
    VPTreeDef::Value(PredMode::RDPred), VPTreeDef::Value(PredMode::VRPred),
    VPTreeDef::Value(PredMode::LDPred), VPTreeDef::Index(14),
    VPTreeDef::Value(PredMode::VLPred), VPTreeDef::Index(16),
    VPTreeDef::Value(PredMode::HDPred), VPTreeDef::Value(PredMode::HUPred)
];
+
/// Tree selecting one of four macroblock feature values.
const FEATURE_TREE: &[VPTreeDef<usize>] = &[
    VPTreeDef::Index(2), VPTreeDef::Index(4),
    VPTreeDef::Value(0), VPTreeDef::Value(1),
    VPTreeDef::Value(2), VPTreeDef::Value(3)
];

/// DCT token tree: EOB/zero/small values first, then the six "category"
/// tokens whose extra bits are read by expand_token().
const COEF_TREE: &[VPTreeDef<DCTToken>] = &[
    VPTreeDef::Value(DCTToken::EOB),   VPTreeDef::Index(2),
    VPTreeDef::Value(DCTToken::Zero),  VPTreeDef::Index(4),
    VPTreeDef::Value(DCTToken::One),   VPTreeDef::Index(6),
    VPTreeDef::Index(8),               VPTreeDef::Index(12),
    VPTreeDef::Value(DCTToken::Two),   VPTreeDef::Index(10),
    VPTreeDef::Value(DCTToken::Three), VPTreeDef::Value(DCTToken::Four),
    VPTreeDef::Index(14),              VPTreeDef::Index(16),
    VPTreeDef::Value(DCTToken::Cat1),  VPTreeDef::Value(DCTToken::Cat2),
    VPTreeDef::Index(18),              VPTreeDef::Index(20),
    VPTreeDef::Value(DCTToken::Cat3),  VPTreeDef::Value(DCTToken::Cat4),
    VPTreeDef::Value(DCTToken::Cat5),  VPTreeDef::Value(DCTToken::Cat6)
];
+
/// Inter macroblock motion mode tree (no MV / nearest / near / new / split).
const MV_REF_TREE: &[VPTreeDef<VPMBType>] = &[
    VPTreeDef::Value(VPMBType::InterNoMV),    VPTreeDef::Index(2),
    VPTreeDef::Value(VPMBType::InterNearest), VPTreeDef::Index(4),
    VPTreeDef::Value(VPMBType::InterNear),    VPTreeDef::Index(6),
    VPTreeDef::Value(VPMBType::InterMV),      VPTreeDef::Value(VPMBType::InterFourMV)
];
/// Tree for short motion vector component magnitudes (0..=7).
const SMALL_MV_TREE: &[VPTreeDef<i16>] = &[
    VPTreeDef::Index(2),  VPTreeDef::Index(8),
    VPTreeDef::Index(4),  VPTreeDef::Index(6),
    VPTreeDef::Value(0),  VPTreeDef::Value(1),
    VPTreeDef::Value(2),  VPTreeDef::Value(3),
    VPTreeDef::Index(10), VPTreeDef::Index(12),
    VPTreeDef::Value(4),  VPTreeDef::Value(5),
    VPTreeDef::Value(6),  VPTreeDef::Value(7)
];
/// Tree selecting how a four-MV macroblock is partitioned.
const MV_SPLIT_MODE_TREE: &[VPTreeDef<MVSplitMode>] = &[
    VPTreeDef::Value(MVSplitMode::Sixteenths), VPTreeDef::Index(2),
    VPTreeDef::Value(MVSplitMode::Quarters),   VPTreeDef::Index(4),
    VPTreeDef::Value(MVSplitMode::TopBottom),  VPTreeDef::Value(MVSplitMode::LeftRight)
];
/// Tree selecting the motion vector source for one sub-partition.
const SUB_MV_REF_TREE: &[VPTreeDef<SubMVRef>] = &[
    VPTreeDef::Value(SubMVRef::Left),  VPTreeDef::Index(2),
    VPTreeDef::Value(SubMVRef::Above), VPTreeDef::Index(4),
    VPTreeDef::Value(SubMVRef::Zero),  VPTreeDef::Value(SubMVRef::New)
];
--- /dev/null
/// Keyframe 16x16 luma mode tree probabilities.
pub const KF_Y_MODE_TREE_PROBS: &[u8; 4] = &[ 145, 156, 163, 128 ];
/// Keyframe chroma mode tree probabilities.
pub const KF_UV_MODE_TREE_PROBS: &[u8; 3] = &[ 142, 114, 183 ];
/// Inter-frame 16x16 luma mode tree probabilities.
pub const Y_MODE_TREE_PROBS: &[u8; 4] = &[ 112, 86, 140, 37 ];
/// Inter-frame chroma mode tree probabilities.
pub const UV_MODE_TREE_PROBS: &[u8; 3] = &[ 162, 101, 204 ];

/// Probability that each of the 17 per-component MV model probabilities is
/// updated; index 0 is used for the vertical component, 1 for horizontal
/// (the vertical component is decoded first in decode()).
pub const MV_UPDATE_PROBS: [[u8; 17]; 2] = [
    [ 237, 246, 253, 253, 254, 254, 254, 254, 254, 254, 254, 254, 254, 254, 250, 250, 252 ],
    [ 231, 243, 245, 253, 254, 254, 254, 254, 254, 254, 254, 254, 254, 254, 251, 251, 254 ]
];
/// Initial MV component model probabilities ([0] = vertical, [1] = horizontal).
pub const DEFAULT_MV_PROBS: [[u8; 17]; 2] = [
    [ 162, 128, 225, 146, 172, 147, 214, 39, 156, 247, 210, 135, 68, 138, 220, 239, 246 ],
    [ 164, 128, 204, 170, 119, 235, 140, 230, 228, 244, 184, 201, 44, 173, 221, 239, 253 ]
];
/// MV split mode tree probabilities.
pub const MV_SPLIT_MODE_PROBS: [u8; 3] = [ 110, 111, 150 ];
/// Sub-MV reference tree probabilities.
pub const SUB_MV_REF_PROBS: [u8; 3] = [ 180, 162, 25 ];
+
/// Motion mode (MV_REF_TREE) probabilities, 31 rows indexed by a context
/// value — presumably derived from neighbouring macroblock motion modes;
/// the derivation lives outside this file (TODO confirm against vp7.rs).
pub const INTER_MODE_PROBS: [[u8; 4]; 31] = [
    [   3,   3,   1, 246 ],
    [   7,  89,  66, 239 ],
    [  10,  90,  78, 238 ],
    [  14, 118,  95, 241 ],
    [  14, 123, 106, 238 ],
    [  20, 140, 109, 240 ],
    [  13, 155, 103, 238 ],
    [  21, 158,  99, 240 ],
    [  27,  82, 108, 232 ],
    [  19,  99, 123, 217 ],
    [  45, 139, 148, 236 ],
    [  50, 117, 144, 235 ],
    [  57, 128, 164, 238 ],
    [  69, 139, 171, 239 ],
    [  74, 154, 179, 238 ],
    [ 112, 165, 186, 242 ],
    [  98, 143, 185, 245 ],
    [ 105, 153, 190, 250 ],
    [ 124, 167, 192, 245 ],
    [ 131, 186, 203, 246 ],
    [  59, 184, 222, 224 ],
    [ 148, 215, 214, 213 ],
    [ 137, 211, 210, 219 ],
    [ 190, 227, 128, 228 ],
    [ 183, 228, 128, 228 ],
    [ 194, 234, 128, 228 ],
    [ 202, 236, 128, 228 ],
    [ 205, 240, 128, 228 ],
    [ 205, 244, 128, 228 ],
    [ 225, 246, 128, 228 ],
    [ 233, 251, 128, 228 ]
];
+
/// 4x4 subblock mode tree probabilities for intra blocks in inter frames
/// (context-free variant).
pub const B_MODE_TREE_PROBS: &[u8; 9] = &[ 120, 90, 79, 133, 87, 85, 80, 111, 151 ];
/// Keyframe 4x4 subblock mode tree probabilities, indexed
/// [above mode][left mode] (see PredMode::to_b_index()).
pub const KF_B_MODE_TREE_PROBS: [[[u8; 9]; 10]; 10] = [
    [
        [ 231, 120, 48, 89, 115, 113, 120, 152, 112 ],
        [ 152, 179, 64, 126, 170, 118, 46, 70, 95 ],
        [ 175, 69, 143, 80, 85, 82, 72, 155, 103 ],
        [ 56, 58, 10, 171, 218, 189, 17, 13, 152 ],
        [ 144, 71, 10, 38, 171, 213, 144, 34, 26 ],
        [ 114, 26, 17, 163, 44, 195, 21, 10, 173 ],
        [ 121, 24, 80, 195, 26, 62, 44, 64, 85 ],
        [ 170, 46, 55, 19, 136, 160, 33, 206, 71 ],
        [ 63, 20, 8, 114, 114, 208, 12, 9, 226 ],
        [ 81, 40, 11, 96, 182, 84, 29, 16, 36 ],
    ], [
        [ 134, 183, 89, 137, 98, 101, 106, 165, 148 ],
        [ 72, 187, 100, 130, 157, 111, 32, 75, 80 ],
        [ 66, 102, 167, 99, 74, 62, 40, 234, 128 ],
        [ 41, 53, 9, 178, 241, 141, 26, 8, 107 ],
        [ 104, 79, 12, 27, 217, 255, 87, 17, 7 ],
        [ 74, 43, 26, 146, 73, 166, 49, 23, 157 ],
        [ 65, 38, 105, 160, 51, 52, 31, 115, 128 ],
        [ 87, 68, 71, 44, 114, 51, 15, 186, 23 ],
        [ 47, 41, 14, 110, 182, 183, 21, 17, 194 ],
        [ 66, 45, 25, 102, 197, 189, 23, 18, 22 ],
    ], [
        [ 88, 88, 147, 150, 42, 46, 45, 196, 205 ],
        [ 43, 97, 183, 117, 85, 38, 35, 179, 61 ],
        [ 39, 53, 200, 87, 26, 21, 43, 232, 171 ],
        [ 56, 34, 51, 104, 114, 102, 29, 93, 77 ],
        [ 107, 54, 32, 26, 51, 1, 81, 43, 31 ],
        [ 39, 28, 85, 171, 58, 165, 90, 98, 64 ],
        [ 34, 22, 116, 206, 23, 34, 43, 166, 73 ],
        [ 68, 25, 106, 22, 64, 171, 36, 225, 114 ],
        [ 34, 19, 21, 102, 132, 188, 16, 76, 124 ],
        [ 62, 18, 78, 95, 85, 57, 50, 48, 51 ],
    ], [
        [ 193, 101, 35, 159, 215, 111, 89, 46, 111 ],
        [ 60, 148, 31, 172, 219, 228, 21, 18, 111 ],
        [ 112, 113, 77, 85, 179, 255, 38, 120, 114 ],
        [ 40, 42, 1, 196, 245, 209, 10, 25, 109 ],
        [ 100, 80, 8, 43, 154, 1, 51, 26, 71 ],
        [ 88, 43, 29, 140, 166, 213, 37, 43, 154 ],
        [ 61, 63, 30, 155, 67, 45, 68, 1, 209 ],
        [ 142, 78, 78, 16, 255, 128, 34, 197, 171 ],
        [ 41, 40, 5, 102, 211, 183, 4, 1, 221 ],
        [ 51, 50, 17, 168, 209, 192, 23, 25, 82 ],
    ], [
        [ 125, 98, 42, 88, 104, 85, 117, 175, 82 ],
        [ 95, 84, 53, 89, 128, 100, 113, 101, 45 ],
        [ 75, 79, 123, 47, 51, 128, 81, 171, 1 ],
        [ 57, 17, 5, 71, 102, 57, 53, 41, 49 ],
        [ 115, 21, 2, 10, 102, 255, 166, 23, 6 ],
        [ 38, 33, 13, 121, 57, 73, 26, 1, 85 ],
        [ 41, 10, 67, 138, 77, 110, 90, 47, 114 ],
        [ 101, 29, 16, 10, 85, 128, 101, 196, 26 ],
        [ 57, 18, 10, 102, 102, 213, 34, 20, 43 ],
        [ 117, 20, 15, 36, 163, 128, 68, 1, 26 ],
    ], [
        [ 138, 31, 36, 171, 27, 166, 38, 44, 229 ],
        [ 67, 87, 58, 169, 82, 115, 26, 59, 179 ],
        [ 63, 59, 90, 180, 59, 166, 93, 73, 154 ],
        [ 40, 40, 21, 116, 143, 209, 34, 39, 175 ],
        [ 57, 46, 22, 24, 128, 1, 54, 17, 37 ],
        [ 47, 15, 16, 183, 34, 223, 49, 45, 183 ],
        [ 46, 17, 33, 183, 6, 98, 15, 32, 183 ],
        [ 65, 32, 73, 115, 28, 128, 23, 128, 205 ],
        [ 40, 3, 9, 115, 51, 192, 18, 6, 223 ],
        [ 87, 37, 9, 115, 59, 77, 64, 21, 47 ],
    ], [
        [ 104, 55, 44, 218, 9, 54, 53, 130, 226 ],
        [ 64, 90, 70, 205, 40, 41, 23, 26, 57 ],
        [ 54, 57, 112, 184, 5, 41, 38, 166, 213 ],
        [ 30, 34, 26, 133, 152, 116, 10, 32, 134 ],
        [ 75, 32, 12, 51, 192, 255, 160, 43, 51 ],
        [ 39, 19, 53, 221, 26, 114, 32, 73, 255 ],
        [ 31, 9, 65, 234, 2, 15, 1, 118, 73 ],
        [ 88, 31, 35, 67, 102, 85, 55, 186, 85 ],
        [ 56, 21, 23, 111, 59, 205, 45, 37, 192 ],
        [ 55, 38, 70, 124, 73, 102, 1, 34, 98 ],
    ], [
        [ 102, 61, 71, 37, 34, 53, 31, 243, 192 ],
        [ 69, 60, 71, 38, 73, 119, 28, 222, 37 ],
        [ 68, 45, 128, 34, 1, 47, 11, 245, 171 ],
        [ 62, 17, 19, 70, 146, 85, 55, 62, 70 ],
        [ 75, 15, 9, 9, 64, 255, 184, 119, 16 ],
        [ 37, 43, 37, 154, 100, 163, 85, 160, 1 ],
        [ 63, 9, 92, 136, 28, 64, 32, 201, 85 ],
        [ 86, 6, 28, 5, 64, 255, 25, 248, 1 ],
        [ 56, 8, 17, 132, 137, 255, 55, 116, 128 ],
        [ 58, 15, 20, 82, 135, 57, 26, 121, 40 ],
    ], [
        [ 164, 50, 31, 137, 154, 133, 25, 35, 218 ],
        [ 51, 103, 44, 131, 131, 123, 31, 6, 158 ],
        [ 86, 40, 64, 135, 148, 224, 45, 183, 128 ],
        [ 22, 26, 17, 131, 240, 154, 14, 1, 209 ],
        [ 83, 12, 13, 54, 192, 255, 68, 47, 28 ],
        [ 45, 16, 21, 91, 64, 222, 7, 1, 197 ],
        [ 56, 21, 39, 155, 60, 138, 23, 102, 213 ],
        [ 85, 26, 85, 85, 128, 128, 32, 146, 171 ],
        [ 18, 11, 7, 63, 144, 171, 4, 4, 246 ],
        [ 35, 27, 10, 146, 174, 171, 12, 26, 128 ],
    ], [
        [ 190, 80, 35, 99, 180, 80, 126, 54, 45 ],
        [ 85, 126, 47, 87, 176, 51, 41, 20, 32 ],
        [ 101, 75, 128, 139, 118, 146, 116, 128, 85 ],
        [ 56, 41, 15, 176, 236, 85, 37, 9, 62 ],
        [ 146, 36, 19, 30, 171, 255, 97, 27, 20 ],
        [ 71, 30, 17, 119, 118, 255, 17, 18, 138 ],
        [ 101, 38, 60, 138, 55, 70, 43, 26, 142 ],
        [ 138, 45, 61, 62, 219, 1, 81, 188, 64 ],
        [ 32, 41, 20, 117, 151, 142, 20, 21, 163 ],
        [ 112, 19, 12, 61, 195, 128, 48, 4, 24 ],
    ]
];
+
/// Probabilities that the corresponding DCT token probability is updated in
/// the frame header. Dimensions are [4][8][3][11] — presumably
/// [block type][coefficient band][context][probability index] as in the
/// related VP8 format; confirm against read_dct_coef_prob_upd() in vp7.rs.
pub const DCT_UPDATE_PROBS: [[[[u8; 11]; 3]; 8]; 4] = [
    [
        [
            [ 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255 ],
            [ 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255 ],
            [ 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255 ]
        ], [
            [ 176, 246, 255, 255, 255, 255, 255, 255, 255, 255, 255 ],
            [ 223, 241, 252, 255, 255, 255, 255, 255, 255, 255, 255 ],
            [ 249, 253, 253, 255, 255, 255, 255, 255, 255, 255, 255 ]
        ], [
            [ 255, 244, 252, 255, 255, 255, 255, 255, 255, 255, 255 ],
            [ 234, 254, 254, 255, 255, 255, 255, 255, 255, 255, 255 ],
            [ 253, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255 ]
        ], [
            [ 255, 246, 254, 255, 255, 255, 255, 255, 255, 255, 255 ],
            [ 239, 253, 254, 255, 255, 255, 255, 255, 255, 255, 255 ],
            [ 254, 255, 254, 255, 255, 255, 255, 255, 255, 255, 255 ]
        ], [
            [ 255, 248, 254, 255, 255, 255, 255, 255, 255, 255, 255 ],
            [ 251, 255, 254, 255, 255, 255, 255, 255, 255, 255, 255 ],
            [ 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255 ]
        ], [
            [ 255, 253, 254, 255, 255, 255, 255, 255, 255, 255, 255 ],
            [ 251, 254, 254, 255, 255, 255, 255, 255, 255, 255, 255 ],
            [ 254, 255, 254, 255, 255, 255, 255, 255, 255, 255, 255 ]
        ], [
            [ 255, 254, 253, 255, 254, 255, 255, 255, 255, 255, 255 ],
            [ 250, 255, 254, 255, 254, 255, 255, 255, 255, 255, 255 ],
            [ 254, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255 ]
        ], [
            [ 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255 ],
            [ 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255 ],
            [ 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255 ]
        ],
    ], [
        [
            [ 217, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255 ],
            [ 225, 252, 241, 253, 255, 255, 254, 255, 255, 255, 255 ],
            [ 234, 250, 241, 250, 253, 255, 253, 254, 255, 255, 255 ],
        ], [
            [ 255, 254, 255, 255, 255, 255, 255, 255, 255, 255, 255 ],
            [ 223, 254, 254, 255, 255, 255, 255, 255, 255, 255, 255 ],
            [ 238, 253, 254, 254, 255, 255, 255, 255, 255, 255, 255 ],
        ], [
            [ 255, 248, 254, 255, 255, 255, 255, 255, 255, 255, 255 ],
            [ 249, 254, 255, 255, 255, 255, 255, 255, 255, 255, 255 ],
            [ 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255 ],
        ], [
            [ 255, 253, 255, 255, 255, 255, 255, 255, 255, 255, 255 ],
            [ 247, 254, 255, 255, 255, 255, 255, 255, 255, 255, 255 ],
            [ 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255 ],
        ], [
            [ 255, 253, 254, 255, 255, 255, 255, 255, 255, 255, 255 ],
            [ 252, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255 ],
            [ 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255 ]
        ], [
            [ 255, 254, 254, 255, 255, 255, 255, 255, 255, 255, 255 ],
            [ 253, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255 ],
            [ 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255 ]
        ], [
            [ 255, 254, 253, 255, 255, 255, 255, 255, 255, 255, 255 ],
            [ 250, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255 ],
            [ 254, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255 ]
        ], [
            [ 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255 ],
            [ 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255 ],
            [ 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255 ]
        ]
    ], [
        [
            [ 186, 251, 250, 255, 255, 255, 255, 255, 255, 255, 255 ],
            [ 234, 251, 244, 254, 255, 255, 255, 255, 255, 255, 255 ],
            [ 251, 251, 243, 253, 254, 255, 254, 255, 255, 255, 255 ]
        ], [
            [ 255, 253, 254, 255, 255, 255, 255, 255, 255, 255, 255 ],
            [ 236, 253, 254, 255, 255, 255, 255, 255, 255, 255, 255 ],
            [ 251, 253, 253, 254, 254, 255, 255, 255, 255, 255, 255 ]
        ], [
            [ 255, 254, 254, 255, 255, 255, 255, 255, 255, 255, 255 ],
            [ 254, 254, 254, 255, 255, 255, 255, 255, 255, 255, 255 ],
            [ 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255 ]
        ], [
            [ 255, 254, 255, 255, 255, 255, 255, 255, 255, 255, 255 ],
            [ 254, 254, 255, 255, 255, 255, 255, 255, 255, 255, 255 ],
            [ 254, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255 ]
        ], [
            [ 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255 ],
            [ 254, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255 ],
            [ 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255 ]
        ], [
            [ 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255 ],
            [ 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255 ],
            [ 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255 ]
        ], [
            [ 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255 ],
            [ 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255 ],
            [ 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255 ]
        ], [
            [ 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255 ],
            [ 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255 ],
            [ 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255 ]
        ]
    ], [
        [
            [ 248, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255 ],
            [ 250, 254, 252, 254, 255, 255, 255, 255, 255, 255, 255 ],
            [ 248, 254, 249, 253, 255, 255, 255, 255, 255, 255, 255 ]
        ], [
            [ 255, 253, 253, 255, 255, 255, 255, 255, 255, 255, 255 ],
            [ 246, 253, 253, 255, 255, 255, 255, 255, 255, 255, 255 ],
            [ 252, 254, 251, 254, 254, 255, 255, 255, 255, 255, 255 ]
        ], [
            [ 255, 254, 252, 255, 255, 255, 255, 255, 255, 255, 255 ],
            [ 248, 254, 253, 255, 255, 255, 255, 255, 255, 255, 255 ],
            [ 253, 255, 254, 254, 255, 255, 255, 255, 255, 255, 255 ]
        ], [
            [ 255, 251, 254, 255, 255, 255, 255, 255, 255, 255, 255 ],
            [ 245, 251, 254, 255, 255, 255, 255, 255, 255, 255, 255 ],
            [ 253, 253, 254, 255, 255, 255, 255, 255, 255, 255, 255 ]
        ], [
            [ 255, 251, 253, 255, 255, 255, 255, 255, 255, 255, 255 ],
            [ 252, 253, 254, 255, 255, 255, 255, 255, 255, 255, 255 ],
            [ 255, 254, 255, 255, 255, 255, 255, 255, 255, 255, 255 ]
        ], [
            [ 255, 252, 255, 255, 255, 255, 255, 255, 255, 255, 255 ],
            [ 249, 255, 254, 255, 255, 255, 255, 255, 255, 255, 255 ],
            [ 255, 255, 254, 255, 255, 255, 255, 255, 255, 255, 255 ]
        ], [
            [ 255, 255, 253, 255, 255, 255, 255, 255, 255, 255, 255 ],
            [ 250, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255 ],
            [ 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255 ]
        ], [
            [ 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255 ],
            [ 254, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255 ],
            [ 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255 ]
        ]
    ]
];
/// Initial DCT token probabilities, same [4][8][3][11] indexing as
/// DCT_UPDATE_PROBS; reset on every keyframe by DecoderState::reset().
pub const DEFAULT_DCT_PROBS: [[[[u8; 11]; 3]; 8]; 4] = [
    [
        [
            [ 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128 ],
            [ 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128 ],
            [ 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128 ],
        ], [
            [ 253, 136, 254, 255, 228, 219, 128, 128, 128, 128, 128 ],
            [ 189, 129, 242, 255, 227, 213, 255, 219, 128, 128, 128 ],
            [ 106, 126, 227, 252, 214, 209, 255, 255, 128, 128, 128 ],
        ], [
            [ 1, 98, 248, 255, 236, 226, 255, 255, 128, 128, 128 ],
            [ 181, 133, 238, 254, 221, 234, 255, 154, 128, 128, 128 ],
            [ 78, 134, 202, 247, 198, 180, 255, 219, 128, 128, 128 ],
        ], [
            [ 1, 185, 249, 255, 243, 255, 128, 128, 128, 128, 128 ],
            [ 184, 150, 247, 255, 236, 224, 128, 128, 128, 128, 128 ],
            [ 77, 110, 216, 255, 236, 230, 128, 128, 128, 128, 128 ],
        ], [
            [ 1, 101, 251, 255, 241, 255, 128, 128, 128, 128, 128 ],
            [ 170, 139, 241, 252, 236, 209, 255, 255, 128, 128, 128 ],
            [ 37, 116, 196, 243, 228, 255, 255, 255, 128, 128, 128 ],
        ], [
            [ 1, 204, 254, 255, 245, 255, 128, 128, 128, 128, 128 ],
            [ 207, 160, 250, 255, 238, 128, 128, 128, 128, 128, 128 ],
            [ 102, 103, 231, 255, 211, 171, 128, 128, 128, 128, 128 ],
        ], [
            [ 1, 152, 252, 255, 240, 255, 128, 128, 128, 128, 128 ],
            [ 177, 135, 243, 255, 234, 225, 128, 128, 128, 128, 128 ],
            [ 80, 129, 211, 255, 194, 224, 128, 128, 128, 128, 128 ],
        ], [
            [ 1, 1, 255, 128, 128, 128, 128, 128, 128, 128, 128 ],
            [ 246, 1, 255, 128, 128, 128, 128, 128, 128, 128, 128 ],
            [ 255, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128 ],
        ]
    ], [
        [
            [ 198, 35, 237, 223, 193, 187, 162, 160, 145, 155, 62 ],
            [ 131, 45, 198, 221, 172, 176, 220, 157, 252, 221, 1 ],
            [ 68, 47, 146, 208, 149, 167, 221, 162, 255, 223, 128 ],
        ], [
            [ 1, 149, 241, 255, 221, 224, 255, 255, 128, 128, 128 ],
            [ 184, 141, 234, 253, 222, 220, 255, 199, 128, 128, 128 ],
            [ 81, 99, 181, 242, 176, 190, 249, 202, 255, 255, 128 ],
        ], [
            [ 1, 129, 232, 253, 214, 197, 242, 196, 255, 255, 128 ],
            [ 99, 121, 210, 250, 201, 198, 255, 202, 128, 128, 128 ],
            [ 23, 91, 163, 242, 170, 187, 247, 210, 255, 255, 128 ],
        ], [
            [ 1, 200, 246, 255, 234, 255, 128, 128, 128, 128, 128 ],
            [ 109, 178, 241, 255, 231, 245, 255, 255, 128, 128, 128 ],
            [ 44, 130, 201, 253, 205, 192, 255, 255, 128, 128, 128 ],
        ], [
            [ 1, 132, 239, 251, 219, 209, 255, 165, 128, 128, 128 ],
            [ 94, 136, 225, 251, 218, 190, 255, 255, 128, 128, 128 ],
            [ 22, 100, 174, 245, 186, 161, 255, 199, 128, 128, 128 ],
        ], [
            [ 1, 182, 249, 255, 232, 235, 128, 128, 128, 128, 128 ],
            [ 124, 143, 241, 255, 227, 234, 128, 128, 128, 128, 128 ],
            [ 35, 77, 181, 251, 193, 211, 255, 205, 128, 128, 128 ],
        ], [
            [ 1, 157, 247, 255, 236, 231, 255, 255, 128, 128, 128 ],
            [ 121, 141, 235, 255, 225, 227, 255, 255, 128, 128, 128 ],
            [ 45, 99, 188, 251, 195, 217, 255, 224, 128, 128, 128 ],
        ], [
            [ 1, 1, 251, 255, 213, 255, 128, 128, 128, 128, 128 ],
            [ 203, 1, 248, 255, 255, 128, 128, 128, 128, 128, 128 ],
            [ 137, 1, 177, 255, 224, 255, 128, 128, 128, 128, 128 ],
        ]
    ], [
        [
            [ 253, 9, 248, 251, 207, 208, 255, 192, 128, 128, 128 ],
            [ 175, 13, 224, 243, 193, 185, 249, 198, 255, 255, 128 ],
            [ 73, 17, 171, 221, 161, 179, 236, 167, 255, 234, 128 ],
        ], [
            [ 1, 95, 247, 253, 212, 183, 255, 255, 128, 128, 128 ],
            [ 239, 90, 244, 250, 211, 209, 255, 255, 128, 128, 128 ],
            [ 155, 77, 195, 248, 188, 195, 255, 255, 128, 128, 128 ],
        ], [
            [ 1, 24, 239, 251, 218, 219, 255, 205, 128, 128, 128 ],
            [ 201, 51, 219, 255, 196, 186, 128, 128, 128, 128, 128 ],
            [ 69, 46, 190, 239, 201, 218, 255, 228, 128, 128, 128 ],
        ], [
            [ 1, 191, 251, 255, 255, 128, 128, 128, 128, 128, 128 ],
            [ 223, 165, 249, 255, 213, 255, 128, 128, 128, 128, 128 ],
            [ 141, 124, 248, 255, 255, 128, 128, 128, 128, 128, 128 ],
        ], [
            [ 1, 16, 248, 255, 255, 128, 128, 128, 128, 128, 128 ],
            [ 190, 36, 230, 255, 236, 255, 128, 128, 128, 128, 128 ],
            [ 149, 1, 255, 128, 128, 128, 128, 128, 128, 128, 128 ],
        ], [
            [ 1, 226, 255, 128, 128, 128, 128, 128, 128, 128, 128 ],
            [ 247, 192, 255, 128, 128, 128, 128, 128, 128, 128, 128 ],
            [ 240, 128, 255, 128, 128, 128, 128, 128, 128, 128, 128 ],
        ], [
            [ 1, 134, 252, 255, 255, 128, 128, 128, 128, 128, 128 ],
            [ 213, 62, 250, 255, 255, 128, 128, 128, 128, 128, 128 ],
            [ 55, 93, 255, 128, 128, 128, 128, 128, 128, 128, 128 ],
        ], [
            [ 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128 ],
            [ 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128 ],
            [ 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128 ],
        ]
    ], [
        [
            [ 202, 24, 213, 235, 186, 191, 220, 160, 240, 175, 255 ],
            [ 126, 38, 182, 232, 169, 184, 228, 174, 255, 187, 128 ],
            [ 61, 46, 138, 219, 151, 178, 240, 170, 255, 216, 128 ],
        ], [
            [ 1, 112, 230, 250, 199, 191, 247, 159, 255, 255, 128 ],
            [ 166, 109, 228, 252, 211, 215, 255, 174, 128, 128, 128 ],
            [ 39, 77, 162, 232, 172, 180, 245, 178, 255, 255, 128 ],
        ], [
            [ 1, 52, 220, 246, 198, 199, 249, 220, 255, 255, 128 ],
            [ 124, 74, 191, 243, 183, 193, 250, 221, 255, 255, 128 ],
            [ 24, 71, 130, 219, 154, 170, 243, 182, 255, 255, 128 ],
        ], [
            [ 1, 182, 225, 249, 219, 240, 255, 224, 128, 128, 128 ],
            [ 149, 150, 226, 252, 216, 205, 255, 171, 128, 128, 128 ],
            [ 28, 108, 170, 242, 183, 194, 254, 223, 255, 255, 128 ],
        ], [
            [ 1, 81, 230, 252, 204, 203, 255, 192, 128, 128, 128 ],
            [ 123, 102, 209, 247, 188, 196, 255, 233, 128, 128, 128 ],
            [ 20, 95, 153, 243, 164, 173, 255, 203, 128, 128, 128 ],
        ], [
            [ 1, 222, 248, 255, 216, 213, 128, 128, 128, 128, 128 ],
            [ 168, 175, 246, 252, 235, 205, 255, 255, 128, 128, 128 ],
            [ 47, 116, 215, 255, 211, 212, 255, 255, 128, 128, 128 ],
        ], [
            [ 1, 121, 236, 253, 212, 214, 255, 255, 128, 128, 128 ],
            [ 141, 84, 213, 252, 201, 202, 255, 219, 128, 128, 128 ],
            [ 42, 80, 160, 240, 162, 185, 255, 205, 128, 128, 128 ],
        ], [
            [ 1, 1, 255, 128, 128, 128, 128, 128, 128, 128, 128 ],
            [ 244, 1, 255, 128, 128, 128, 128, 128, 128, 128, 128 ],
            [ 238, 1, 255, 128, 128, 128, 128, 128, 128, 128, 128 ],
        ]
    ]
];
+
/// Luma DC dequantisation values, indexed by quantiser index (0..127).
pub const Y_DC_QUANTS: [i16; 128] = [
      4,   4,   5,   6,   6,   7,   8,   8,   9,  10,  11,  12,  13,  14,  15,  16,
     17,  18,  19,  20,  21,  22,  23,  23,  24,  25,  26,  27,  28,  29,  30,  31,
     32,  33,  33,  34,  35,  36,  36,  37,  38,  39,  39,  40,  41,  41,  42,  43,
     43,  44,  45,  45,  46,  47,  48,  48,  49,  50,  51,  52,  53,  53,  54,  56,
     57,  58,  59,  60,  62,  63,  65,  66,  68,  70,  72,  74,  76,  79,  81,  84,
     87,  90,  93,  96, 100, 104, 108, 112, 116, 121, 126, 131, 136, 142, 148, 154,
    160, 167, 174, 182, 189, 198, 206, 215, 224, 234, 244, 254, 265, 277, 288, 301,
    313, 327, 340, 355, 370, 385, 401, 417, 434, 452, 470, 489, 509, 529, 550, 572
];
/// Luma AC dequantisation values, indexed by quantiser index (0..127).
pub const Y_AC_QUANTS: [i16; 128] = [
      4,   4,   5,   5,   6,   6,   7,   8,   9,  10,  11,  12,  13,  15,  16,  17,
     19,  20,  22,  23,  25,  26,  28,  29,  31,  32,  34,  35,  37,  38,  40,  41,
     42,  44,  45,  46,  48,  49,  50,  51,  53,  54,  55,  56,  57,  58,  59,  61,
     62,  63,  64,  65,  67,  68,  69,  70,  72,  73,  75,  76,  78,  80,  82,  84,
     86,  88,  91,  93,  96,  99, 102, 105, 109, 112, 116, 121, 125, 130, 135, 140,
    146, 152, 158, 165, 172, 180, 188, 196, 205, 214, 224, 234, 245, 256, 268, 281,
    294, 308, 322, 337, 353, 369, 386, 404, 423, 443, 463, 484, 506, 529, 553, 578,
    604, 631, 659, 688, 718, 749, 781, 814, 849, 885, 922, 960, 1000, 1041, 1083, 1127
];
/// Second-order luma (Y2 / WHT) DC dequantisation values, indexed by quantiser index.
pub const Y2_DC_QUANTS: [i16; 128] = [
      7,   9,  11,  13,  15,  17,  19,  21,  23,  26,  28,  30,  33,  35,  37,  39,
     42,  44,  46,  48,  51,  53,  55,  57,  59,  61,  63,  65,  67,  69,  70,  72,
     74,  75,  77,  78,  80,  81,  83,  84,  85,  87,  88,  89,  90,  92,  93,  94,
     95,  96,  97,  99, 100, 101, 102, 104, 105, 106, 108, 109, 111, 113, 114, 116,
    118, 120, 123, 125, 128, 131, 134, 137, 140, 144, 148, 152, 156, 161, 166, 171,
    176, 182, 188, 195, 202, 209, 217, 225, 234, 243, 253, 263, 274, 285, 297, 309,
    322, 336, 350, 365, 381, 397, 414, 432, 450, 470, 490, 511, 533, 556, 579, 604,
    630, 656, 684, 713, 742, 773, 805, 838, 873, 908, 945, 983, 1022, 1063, 1105, 1148
];
/// Second-order luma (Y2 / WHT) AC dequantisation values, indexed by quantiser index.
pub const Y2_AC_QUANTS: [i16; 128] = [
      7,   9,  11,  13,  16,  18,  21,  24,  26,  29,  32,  35,  38,  41,  43,  46,
     49,  52,  55,  58,  61,  64,  66,  69,  72,  74,  77,  79,  82,  84,  86,  88,
     91,  93,  95,  97,  98, 100, 102, 104, 105, 107, 109, 110, 112, 113, 115, 116,
    117, 119, 120, 122, 123, 125, 127, 128, 130, 132, 134, 136, 138, 141, 143, 146,
    149, 152, 155, 158, 162, 166, 171, 175, 180, 185, 191, 197, 204, 210, 218, 226,
    234, 243, 252, 262, 273, 284, 295, 308, 321, 335, 350, 365, 381, 398, 416, 435,
    455, 476, 497, 520, 544, 569, 595, 622, 650, 680, 711, 743, 776, 811, 848, 885,
    925, 965, 1008, 1052, 1097, 1144, 1193, 1244, 1297, 1351, 1407, 1466, 1526, 1588, 1652, 1719
];
/// Chroma DC dequantisation values, indexed by quantiser index.
/// Same as the luma table for low indices but clamped to 132 at the top end.
pub const UV_DC_QUANTS: [i16; 128] = [
      4,   4,   5,   6,   6,   7,   8,   8,   9,  10,  11,  12,  13,  14,  15,  16,
     17,  18,  19,  20,  21,  22,  23,  23,  24,  25,  26,  27,  28,  29,  30,  31,
     32,  33,  33,  34,  35,  36,  36,  37,  38,  39,  39,  40,  41,  41,  42,  43,
     43,  44,  45,  45,  46,  47,  48,  48,  49,  50,  51,  52,  53,  53,  54,  56,
     57,  58,  59,  60,  62,  63,  65,  66,  68,  70,  72,  74,  76,  79,  81,  84,
     87,  90,  93,  96, 100, 104, 108, 112, 116, 121, 126, 131, 132, 132, 132, 132,
    132, 132, 132, 132, 132, 132, 132, 132, 132, 132, 132, 132, 132, 132, 132, 132,
    132, 132, 132, 132, 132, 132, 132, 132, 132, 132, 132, 132, 132, 132, 132, 132
];
/// Chroma AC dequantisation values, indexed by quantiser index (same values as Y_AC_QUANTS).
pub const UV_AC_QUANTS: [i16; 128] = [
      4,   4,   5,   5,   6,   6,   7,   8,   9,  10,  11,  12,  13,  15,  16,  17,
     19,  20,  22,  23,  25,  26,  28,  29,  31,  32,  34,  35,  37,  38,  40,  41,
     42,  44,  45,  46,  48,  49,  50,  51,  53,  54,  55,  56,  57,  58,  59,  61,
     62,  63,  64,  65,  67,  68,  69,  70,  72,  73,  75,  76,  78,  80,  82,  84,
     86,  88,  91,  93,  96,  99, 102, 105, 109, 112, 116, 121, 125, 130, 135, 140,
    146, 152, 158, 165, 172, 180, 188, 196, 205, 214, 224, 234, 245, 256, 268, 281,
    294, 308, 322, 337, 353, 369, 386, 404, 423, 443, 463, 484, 506, 529, 553, 578,
    604, 631, 659, 688, 718, 749, 781, 814, 849, 885, 922, 960, 1000, 1041, 1083, 1127
];
--- /dev/null
+use nihav_core::frame::*;
+use nihav_core::codecs::blockdsp::edge_emu;
+
/// Saturates a 16-bit intermediate value to the 0..=255 pixel range.
fn clip_u8(val: i16) -> u8 {
    if val < 0 {
        0
    } else if val > 255 {
        255
    } else {
        val as u8
    }
}
+
/// Neighbouring pixels used for intra prediction of the current block.
/// Unavailable neighbours are filled with 0x80 (see `fill`).
pub struct IPredContext {
    // Column of pixels directly left of the block, top to bottom.
    pub left: [u8; 16],
    // True when the left neighbours lie inside the frame.
    pub has_left: bool,
    // Row of pixels directly above the block, left to right.
    pub top: [u8; 16],
    // True when the top neighbours lie inside the frame.
    pub has_top: bool,
    // Top-left corner pixel.
    pub tl: u8,
}
+
+impl IPredContext {
+ pub fn fill(&mut self, src: &[u8], off: usize, stride: usize, tsize: usize, lsize: usize) {
+ if self.has_top {
+ for i in 0..tsize {
+ self.top[i] = src[off - stride + i];
+ }
+ for i in tsize..16 {
+ self.top[i] = 0x80;
+ }
+ } else {
+ self.top = [0x80; 16];
+ }
+ if self.has_left {
+ for i in 0..lsize {
+ self.left[i] = src[off - 1 + i * stride];
+ }
+ for i in lsize..16 {
+ self.left[i] = 0x80;
+ }
+ } else {
+ self.left = [0x80; 16];
+ }
+ if self.has_top && self.has_left {
+ self.tl = src[off - stride - 1];
+ } else {
+ self.tl = 0x80;
+ }
+ }
+}
+
+impl Default for IPredContext {
+ fn default() -> Self {
+ Self {
+ left: [0x80; 16],
+ top: [0x80; 16],
+ tl: 0x80,
+ has_left: false,
+ has_top: false,
+ }
+ }
+}
+
// Q14-scaled cosine basis constants for the 4x4 inverse DCT
// (23170 = round(sqrt(2) * 2^14); 30274/12540 are the odd-basis scales).
const DCT_COEFFS: [i32; 16] = [
    23170,  23170,  23170,  23170,
    30274,  12540, -12540, -30274,
    23170, -23170, -23170,  23170,
    12540, -30274,  30274, -12540
];
+
/// In-place 4x4 inverse DCT over row-major coefficients.
///
/// Two butterfly passes: rows first (result truncated by >> 14), then columns
/// with rounding bias 0x20000 before the final >> 18. `wrapping_mul` keeps the
/// exact two's-complement behaviour for out-of-range bitstream coefficients.
pub fn idct4x4(coeffs: &mut [i16; 16]) {
    let mut pass1 = [0i16; 16];
    // Horizontal (row) pass, no rounding.
    for row in 0..4 {
        let base = row * 4;
        let a = coeffs[base]     as i32;
        let b = coeffs[base + 1] as i32;
        let c = coeffs[base + 2] as i32;
        let d = coeffs[base + 3] as i32;

        let even0 = (a + c).wrapping_mul(23170);
        let even1 = (a - c).wrapping_mul(23170);
        let odd0  = b.wrapping_mul(30274) + d.wrapping_mul(12540);
        let odd1  = b.wrapping_mul(12540) - d.wrapping_mul(30274);

        pass1[base]     = ((even0 + odd0) >> 14) as i16;
        pass1[base + 1] = ((even1 + odd1) >> 14) as i16;
        pass1[base + 2] = ((even1 - odd1) >> 14) as i16;
        pass1[base + 3] = ((even0 - odd0) >> 14) as i16;
    }
    // Vertical (column) pass with rounding.
    for col in 0..4 {
        let a = pass1[col]      as i32;
        let b = pass1[col + 4]  as i32;
        let c = pass1[col + 8]  as i32;
        let d = pass1[col + 12] as i32;

        let even0 = (a + c).wrapping_mul(23170) + 0x20000;
        let even1 = (a - c).wrapping_mul(23170) + 0x20000;
        let odd0  = b.wrapping_mul(30274) + d.wrapping_mul(12540);
        let odd1  = b.wrapping_mul(12540) - d.wrapping_mul(30274);

        coeffs[col]      = ((even0 + odd0) >> 18) as i16;
        coeffs[col + 4]  = ((even1 + odd1) >> 18) as i16;
        coeffs[col + 8]  = ((even1 - odd1) >> 18) as i16;
        coeffs[col + 12] = ((even0 - odd0) >> 18) as i16;
    }
}
+
/// DC-only shortcut of the 4x4 inverse DCT: every output sample equals the
/// transformed DC coefficient.
pub fn idct4x4_dc(coeffs: &mut [i16; 16]) {
    // 23170 = round(sqrt(2) * 2^14), the DC basis scale of the full transform;
    // shifts and the 0x20000 rounding bias mirror the two passes of idct4x4.
    let row = ((coeffs[0] as i32) * 23170) >> 14;
    let dc = ((row * 23170 + 0x20000) >> 18) as i16;
    *coeffs = [dc; 16];
}
+
/// Adds a 4x4 residual block to the pixels at `off` with saturation to 0..=255.
pub fn add_coeffs4x4(dst: &mut [u8], off: usize, stride: usize, coeffs: &[i16; 16]) {
    let rows = dst[off..].chunks_mut(stride);
    for (line, res) in rows.zip(coeffs.chunks(4)) {
        for (pix, &delta) in line.iter_mut().take(4).zip(res.iter()) {
            let sum = (*pix as i16) + delta;
            *pix = sum.max(0).min(255) as u8;
        }
    }
}
/// Adds a 16x1 residual row to the pixels at `off` with saturation to 0..=255.
pub fn add_coeffs16x1(dst: &mut [u8], off: usize, coeffs: &[i16; 16]) {
    for (pix, &delta) in dst[off..].iter_mut().zip(coeffs.iter()) {
        let sum = (*pix as i16) + delta;
        *pix = sum.max(0).min(255) as u8;
    }
}
+
/// Square intra predictors shared by the 16x16 luma and 8x8 chroma blocks.
/// `SIZE` is the block side; neighbours come from an `IPredContext` whose
/// unavailable samples were filled with 0x80.
pub trait IntraPred {
    const SIZE: usize;
    /// DC prediction: fill the block with the rounded average of the
    /// available top/left neighbours, or 0x80 when neither edge exists.
    fn ipred_dc(dst: &mut [u8], mut off: usize, stride: usize, ipred: &IPredContext) {
        let dc;
        if !ipred.has_left && !ipred.has_top {
            dc = 0x80;
        } else {
            let mut dcsum = 0;
            // Base shift is log2(SIZE) - 1; each available edge adds SIZE
            // samples to the sum and bumps the shift by one, so the divisor
            // always matches the number of samples accumulated.
            let mut dcshift = match Self::SIZE {
                    16 => 3,
                    _  => 2,
                };
            if ipred.has_left {
                for el in ipred.left.iter().take(Self::SIZE) {
                    dcsum += *el as u16;
                }
                dcshift += 1;
            }
            if ipred.has_top {
                for el in ipred.top.iter().take(Self::SIZE) {
                    dcsum += *el as u16;
                }
                dcshift += 1;
            }
            dc = ((dcsum + (1 << (dcshift - 1))) >> dcshift) as u8;
        }
        for _ in 0..Self::SIZE {
            let out = &mut dst[off..][..Self::SIZE];
            for el in out.iter_mut() {
                *el = dc;
            }
            off += stride;
        }
    }
    /// Vertical prediction: replicate the top row down the block.
    fn ipred_v(dst: &mut [u8], mut off: usize, stride: usize, ipred: &IPredContext) {
        for _ in 0..Self::SIZE {
            let out = &mut dst[off..][..Self::SIZE];
            out.copy_from_slice(&ipred.top[0..Self::SIZE]);
            off += stride;
        }
    }
    /// Horizontal prediction: replicate each left pixel across its row.
    fn ipred_h(dst: &mut [u8], mut off: usize, stride: usize, ipred: &IPredContext) {
        for leftel in ipred.left.iter().take(Self::SIZE) {
            let out = &mut dst[off..][..Self::SIZE];
            for el in out.iter_mut() {
                *el = *leftel;
            }
            off += stride;
        }
    }
    /// TrueMotion prediction: left[m] + top[n] - top-left, saturated.
    fn ipred_tm(dst: &mut [u8], mut off: usize, stride: usize, ipred: &IPredContext) {
        let tl = ipred.tl as i16;
        for m in 0..Self::SIZE {
            for n in 0..Self::SIZE {
                dst[off + n] = clip_u8((ipred.left[m] as i16) + (ipred.top[n] as i16) - tl);
            }
            off += stride;
        }
    }
}
+
/// Intra predictor for 16x16 luma macroblocks.
pub struct IPred16x16 {}
impl IntraPred for IPred16x16 { const SIZE: usize = 16; }
+
/// Intra predictor for 8x8 chroma blocks.
pub struct IPred8x8 {}
impl IntraPred for IPred8x8 { const SIZE: usize = 8; }
+
// Helper macro for the 4x4 directional intra predictors. The 3-tap arms
// compute smoothed neighbours ((a + 2b + c + 2) >> 2), the *avg arms compute
// half-pel averages ((a + b + 1) >> 1); `top8`/`left8` extend past the first
// four neighbours, replicating the last sample for the final tap.
macro_rules! load_pred4 {
    // Smoothed top-left corner from left[0], tl, top[0].
    (topleft; $ipred: expr) => {{
        let tl = $ipred.tl as u16;
        let a0 = $ipred.top[0] as u16;
        let l0 = $ipred.left[0] as u16;
        ((l0 + tl * 2 + a0 + 2) >> 2) as u8
    }};
    // Four smoothed pixels over top[0..4] (tl feeds the first tap).
    (top; $ipred: expr) => {{
        let tl = $ipred.tl as u16;
        let a0 = $ipred.top[0] as u16;
        let a1 = $ipred.top[1] as u16;
        let a2 = $ipred.top[2] as u16;
        let a3 = $ipred.top[3] as u16;
        let a4 = $ipred.top[4] as u16;
        let p0 = ((tl + a0 * 2 + a1 + 2) >> 2) as u8;
        let p1 = ((a0 + a1 * 2 + a2 + 2) >> 2) as u8;
        let p2 = ((a1 + a2 * 2 + a3 + 2) >> 2) as u8;
        let p3 = ((a2 + a3 * 2 + a4 + 2) >> 2) as u8;
        (p0, p1, p2, p3)
    }};
    // Four smoothed pixels over top[4..8]; top[7] is replicated for the
    // out-of-range tap of the last pixel.
    (top8; $ipred: expr) => {{
        let t3 = $ipred.top[3] as u16;
        let t4 = $ipred.top[4] as u16;
        let t5 = $ipred.top[5] as u16;
        let t6 = $ipred.top[6] as u16;
        let t7 = $ipred.top[7] as u16;
        let p4 = ((t3 + t4 * 2 + t5 + 2) >> 2) as u8;
        let p5 = ((t4 + t5 * 2 + t6 + 2) >> 2) as u8;
        let p6 = ((t5 + t6 * 2 + t7 + 2) >> 2) as u8;
        let p7 = ((t6 + t7 * 2 + t7 + 2) >> 2) as u8;
        (p4, p5, p6, p7)
    }};
    // Half-pel averages between consecutive top neighbours (tl..top[3]).
    (topavg; $ipred: expr) => {{
        let tl = $ipred.tl as u16;
        let a0 = $ipred.top[0] as u16;
        let a1 = $ipred.top[1] as u16;
        let a2 = $ipred.top[2] as u16;
        let a3 = $ipred.top[3] as u16;
        let p0 = ((tl + a0 + 1) >> 1) as u8;
        let p1 = ((a0 + a1 + 1) >> 1) as u8;
        let p2 = ((a1 + a2 + 1) >> 1) as u8;
        let p3 = ((a2 + a3 + 1) >> 1) as u8;
        (p0, p1, p2, p3)
    }};
    // Four smoothed pixels over left[0..4] (tl feeds the first tap).
    (left; $ipred: expr) => {{
        let tl = $ipred.tl as u16;
        let l0 = $ipred.left[0] as u16;
        let l1 = $ipred.left[1] as u16;
        let l2 = $ipred.left[2] as u16;
        let l3 = $ipred.left[3] as u16;
        let l4 = $ipred.left[4] as u16;
        let p0 = ((tl + l0 * 2 + l1 + 2) >> 2) as u8;
        let p1 = ((l0 + l1 * 2 + l2 + 2) >> 2) as u8;
        let p2 = ((l1 + l2 * 2 + l3 + 2) >> 2) as u8;
        let p3 = ((l2 + l3 * 2 + l4 + 2) >> 2) as u8;
        (p0, p1, p2, p3)
    }};
    // Four smoothed pixels over left[4..8]; left[7] replicated for the last tap.
    (left8; $ipred: expr) => {{
        let l3 = $ipred.left[3] as u16;
        let l4 = $ipred.left[4] as u16;
        let l5 = $ipred.left[5] as u16;
        let l6 = $ipred.left[6] as u16;
        let l7 = $ipred.left[7] as u16;
        let p4 = ((l3 + l4 * 2 + l5 + 2) >> 2) as u8;
        let p5 = ((l4 + l5 * 2 + l6 + 2) >> 2) as u8;
        let p6 = ((l5 + l6 * 2 + l7 + 2) >> 2) as u8;
        let p7 = ((l6 + l7 * 2 + l7 + 2) >> 2) as u8;
        (p4, p5, p6, p7)
    }};
    // Half-pel averages between consecutive left neighbours (tl..left[3]).
    (leftavg; $ipred: expr) => {{
        let tl = $ipred.tl as u16;
        let l0 = $ipred.left[0] as u16;
        let l1 = $ipred.left[1] as u16;
        let l2 = $ipred.left[2] as u16;
        let l3 = $ipred.left[3] as u16;
        let p0 = ((tl + l0 + 1) >> 1) as u8;
        let p1 = ((l0 + l1 + 1) >> 1) as u8;
        let p2 = ((l1 + l2 + 1) >> 1) as u8;
        let p3 = ((l2 + l3 + 1) >> 1) as u8;
        (p0, p1, p2, p3)
    }};
}
+
/// Intra predictors for 4x4 sub-blocks (B_PRED modes).
pub struct IPred4x4 {}
impl IPred4x4 {
    /// DC prediction. Unlike the 16x16/8x8 case this always averages all
    /// eight neighbours; unavailable edges were pre-filled with 0x80 by
    /// `IPredContext::fill`, so no availability check is needed here.
    pub fn ipred_dc(dst: &mut [u8], mut off: usize, stride: usize, ipred: &IPredContext) {
        let dc;
        let mut dcsum = 0;
        for el in ipred.left.iter().take(4) {
            dcsum += *el as u16;
        }
        for el in ipred.top.iter().take(4) {
            dcsum += *el as u16;
        }
        // 8 samples -> shift by 3 with rounding bias 4.
        dc = ((dcsum + (1 << 2)) >> 3) as u8;
        for _ in 0..4 {
            let out = &mut dst[off..][..4];
            for el in out.iter_mut() {
                *el = dc;
            }
            off += stride;
        }
    }
    /// TrueMotion prediction: left[m] + top[n] - top-left, saturated.
    pub fn ipred_tm(dst: &mut [u8], mut off: usize, stride: usize, ipred: &IPredContext) {
        let tl = ipred.tl as i16;
        for m in 0..4 {
            for n in 0..4 {
                dst[off + n] = clip_u8((ipred.left[m] as i16) + (ipred.top[n] as i16) - tl);
            }
            off += stride;
        }
    }
    /// Vertical prediction from the smoothed top row.
    pub fn ipred_ve(dst: &mut [u8], mut off: usize, stride: usize, ipred: &IPredContext) {
        let (v0, v1, v2, v3) = load_pred4!(top; ipred);
        let vert_pred = [v0, v1, v2, v3];
        for _ in 0..4 {
            let out = &mut dst[off..][..4];
            out.copy_from_slice(&vert_pred);
            off += stride;
        }
    }
    /// Horizontal prediction from the smoothed left column; the last row
    /// replicates left[3] for the out-of-range tap.
    pub fn ipred_he(dst: &mut [u8], mut off: usize, stride: usize, ipred: &IPredContext) {
        let (p0, p1, p2, _) = load_pred4!(left; ipred);
        let p3 = (((ipred.left[2] as u16) + (ipred.left[3] as u16) * 3 + 2) >> 2) as u8;
        let hor_pred = [p0, p1, p2, p3];
        for m in 0..4 {
            for n in 0..4 {
                dst[off + n] = hor_pred[m];
            }
            off += stride;
        }
    }
    /// Left-down diagonal prediction (uses top[0..8]).
    pub fn ipred_ld(dst: &mut [u8], mut off: usize, stride: usize, ipred: &IPredContext) {
        let (_, p0, p1, p2) = load_pred4!(top; ipred);
        let (p3, p4, p5, p6) = load_pred4!(top8; ipred);

        dst[off + 0] = p0; dst[off + 1] = p1; dst[off + 2] = p2; dst[off + 3] = p3;
        off += stride;
        dst[off + 0] = p1; dst[off + 1] = p2; dst[off + 2] = p3; dst[off + 3] = p4;
        off += stride;
        dst[off + 0] = p2; dst[off + 1] = p3; dst[off + 2] = p4; dst[off + 3] = p5;
        off += stride;
        dst[off + 0] = p3; dst[off + 1] = p4; dst[off + 2] = p5; dst[off + 3] = p6;
    }
    /// Right-down diagonal prediction (uses top-left corner, top and left).
    pub fn ipred_rd(dst: &mut [u8], mut off: usize, stride: usize, ipred: &IPredContext) {
        let tl = load_pred4!(topleft; ipred);
        let (l0, l1, l2, _) = load_pred4!(left; ipred);
        let (t0, t1, t2, _) = load_pred4!(top; ipred);

        dst[off + 0] = tl; dst[off + 1] = t0; dst[off + 2] = t1; dst[off + 3] = t2;
        off += stride;
        dst[off + 0] = l0; dst[off + 1] = tl; dst[off + 2] = t0; dst[off + 3] = t1;
        off += stride;
        dst[off + 0] = l1; dst[off + 1] = l0; dst[off + 2] = tl; dst[off + 3] = t0;
        off += stride;
        dst[off + 0] = l2; dst[off + 1] = l1; dst[off + 2] = l0; dst[off + 3] = tl;
    }
    /// Vertical-right diagonal prediction.
    pub fn ipred_vr(dst: &mut [u8], mut off: usize, stride: usize, ipred: &IPredContext) {
        let tl = load_pred4!(topleft; ipred);
        let (l0, l1, _, _) = load_pred4!(left; ipred);
        let (t0, t1, t2, _) = load_pred4!(top; ipred);
        let (m0, m1, m2, m3) = load_pred4!(topavg; ipred);

        dst[off + 0] = m0; dst[off + 1] = m1; dst[off + 2] = m2; dst[off + 3] = m3;
        off += stride;
        dst[off + 0] = tl; dst[off + 1] = t0; dst[off + 2] = t1; dst[off + 3] = t2;
        off += stride;
        dst[off + 0] = l0; dst[off + 1] = m0; dst[off + 2] = m1; dst[off + 3] = m2;
        off += stride;
        dst[off + 0] = l1; dst[off + 1] = tl; dst[off + 2] = t0; dst[off + 3] = t1;
    }
    /// Vertical-left diagonal prediction.
    pub fn ipred_vl(dst: &mut [u8], mut off: usize, stride: usize, ipred: &IPredContext) {
        let (_, t1, t2, t3) = load_pred4!(top; ipred);
        let (t4, t5, t6, _) = load_pred4!(top8; ipred);
        let (_, m1, m2, m3) = load_pred4!(topavg; ipred);
        let m4 = (((ipred.top[3] as u16) + (ipred.top[4] as u16) + 1) >> 1) as u8;

        dst[off + 0] = m1; dst[off + 1] = m2; dst[off + 2] = m3; dst[off + 3] = m4;
        off += stride;
        dst[off + 0] = t1; dst[off + 1] = t2; dst[off + 2] = t3; dst[off + 3] = t4;
        off += stride;
        dst[off + 0] = m2; dst[off + 1] = m3; dst[off + 2] = m4; dst[off + 3] = t5;
        off += stride;
        dst[off + 0] = t2; dst[off + 1] = t3; dst[off + 2] = t4; dst[off + 3] = t6;
    }
    /// Horizontal-down diagonal prediction.
    pub fn ipred_hd(dst: &mut [u8], mut off: usize, stride: usize, ipred: &IPredContext) {
        let tl = load_pred4!(topleft; ipred);
        let (l0, l1, l2, _) = load_pred4!(left; ipred);
        let (m0, m1, m2, m3) = load_pred4!(leftavg; ipred);
        let (t0, t1, _, _) = load_pred4!(top; ipred);

        dst[off + 0] = m0; dst[off + 1] = tl; dst[off + 2] = t0; dst[off + 3] = t1;
        off += stride;
        dst[off + 0] = m1; dst[off + 1] = l0; dst[off + 2] = m0; dst[off + 3] = tl;
        off += stride;
        dst[off + 0] = m2; dst[off + 1] = l1; dst[off + 2] = m1; dst[off + 3] = l0;
        off += stride;
        dst[off + 0] = m3; dst[off + 1] = l2; dst[off + 2] = m2; dst[off + 3] = l1;
    }
    /// Horizontal-up diagonal prediction; the bottom rows flatten out to
    /// left[3] since no neighbours exist below the block.
    pub fn ipred_hu(dst: &mut [u8], mut off: usize, stride: usize, ipred: &IPredContext) {
        let (_, m1, m2, m3) = load_pred4!(leftavg; ipred);
        let (_, l1, l2, _) = load_pred4!(left; ipred);
        let l3 = (((ipred.left[2] as u16) + (ipred.left[3] as u16) * 3 + 2) >> 2) as u8;
        let p3 = ipred.left[3];

        dst[off + 0] = m1; dst[off + 1] = l1; dst[off + 2] = m2; dst[off + 3] = l2;
        off += stride;
        dst[off + 0] = m2; dst[off + 1] = l2; dst[off + 2] = m3; dst[off + 3] = l3;
        off += stride;
        dst[off + 0] = m3; dst[off + 1] = l3; dst[off + 2] = p3; dst[off + 3] = p3;
        off += stride;
        dst[off + 0] = p3; dst[off + 1] = p3; dst[off + 2] = p3; dst[off + 3] = p3;
    }
}
+
/// Loop-filter delta term: 3 * (q0 - p0) + (p1 - q1).
fn delta(p1: i16, p0: i16, q0: i16, q1: i16) -> i16 {
    3 * (q0 - p0) + (p1 - q1)
}
+
+pub type LoopFilterFunc = fn(buf: &mut [u8], off: usize, step: usize, stride: usize, len: usize, thr: i16, thr_inner: i16, thr_hev: i16);
+
/// Simple loop filter: adjusts only p0/q0 across the edge for `len` lines.
///
/// `step` is the distance between samples across the edge (1 for vertical
/// edges, the row stride for horizontal ones) and `stride` advances to the
/// next filtered line. Only `thr` is used; the inner/hev thresholds exist to
/// match `LoopFilterFunc`.
///
/// NOTE(review): the delta is clamped with `.min(127)` only (no `.max(-128)`)
/// — confirm against the reference decoder before changing.
pub fn simple_loop_filter(buf: &mut [u8], mut off: usize, step: usize, stride: usize, len: usize, thr: i16, _thr_inner: i16, _thr_hev: i16) {
    for _ in 0..len {
        let p1 = buf[off - step * 2] as i16;
        let p0 = buf[off - step] as i16;
        let q0 = buf[off] as i16;
        let q1 = buf[off + step] as i16;
        if (p0 - q0).abs() < thr {
            let d = 3 * (q0 - p0) + (p1 - q1);
            let dq = (d.min(127) + 4) >> 3;
            let dp = if (d & 7) == 4 { dq - 1 } else { dq };
            buf[off - step] = (p0 + dp).max(0).min(255) as u8;
            buf[off] = (q0 - dq).max(0).min(255) as u8;
        }
        off += stride;
    }
}
+
/// Normal (full) loop filter along one edge for `len` pixel lines.
///
/// `step` is the distance between samples across the edge, `stride` advances
/// to the next line. `thr` gates the p0/q0 difference, `thr_inner` gates all
/// inner differences, and `thr_hev` detects high edge variation (which forces
/// the simple two-tap adjustment). `edge` selects the stronger
/// macroblock-edge variant that also adjusts p2/q2.
///
/// NOTE(review): deltas are clamped with `.min(127)` only; large negative
/// deltas are not clamped to -128 — confirm against the reference decoder.
fn normal_loop_filter(buf: &mut [u8], mut off: usize, step: usize, stride: usize, len: usize, thr: i16, thr_inner: i16, thr_hev: i16, edge: bool) {
    for _ in 0..len {
        let p0 = buf[off - step * 1] as i16;
        let q0 = buf[off + step * 0] as i16;
        let dpq = p0 - q0;
        // Note: `<=` here vs the simple filter's strict `<`.
        if dpq.abs() <= thr {
            let p3 = buf[off - step * 4] as i16;
            let p2 = buf[off - step * 3] as i16;
            let p1 = buf[off - step * 2] as i16;
            let q1 = buf[off + step * 1] as i16;
            let q2 = buf[off + step * 2] as i16;
            let q3 = buf[off + step * 3] as i16;
            let dp2 = p3 - p2;
            let dp1 = p2 - p1;
            let dp0 = p1 - p0;
            let dq0 = q1 - q0;
            let dq1 = q2 - q1;
            let dq2 = q3 - q2;
            // Filter only smooth areas: every inner difference within bounds.
            if (dp0.abs() <= thr_inner) && (dp1.abs() <= thr_inner) &&
               (dp2.abs() <= thr_inner) && (dq0.abs() <= thr_inner) &&
               (dq1.abs() <= thr_inner) && (dq2.abs() <= thr_inner) {
                let high_edge_variation = (dp0.abs() > thr_hev) || (dq0.abs() > thr_hev);
                if high_edge_variation {
                    // Sharp feature next to the edge: touch only p0/q0.
                    let diff = delta(p1, p0, q0, q1);
                    let diffq0 = (diff.min(127) + 4) >> 3;
                    let diffp0 = diffq0 - if (diff & 7) == 4 { 1 } else { 0 };
                    buf[off - step * 1] = clip_u8(p0 + diffp0);
                    buf[off + step * 0] = clip_u8(q0 - diffq0);
                } else if edge {
                    // Macroblock edge: strong 6-pixel filter with tapering
                    // weights 27/18/9 (out of 128) for p0..p2/q0..q2.
                    let d = delta(p1, p0, q0, q1);
                    let diff0 = (d * 27 + 63) >> 7;
                    buf[off - step * 1] = clip_u8(p0 + diff0);
                    buf[off + step * 0] = clip_u8(q0 - diff0);
                    let diff1 = (d * 18 + 63) >> 7;
                    buf[off - step * 2] = clip_u8(p1 + diff1);
                    buf[off + step * 1] = clip_u8(q1 - diff1);
                    let diff2 = (d * 9 + 63) >> 7;
                    buf[off - step * 3] = clip_u8(p2 + diff2);
                    buf[off + step * 2] = clip_u8(q2 - diff2);
                } else {
                    // Inner edge: adjust p0/q0 plus half the delta on p1/q1.
                    let diff = 3 * (q0 - p0);
                    let diffq0 = (diff.min(127) + 4) >> 3;
                    let diffp0 = diffq0 - if (diff & 7) == 4 { 1 } else { 0 };
                    buf[off - step * 1] = clip_u8(p0 + diffp0);
                    buf[off + step * 0] = clip_u8(q0 - diffq0);
                    let diff2 = (diffq0 + 1) >> 1;
                    buf[off - step * 2] = clip_u8(p1 + diff2);
                    buf[off + step * 1] = clip_u8(q1 - diff2);
                }
            }
        }
        off += stride;
    }
}
+
/// Normal loop filter for inner (sub-block) edges; see `normal_loop_filter`.
pub fn normal_loop_filter_inner(buf: &mut [u8], off: usize, step: usize, stride: usize, len: usize, thr: i16, thr_inner: i16, thr_hev: i16) {
    normal_loop_filter(buf, off, step, stride, len, thr, thr_inner, thr_hev, false);
}
+
/// Normal loop filter for macroblock edges (strong variant); see `normal_loop_filter`.
pub fn normal_loop_filter_edge(buf: &mut [u8], off: usize, step: usize, stride: usize, len: usize, thr: i16, thr_inner: i16, thr_hev: i16) {
    normal_loop_filter(buf, off, step, stride, len, thr, thr_inner, thr_hev, true);
}
+
// 6-tap interpolation filter coefficients (sum = 128) for the eight
// eighth-pel phases; phase 0 is a plain copy, phase 4 is the half-pel filter.
const VP7_BICUBIC_FILTERS: [[i16; 6]; 8] = [
    [  0,   0, 128,   0,   0,  0 ],
    [  0,  -6, 123,  12,  -1,  0 ],
    [  2, -11, 108,  36,  -8,  1 ],
    [  0,  -9,  93,  50,  -6,  0 ],
    [  3, -16,  77,  77, -16,  3 ],
    [  0,  -6,  50,  93,  -9,  0 ],
    [  1,  -8,  36, 108, -11,  2 ],
    [  0,  -1,  12, 123,  -6,  0 ]
];
+
// Applies the 6-tap filter of phase `$mode` to six samples starting at
// `$off`, spaced `$step` apart (1 = horizontal, stride = vertical), with
// rounding bias 64 before the >> 7 normalisation.
macro_rules! interpolate {
    ($src: expr, $off: expr, $step: expr, $mode: expr) => {{
        let s0 = $src[$off + 0 * $step] as i32;
        let s1 = $src[$off + 1 * $step] as i32;
        let s2 = $src[$off + 2 * $step] as i32;
        let s3 = $src[$off + 3 * $step] as i32;
        let s4 = $src[$off + 4 * $step] as i32;
        let s5 = $src[$off + 5 * $step] as i32;
        let filt = &VP7_BICUBIC_FILTERS[$mode];
        let src = [s0, s1, s2, s3, s4, s5];
        let mut val = 64;
        for (s, c) in src.iter().zip(filt.iter()) {
            val += s * (*c as i32);
        }
        clip_u8((val >> 7) as i16)
    }}
}
+
// Extra source samples the 6-tap filter needs before the block...
const EDGE_PRE: usize = 2;
// ...and after it.
const EDGE_POST: usize = 4;
// Row stride of the intermediate buffer used by the two-pass (h then v) path.
const TMP_STRIDE: usize = 16;
+
/// Interpolates one `size`x`size` block from an already edge-extended source.
///
/// `src` points `EDGE_PRE` samples above and left of the block so every
/// filter tap is in range; `mx`/`my` (0..7) select the eighth-pel phase.
/// Four paths: plain copy, horizontal-only, vertical-only, or horizontal into
/// a temporary buffer followed by the vertical pass.
fn mc_block_common(dst: &mut [u8], mut doff: usize, dstride: usize, src: &[u8], sstride: usize, size: usize, mx: usize, my: usize) {
    if (mx == 0) && (my == 0) {
        // Integer position: skip the margins and copy.
        let dst = &mut dst[doff..];
        let src = &src[EDGE_PRE + EDGE_PRE * sstride..];
        for (out, src) in dst.chunks_mut(dstride).take(size).zip(src.chunks(sstride)) {
            (&mut out[0..size]).copy_from_slice(&src[0..size]);
        }
    } else if my == 0 {
        // Horizontal filtering only; skip the top margin rows.
        let src = &src[EDGE_PRE * sstride..];
        for src in src.chunks(sstride).take(size) {
            for x in 0..size {
                dst[doff + x] = interpolate!(src, x, 1, mx);
            }
            doff += dstride;
        }
    } else if mx == 0 {
        // Vertical filtering only; skip the left margin columns.
        let src = &src[EDGE_PRE..];
        for y in 0..size {
            for x in 0..size {
                dst[doff + x] = interpolate!(src, x + y * sstride, sstride, my);
            }
            doff += dstride;
        }
    } else {
        // Two passes: horizontal into `tmp` (including the extra rows the
        // vertical filter needs), then vertical from `tmp` into `dst`.
        let mut tmp = [0u8; TMP_STRIDE * (16 + EDGE_PRE + EDGE_POST)];
        for (y, dst) in tmp.chunks_mut(TMP_STRIDE).take(size + EDGE_PRE + EDGE_POST).enumerate() {
            for x in 0..size {
                dst[x] = interpolate!(src, x + y * sstride, 1, mx);
            }
        }
        for y in 0..size {
            for x in 0..size {
                dst[doff + x] = interpolate!(tmp, x + y * TMP_STRIDE, TMP_STRIDE, my);
            }
            doff += dstride;
        }
    }
}
/// Motion-compensates one `size`x`size` block from the reference frame.
///
/// Motion vectors are in eighth-pel units: `mv >> 3` is the integer offset,
/// `mv & 7` the filter phase. When the filter footprint would leave the
/// (macroblock-aligned) plane, the area is first replicated into `mc_buf`
/// via `edge_emu`.
fn mc_block(dst: &mut [u8], doff: usize, dstride: usize, xpos: usize, ypos: usize,
            mvx: i16, mvy: i16, reffrm: NAVideoBufferRef<u8>, plane: usize,
            mc_buf: &mut [u8], size: usize) {
    // Fast path: zero MV is a straight copy, no margins needed.
    if (mvx == 0) && (mvy == 0) {
        let dst = &mut dst[doff..];
        let sstride = reffrm.get_stride(plane);
        let srcoff = reffrm.get_offset(plane) + xpos + ypos * sstride;
        let src = &reffrm.get_data();
        let src = &src[srcoff..];
        for (out, src) in dst.chunks_mut(dstride).take(size).zip(src.chunks(sstride)) {
            (&mut out[0..size]).copy_from_slice(&src[0..size]);
        }
        return;
    }
    // Plane dimensions rounded up to macroblock (16/8) alignment.
    let (w, h) = reffrm.get_dimensions(plane);
    let wa = if plane == 0 { ((w + 15) & !15) } else { ((w + 7) & !7) } as isize;
    let ha = if plane == 0 { ((h + 15) & !15) } else { ((h + 7) & !7) } as isize;
    let bsize = (size as isize) + (EDGE_PRE as isize) + (EDGE_POST as isize);
    // Top-left of the filter footprint (EDGE_PRE before the block).
    let ref_x = (xpos as isize) + ((mvx >> 3) as isize) - (EDGE_PRE as isize);
    let ref_y = (ypos as isize) + ((mvy >> 3) as isize) - (EDGE_PRE as isize);

    let (src, sstride) = if (ref_x < 0) || (ref_x + bsize > wa) || (ref_y < 0) || (ref_y + bsize > ha) {
        // Footprint crosses the plane border: build an edge-extended copy.
        edge_emu(&reffrm, ref_x, ref_y, bsize as usize, bsize as usize, mc_buf, 32, plane);
        (mc_buf as &[u8], 32)
    } else {
        let off = reffrm.get_offset(plane);
        let stride = reffrm.get_stride(plane);
        let data = reffrm.get_data();
        (&data[off + (ref_x as usize) + (ref_y as usize) * stride..], stride)
    };
    let mx = (mvx & 7) as usize;
    let my = (mvy & 7) as usize;
    mc_block_common(dst, doff, dstride, src, sstride, size, mx, my);
}
/// Motion compensation for a 16x16 block; see `mc_block`.
pub fn mc_block16x16(dst: &mut [u8], doff: usize, dstride: usize, xpos: usize, ypos: usize,
                     mvx: i16, mvy: i16, src: NAVideoBufferRef<u8>, plane: usize, mc_buf: &mut [u8]) {
    mc_block(dst, doff, dstride, xpos, ypos, mvx, mvy, src, plane, mc_buf, 16);
}
/// Motion compensation for an 8x8 block; see `mc_block`.
pub fn mc_block8x8(dst: &mut [u8], doff: usize, dstride: usize, xpos: usize, ypos: usize,
                   mvx: i16, mvy: i16, src: NAVideoBufferRef<u8>, plane: usize, mc_buf: &mut [u8]) {
    mc_block(dst, doff, dstride, xpos, ypos, mvx, mvy, src, plane, mc_buf, 8);
}
/// Motion compensation for a 4x4 block; see `mc_block`.
pub fn mc_block4x4(dst: &mut [u8], doff: usize, dstride: usize, xpos: usize, ypos: usize,
                   mvx: i16, mvy: i16, src: NAVideoBufferRef<u8>, plane: usize, mc_buf: &mut [u8]) {
    mc_block(dst, doff, dstride, xpos, ypos, mvx, mvy, src, plane, mc_buf, 4);
}
/// Motion compensation for the special VP7 pitch modes, which read the
/// reference with a modified pitch (skewed and/or interlaced line sampling).
///
/// Per pitch mode, `Y_MUL` scales the vertical step and `Y_OFF` skews each
/// row horizontally for the luma plane; chroma only honours the interlacing
/// bit (assumed to be mode bit 1 — see `ILACE_CHROMA`). TODO(review):
/// confirm the mode tables against the reference VP7 decoder.
pub fn mc_block_special(dst: &mut [u8], doff: usize, dstride: usize, xpos: usize, ypos: usize,
                        mvx: i16, mvy: i16, reffrm: NAVideoBufferRef<u8>, plane: usize,
                        mc_buf: &mut [u8], size: usize, pitch_mode: u8) {
    const Y_MUL: [isize; 8] = [ 1, 0, 2, 4, 1, 1, 2, 2 ];
    const Y_OFF: [isize; 8] = [ 0, 4, 0, 0, 1, -1, 1, -1 ];
    const ILACE_CHROMA: [bool; 8] = [ false, false, true, true, false, false, true, true ]; // mode&2 != 0

    let pitch_mode = (pitch_mode & 7) as usize;
    let (xstep, ymul) = if plane == 0 {
        (Y_OFF[pitch_mode], Y_MUL[pitch_mode])
    } else {
        (0, if ILACE_CHROMA[pitch_mode] { 2 } else { 1 })
    };

    // Plane dimensions rounded up to macroblock (16/8) alignment.
    let (w, h) = reffrm.get_dimensions(plane);
    let wa = if plane == 0 { ((w + 15) & !15) } else { ((w + 7) & !7) } as isize;
    let ha = if plane == 0 { ((h + 15) & !15) } else { ((h + 7) & !7) } as isize;
    // Bounding box of the skewed/strided filter footprint.
    let start_x = (xpos as isize) - (EDGE_PRE as isize) * xstep;
    let end_x = (xpos as isize) + ((size + EDGE_POST) as isize) * xstep;
    let start_y = (ypos as isize) - (EDGE_PRE as isize) * ymul;
    let end_y = (ypos as isize) + ((size + EDGE_POST) as isize) * ymul;
    let off = reffrm.get_offset(plane);
    let stride = reffrm.get_stride(plane);
    let (src, sstride) = if (start_x >= 0) && (end_x <= wa) && (start_y >= 0) && (end_y <= ha) {
        // In bounds: fold the horizontal skew into the effective stride.
        let data = reffrm.get_data();
        (&data[off + (start_x as usize) + (start_y as usize) * stride..],
         ((stride as isize) + xstep) as usize)
    } else {
        // Out of bounds: edge-extend into mc_buf (stride 128), widened so the
        // skewed reads stay inside; `bo` re-centres for a negative skew.
        let add = (size + EDGE_PRE + EDGE_POST) * (xstep.abs() as usize);
        let bw = size + EDGE_PRE + EDGE_POST + add;
        let bh = (end_y - start_y) as usize;
        let bo = if xstep >= 0 { 0 } else { add };
        edge_emu(&reffrm, start_x + (bo as isize), start_y, bw, bh, mc_buf, 128, plane);
        (&mc_buf[bo..], (128 + xstep) as usize)
    };
    let mx = (mvx & 7) as usize;
    let my = (mvy & 7) as usize;
    match ymul {
        0 => unimplemented!(),
        1 => mc_block_common(dst, doff, dstride, src, sstride, size, mx, my),
        2 => {
            // x2 pitch: four half-size blocks from every second source line.
            let hsize = size / 2;
            for y in 0..2 {
                for x in 0..2 {
                    mc_block_common(dst, doff + x * hsize + y * hsize * dstride, dstride,
                                    &src[x * hsize + y * sstride..], sstride * 2, hsize, mx, my);
                }
            }
        },
        4 => {
            // x4 pitch: sixteen quarter-size blocks from every fourth line.
            let qsize = size / 4;
            for y in 0..4 {
                for x in 0..4 {
                    mc_block_common(dst, doff + x * qsize + y * qsize * dstride, dstride,
                                    &src[x * qsize + y * sstride..], sstride * 4, qsize, mx, my);
                }
            }
        },
        _ => unreachable!(),
    };
}
+
+pub fn fade_frame(srcfrm: NAVideoBufferRef<u8>, dstfrm: &mut NASimpleVideoFrame<u8>, alpha: u16, beta: u16) {
+ let mut fade_lut = [0u8; 256];
+ for (i, el) in fade_lut.iter_mut().enumerate() {
+ let y = i as u16;
+ *el = (y + ((y * beta) >> 8) + alpha).max(0).min(255) as u8;
+ }
+
+ let (w, h) = srcfrm.get_dimensions(0);
+ let (wa, ha) = ((w + 15) & !15, (h + 15) & !15);
+ let soff = srcfrm.get_offset(0);
+ let sstride = srcfrm.get_stride(0);
+ let sdata = srcfrm.get_data();
+ let src = &sdata[soff..];
+ let dstride = dstfrm.stride[0];
+ let dst = &mut dstfrm.data[dstfrm.offset[0]..];
+ for (src, dst) in src.chunks(sstride).zip(dst.chunks_mut(dstride)).take(ha) {
+ for (s, d) in src.iter().zip(dst.iter_mut()).take(wa) {
+ *d = fade_lut[*s as usize];
+ }
+ }
+
+ for plane in 1..3 {
+ let (w, h) = srcfrm.get_dimensions(plane);
+ let (wa, ha) = ((w + 7) & !7, (h + 7) & !7);
+ let soff = srcfrm.get_offset(plane);
+ let sstride = srcfrm.get_stride(plane);
+ let sdata = srcfrm.get_data();
+ let src = &sdata[soff..];
+ let dstride = dstfrm.stride[plane];
+ let dst = &mut dstfrm.data[dstfrm.offset[plane]..];
+ for (src, dst) in src.chunks(sstride).zip(dst.chunks_mut(dstride)).take(ha) {
+ (&mut dst[0..wa]).copy_from_slice(&src[0..wa]);
+ }
+ }
+}