use nihav_core::codecs::*;
-use nihav_core::data::GenericCache;
use nihav_core::io::bitreader::*;
+use nihav_codec_support::codecs::{MV, ZERO_MV};
use super::vpcommon::*;
pub const TOKEN_LARGE: u8 = 5;
}
fn prob2weight(a: u8, b: u8) -> u8 {
- let w = (((a as u16) * (b as u16)) >> 8) as u8;
+ let w = ((u16::from(a) * u16::from(b)) >> 8) as u8;
if w == 0 {
1
} else {
let mut nlen = 0;
for w in weights.iter().rev() {
- let weight = *w as u16;
+ let weight = u16::from(*w);
let mut pos = nlen;
for i in 0..nlen {
if nodes[i].weight > weight {
fn read_huff(&mut self, huff: &VP6Huff) -> DecoderResult<u8> {
let peekval = self.peek(16);
for (i, (code, bit)) in huff.codes.iter().zip(huff.bits.iter()).enumerate() {
- if (peekval >> (16 - *bit)) == (*code as u32) {
- self.skip(*bit as u32)?;
+ if (peekval >> (16 - *bit)) == u32::from(*code) {
+ self.skip(u32::from(*bit))?;
return Ok(i as u8);
}
}
}
}
+// DC-prediction state shared by all planes; replaces the former per-plane
+// GenericCache fields (dc_y/dc_u/dc_v/dc_a). Holds the DC values and
+// reference-frame IDs of the macroblock row above plus the immediate left
+// neighbours of the macroblock currently being decoded.
+#[derive(Default)]
+pub struct VP56DCPred {
+ dc_y: Vec<i16>, // DC values cached from the row above, luma (two entries per MB)
+ dc_u: Vec<i16>, // DC values cached from the row above, U plane (one entry per MB)
+ dc_v: Vec<i16>, // DC values cached from the row above, V plane (one entry per MB)
+ ldc_y: [i16; 2], // last decoded luma DCs to the left (top and bottom block rows)
+ ldc_u: i16, // last decoded U DC to the left
+ ldc_v: i16, // last decoded V DC to the left
+ ref_y: Vec<u8>, // reference IDs matching dc_y entries (INVALID_REF = unusable)
+ ref_c: Vec<u8>, // reference IDs shared by dc_u/dc_v entries
+ ref_left: u8, // reference ID of the macroblock to the left
+ y_idx: usize, // index of the current MB's first entry in dc_y/ref_y
+ c_idx: usize, // index of the current MB's entry in dc_u/dc_v/ref_c
+}
+
+// Sentinel reference ID meaning "no valid neighbour for DC prediction";
+// assumed never to be produced by VPMBType::get_ref_id() — TODO confirm.
+const INVALID_REF: u8 = 42;
+
+impl VP56DCPred {
+ fn new() -> Self { Self::default() }
+ // Resizes all per-row buffers for a frame `mb_w` macroblocks wide.
+ // Each buffer gets two extra entries (one padding slot on each side) so
+ // predict_dc() can read index-1 and index+1 without bounds checks.
+ // NOTE(review): ref_c[0] is pre-set to 0 while all other padding stays
+ // INVALID_REF — presumably intentional for the first chroma column;
+ // confirm against the reference decoder.
+ fn resize(&mut self, mb_w: usize) {
+ self.dc_y.resize(mb_w * 2 + 2, 0);
+ self.dc_u.resize(mb_w + 2, 0);
+ self.dc_v.resize(mb_w + 2, 0);
+ self.ref_y.resize(mb_w * 2 + 2, INVALID_REF);
+ self.ref_c.resize(mb_w + 2, INVALID_REF);
+ self.ref_c[0] = 0;
+ }
+ // Invalidates all cached neighbour references at the start of a frame;
+ // entry 0 of each reference row is deliberately left untouched (padding).
+ fn reset(&mut self) {
+ self.update_row();
+ for el in self.ref_y.iter_mut().skip(1) { *el = INVALID_REF; }
+ for el in self.ref_c.iter_mut().skip(1) { *el = INVALID_REF; }
+ }
+ // Rewinds to the start of a macroblock row: resets the column indices,
+ // clears the "left neighbour" DC values and marks the left reference
+ // as invalid so the first MB of the row cannot predict from the left.
+ fn update_row(&mut self) {
+ self.y_idx = 1;
+ self.c_idx = 1;
+ self.ldc_y = [0; 2];
+ self.ldc_u = 0;
+ self.ldc_v = 0;
+ self.ref_left = INVALID_REF;
+ }
+ // Advances the cache indices past the current macroblock
+ // (two luma columns, one chroma column).
+ fn next_mb(&mut self) {
+ self.y_idx += 2;
+ self.c_idx += 1;
+ }
+}
+
pub struct VP56Decoder {
version: u8,
has_alpha: bool,
mb_w: usize,
mb_h: usize,
models: VP56Models,
+ amodels: VP56Models,
coeffs: [[i16; 64]; 6],
last_mbt: VPMBType,
mb_info: Vec<MBInfo>,
fstate: FrameState,
- dc_y: GenericCache<i16>,
- dc_u: GenericCache<i16>,
- dc_v: GenericCache<i16>,
- dc_a: GenericCache<i16>,
+ dc_pred: VP56DCPred,
last_dc: [[i16; 4]; 3],
top_ctx: [Vec<u8>; 4],
if token != 0 {
sign = bc.read_bool();
}
- level = token as i16;
+ level = i16::from(token);
} else {
let cat: usize = vp_tree!(bc, val_probs[6],
vp_tree!(bc, val_probs[7], 0, 1),
impl VP56Decoder {
pub fn new(version: u8, has_alpha: bool, flip: bool) -> Self {
- let vt = alloc_video_buffer(NAVideoInfo::new(24, 24, false, YUV420_FORMAT), 4).unwrap();
+ let vt = alloc_video_buffer(NAVideoInfo::new(24, 24, false, VP_YUVA420_FORMAT), 4).unwrap();
let mc_buf = vt.get_vbuf().unwrap();
Self {
version, has_alpha, flip,
mb_w: 0,
mb_h: 0,
models: VP56Models::new(),
+ amodels: VP56Models::new(),
coeffs: [[0; 64]; 6],
last_mbt: VPMBType::InterNoMV,
mb_info: Vec::new(),
fstate: FrameState::new(),
- dc_y: GenericCache::new(0, 0, 0),
- dc_u: GenericCache::new(0, 0, 0),
- dc_v: GenericCache::new(0, 0, 0),
- dc_a: GenericCache::new(0, 0, 0),
+ dc_pred: VP56DCPred::new(),
last_dc: [[0; 4]; 3],
top_ctx: [Vec::new(), Vec::new(), Vec::new(), Vec::new()],
self.mb_w = (self.width + 15) >> 4;
self.mb_h = (self.height + 15) >> 4;
self.mb_info.resize(self.mb_w * self.mb_h, MBInfo::default());
- self.dc_y = GenericCache::new(2, 1 + self.mb_w * 2, 0);
- self.dc_u = GenericCache::new(1, 1 + self.mb_w, 0);
- self.dc_v = GenericCache::new(1, 1 + self.mb_w, 0);
- self.dc_a = GenericCache::new(2, 1 + self.mb_w * 2, 0);
self.top_ctx = [vec![0; self.mb_w * 2], vec![0; self.mb_w], vec![0; self.mb_w], vec![0; self.mb_w * 2]];
}
pub fn init(&mut self, supp: &mut NADecoderSupport, vinfo: NAVideoInfo) -> DecoderResult<()> {
- supp.pool_u8.set_dec_bufs(3);
+ supp.pool_u8.set_dec_bufs(3 + if vinfo.get_format().has_alpha() { 1 } else { 0 });
supp.pool_u8.prealloc_video(NAVideoInfo::new(vinfo.get_width(), vinfo.get_height(), false, vinfo.get_format()), 4)?;
self.set_dimensions(vinfo.get_width(), vinfo.get_height());
+ self.dc_pred.resize(self.mb_w);
Ok(())
}
pub fn flush(&mut self) {
let hdr = br.parse_header(&mut bc)?;
validate!((hdr.offset as usize) < aoffset); //XXX: take alpha 3 byte offset into account?
- if hdr.mb_w != 0 {
+ if hdr.mb_w != 0 && (usize::from(hdr.mb_w) != self.mb_w || usize::from(hdr.mb_h) != self.mb_h) {
self.set_dimensions((hdr.mb_w as usize) * 16, (hdr.mb_h as usize) * 16);
}
- let vinfo = NAVideoInfo::new(self.width, self.height, self.flip, YUV420_FORMAT);
+ let fmt = if !self.has_alpha {
+ YUV420_FORMAT
+ } else {
+ VP_YUVA420_FORMAT
+ };
+ let vinfo = NAVideoInfo::new(self.width, self.height, self.flip, fmt);
let ret = supp.pool_u8.get_free();
if ret.is_none() {
return Err(DecoderError::AllocError);
if hdr.is_intra {
self.shuf.clear();
+ } else {
+ if !self.shuf.has_refs() {
+ return Err(DecoderError::MissingReference);
+ }
+ }
+
+ let psrc = &src[if self.has_alpha { 3 } else { 0 }..aoffset];
+ self.decode_planes(br, &mut dframe, &mut bc, &hdr, psrc, false)?;
+
+ if self.has_alpha {
+ let asrc = &src[aoffset + 3..];
+ let mut bc = BoolCoder::new(asrc)?;
+ let ahdr = br.parse_header(&mut bc)?;
+ validate!(ahdr.mb_w == hdr.mb_w && ahdr.mb_h == hdr.mb_h);
+ std::mem::swap(&mut self.models, &mut self.amodels);
+ let ret = self.decode_planes(br, &mut dframe, &mut bc, &ahdr, asrc, true);
+ std::mem::swap(&mut self.models, &mut self.amodels);
+ if let Err(err) = ret {
+ return Err(err);
+ }
+ match (hdr.is_golden, ahdr.is_golden) {
+ (true, true) => { self.shuf.add_golden_frame(buf.clone()); },
+ (true, false) => {
+ let cur_golden = self.shuf.get_golden().unwrap();
+ let off = cur_golden.get_offset(3);
+ let stride = cur_golden.get_stride(3);
+ let mut new_golden = supp.pool_u8.get_copy(&buf).unwrap();
+ let dst = new_golden.get_data_mut().unwrap();
+ let src = cur_golden.get_data();
+ dst[off..][..stride * self.mb_h * 16].copy_from_slice(&src[off..][..stride * self.mb_h * 16]);
+ self.shuf.add_golden_frame(new_golden);
+ },
+ (false, true) => {
+ let cur_golden = self.shuf.get_golden().unwrap();
+ let off = cur_golden.get_offset(3);
+ let stride = cur_golden.get_stride(3);
+ let mut new_golden = supp.pool_u8.get_copy(&cur_golden).unwrap();
+ let dst = new_golden.get_data_mut().unwrap();
+ let src = buf.get_data();
+ dst[off..][..stride * self.mb_h * 16].copy_from_slice(&src[off..][..stride * self.mb_h * 16]);
+ self.shuf.add_golden_frame(new_golden);
+ },
+ _ => {},
+ };
+ }
+
+ if hdr.is_golden && !self.has_alpha {
+ self.shuf.add_golden_frame(buf.clone());
}
+ self.shuf.add_frame(buf.clone());
+ Ok((NABufferType::Video(buf), if hdr.is_intra { FrameType::I } else { FrameType::P }))
+ }
+ fn decode_planes(&mut self, br: &mut dyn VP56Parser, dframe: &mut NASimpleVideoFrame<u8>, bc: &mut BoolCoder, hdr: &VP56Header, src: &[u8], alpha: bool) -> DecoderResult<()> {
let mut cr;
if hdr.multistream {
- let off = (if self.has_alpha { 3 } else { 0 }) + (hdr.offset as usize);
+ let off = hdr.offset as usize;
if !hdr.use_huffman {
let bc2 = BoolCoder::new(&src[off..])?;
cr = CoeffReader::Bool(bc2);
} else {
- let br = BitReader::new(&src[off..], aoffset - off, BitReaderMode::BE);
+ let br = BitReader::new(&src[off..], BitReaderMode::BE);
cr = CoeffReader::Huff(br);
}
} else {
br.reset_models(&mut self.models);
self.reset_mbtype_models();
} else {
- self.decode_mode_prob_models(&mut bc)?;
- br.decode_mv_models(&mut bc, &mut self.models.mv_models)?;
+ self.decode_mode_prob_models(bc)?;
+ br.decode_mv_models(bc, &mut self.models.mv_models)?;
}
- br.decode_coeff_models(&mut bc, &mut self.models, hdr.is_intra)?;
+ br.decode_coeff_models(bc, &mut self.models, hdr.is_intra)?;
if hdr.use_huffman {
for i in 0..2 {
self.models.vp6huff.dc_token_tree[i].build_codes(&self.models.coeff_models[i].dc_value_probs);
self.fstate = FrameState::new();
self.fstate.dc_quant = VP56_DC_QUANTS[hdr.quant as usize] * 4;
self.fstate.ac_quant = VP56_AC_QUANTS[hdr.quant as usize] * 4;
- self.loop_thr = VP56_FILTER_LIMITS[hdr.quant as usize] as i16;
+ self.loop_thr = i16::from(VP56_FILTER_LIMITS[hdr.quant as usize]);
self.last_mbt = VPMBType::InterNoMV;
- self.dc_y.reset();
- self.dc_u.reset();
- self.dc_v.reset();
- self.dc_a.reset();
for vec in self.top_ctx.iter_mut() {
for el in vec.iter_mut() {
*el = 0;
self.last_dc = [[0; 4]; 3];
self.last_dc[0][1] = 0x80;
self.last_dc[0][2] = 0x80;
+ self.dc_pred.reset();
self.ilace_mb = false;
for mb_y in 0..self.mb_h {
self.fstate.last_idx = [24; 4];
for mb_x in 0..self.mb_w {
self.fstate.mb_x = mb_x;
- self.decode_mb(&mut dframe, &mut bc, &mut cr, br, &hdr, false)?;
+ self.decode_mb(dframe, bc, &mut cr, br, &hdr, alpha)?;
+ self.dc_pred.next_mb();
}
- self.dc_y.update_row();
- self.dc_u.update_row();
- self.dc_v.update_row();
- self.dc_a.update_row();
- }
-
- if self.has_alpha {
- let asrc = &src[aoffset + 3..];
- let mut bc = BoolCoder::new(asrc)?;
- let ahdr = br.parse_header(&mut bc)?;
- validate!(ahdr.mb_w == hdr.mb_w && ahdr.mb_h == hdr.mb_h);
+ self.dc_pred.update_row();
}
-
- if hdr.is_golden {
- self.shuf.add_golden_frame(buf.clone());
- }
- self.shuf.add_frame(buf.clone());
-
- Ok((NABufferType::Video(buf), if hdr.is_intra { FrameType::I } else { FrameType::P }))
+ Ok(())
}
fn reset_mbtype_models(&mut self) {
const DEFAULT_XMITTED_PROBS: [[u8; 20]; 3] = [
let mut total = 0;
for i in 0..10 {
if i == mode { continue; }
- cnt[i] = 100 * (prob_xmitted[i * 2] as u32);
+ cnt[i] = 100 * u32::from(prob_xmitted[i * 2]);
total += cnt[i];
}
- let sum = (prob_xmitted[mode * 2] as u32) + (prob_xmitted[mode * 2 + 1] as u32);
- mdl.probs[9] = 255 - rescale_mb_mode_prob(prob_xmitted[mode * 2 + 1] as u32, sum);
+ let sum = u32::from(prob_xmitted[mode * 2]) + u32::from(prob_xmitted[mode * 2 + 1]);
+ mdl.probs[9] = 255 - rescale_mb_mode_prob(u32::from(prob_xmitted[mode * 2 + 1]), sum);
let inter_mv0_weight = (cnt[0] as u32) + (cnt[2] as u32);
let inter_mv1_weight = (cnt[3] as u32) + (cnt[4] as u32);
vp_tree!(bc, probs[5], VPMBType::Intra, VPMBType::InterFourMV),
vp_tree!(bc, probs[6],
vp_tree!(bc, probs[7], VPMBType::GoldenNoMV, VPMBType::GoldenMV),
- vp_tree!(bc, probs[8], VPMBType::InterNearest, VPMBType::InterNear)
+ vp_tree!(bc, probs[8], VPMBType::GoldenNearest, VPMBType::GoldenNear)
)
)
);
}
Ok(self.last_mbt)
}
+ #[allow(clippy::cognitive_complexity)]
fn decode_mb(&mut self, frm: &mut NASimpleVideoFrame<u8>, bc: &mut BoolCoder, cr: &mut CoeffReader, br: &mut dyn VP56Parser, hdr: &VP56Header, alpha: bool) -> DecoderResult<()> {
const FOURMV_SUB_TYPE: [VPMBType; 4] = [ VPMBType::InterNoMV, VPMBType::InterMV, VPMBType::InterNearest, VPMBType::InterNear ];
let prob = if mb_x == 0 {
iprob
} else if !self.ilace_mb {
- iprob + (((256 - (iprob as u16)) >> 1) as u8)
+ iprob + (((256 - u16::from(iprob)) >> 1) as u8)
} else {
iprob - (iprob >> 1)
};
}
}
}
- if !alpha {
- for blk_no in 4..6 {
- self.fstate.plane = blk_no - 3;
- self.fstate.ctx_idx = blk_no - 2;
- self.fstate.top_ctx = self.top_ctx[self.fstate.plane][mb_x];
- match cr {
- CoeffReader::None => {
- br.decode_block(bc, &mut self.coeffs[blk_no], &self.models.coeff_models[1], &self.models.vp6models, &mut self.fstate)?;
- },
- CoeffReader::Bool(ref mut bcc) => {
- br.decode_block(bcc, &mut self.coeffs[blk_no], &self.models.coeff_models[1], &self.models.vp6models, &mut self.fstate)?;
- },
- CoeffReader::Huff(ref mut brc) => {
- br.decode_block_huff(brc, &mut self.coeffs[blk_no], &self.models.vp6models, &self.models.vp6huff, &mut self.fstate)?;
- },
- };
- self.top_ctx[self.fstate.plane][mb_x] = self.fstate.top_ctx;
- self.predict_dc(mb_type, mb_pos, blk_no, alpha);
-
+ for blk_no in 4..6 {
+ self.fstate.plane = blk_no - 3;
+ self.fstate.ctx_idx = blk_no - 2;
+ self.fstate.top_ctx = self.top_ctx[self.fstate.plane][mb_x];
+ match cr {
+ CoeffReader::None => {
+ br.decode_block(bc, &mut self.coeffs[blk_no], &self.models.coeff_models[1], &self.models.vp6models, &mut self.fstate)?;
+ },
+ CoeffReader::Bool(ref mut bcc) => {
+ br.decode_block(bcc, &mut self.coeffs[blk_no], &self.models.coeff_models[1], &self.models.vp6models, &mut self.fstate)?;
+ },
+ CoeffReader::Huff(ref mut brc) => {
+ br.decode_block_huff(brc, &mut self.coeffs[blk_no], &self.models.vp6models, &self.models.vp6huff, &mut self.fstate)?;
+ },
+ };
+ self.top_ctx[self.fstate.plane][mb_x] = self.fstate.top_ctx;
+ self.predict_dc(mb_type, mb_pos, blk_no, alpha);
+ if !alpha {
let has_ac = self.fstate.last_idx[self.fstate.ctx_idx] > 0;
if mb_type.is_intra() {
if has_ac {
let x = self.fstate.mb_x * 8;
let y = self.fstate.mb_y * 8;
br.mc_block(frm, self.mc_buf.clone(), src.clone(), 1, x, y, mv, self.loop_thr);
- br.mc_block(frm, self.mc_buf.clone(), src.clone(), 2, x, y, mv, self.loop_thr);
+ br.mc_block(frm, self.mc_buf.clone(), src, 2, x, y, mv, self.loop_thr);
}
}
fn do_fourmv(&mut self, br: &dyn VP56Parser, frm: &mut NASimpleVideoFrame<u8>, mvs: &[MV; 4], alpha: bool) {
let sum = mvs[0] + mvs[1] + mvs[2] + mvs[3];
let mv = MV { x: sum.x / 4, y: sum.y / 4 };
br.mc_block(frm, self.mc_buf.clone(), src.clone(), 1, x, y, mv, self.loop_thr);
- br.mc_block(frm, self.mc_buf.clone(), src.clone(), 2, x, y, mv, self.loop_thr);
+ br.mc_block(frm, self.mc_buf.clone(), src, 2, x, y, mv, self.loop_thr);
}
}
- fn predict_dc(&mut self, mb_type: VPMBType, mb_pos: usize, blk_no: usize, alpha: bool) {
- let mb_x = self.fstate.mb_x;
+ // Predicts the DC coefficient of block `blk_no` (0-3 luma, 4 = U, 5 = V)
+ // from already-decoded neighbours that used the same reference frame,
+ // adds the prediction to coeffs[blk_no][0] and records the reconstructed
+ // DC for blocks to the right and below. Neighbour state now lives in
+ // self.dc_pred, so the old mb_pos/alpha parameters are unused.
+ fn predict_dc(&mut self, mb_type: VPMBType, _mb_pos: usize, blk_no: usize, _alpha: bool) {
 let is_luma = blk_no < 4;
- let (plane, dcs) = if alpha { (0, &mut self.dc_a) } else {
- match blk_no {
- 4 => (1, &mut self.dc_u),
- 5 => (2, &mut self.dc_v),
- _ => (0, &mut self.dc_y),
- }
+ // Select the output plane index and the top-row DC cache for this block.
+ let (plane, dcs) = match blk_no {
+ 4 => (1, &mut self.dc_pred.dc_u),
+ 5 => (2, &mut self.dc_pred.dc_v),
+ _ => (0, &mut self.dc_pred.dc_y),
 };
- let dc_pos = if is_luma {
- dcs.xpos + mb_x * 2 + (blk_no & 1) + (blk_no >> 1) * dcs.stride
+ // dc_ref holds the reference IDs of the cached row; dc_idx points at the
+ // entry directly above this block. Indices start at 1 and the buffers
+ // carry one padding slot on each side (see resize()), so dc_idx - 1 and
+ // dc_idx + 1 below stay in bounds.
+ let (dc_ref, dc_idx) = if is_luma {
+ (&mut self.dc_pred.ref_y, self.dc_pred.y_idx + (blk_no & 1))
 } else {
- dcs.xpos + mb_x
+ (&mut self.dc_pred.ref_c, self.dc_pred.c_idx)
 };
 let ref_id = mb_type.get_ref_id();
- let has_left_blk = is_luma && ((blk_no & 1) != 0);
- let has_top_blk = is_luma && ((blk_no & 2) != 0);
 let mut dc_pred = 0;
 let mut count = 0;
- if has_left_blk || ((mb_x > 0) && (self.mb_info[mb_pos - 1].mb_type.get_ref_id() == ref_id)) {
- dc_pred += dcs.data[dc_pos - 1];
+ // Left neighbour: always usable for the right-hand luma blocks (1 and 3,
+ // whose left sibling is in the same MB); otherwise only when the MB to
+ // the left used the same reference frame.
+ let has_left_blk = is_luma && ((blk_no & 1) == 1);
+ if has_left_blk || self.dc_pred.ref_left == ref_id {
+ dc_pred += match blk_no {
+ 0 | 1 => self.dc_pred.ldc_y[0],
+ 2 | 3 => self.dc_pred.ldc_y[1],
+ 4 => self.dc_pred.ldc_u,
+ _ => self.dc_pred.ldc_v,
+ };
 count += 1;
 }
- if has_top_blk || ((mb_pos >= self.mb_w) && (self.mb_info[mb_pos - self.mb_w].mb_type.get_ref_id() == ref_id)) {
- dc_pred += dcs.data[dc_pos - dcs.stride];
+ // Top neighbour from the cached row; INVALID_REF entries are assumed
+ // never to compare equal to a real ref_id, so they can't match here.
+ if dc_ref[dc_idx] == ref_id {
+ dc_pred += dcs[dc_idx];
 count += 1;
 }
 if self.version == 5 {
- if (count < 2) && has_left_blk {
- dc_pred += dc_pred;
- count += 1;
- }
- if (count < 2) && !has_left_blk && has_top_blk && (mb_x > 0) && (self.mb_info[mb_pos - 1].mb_type.get_ref_id() == ref_id) {
- dc_pred += dc_pred;
- count += 1;
- }
- if (count < 2) && mb_pos == 0 && !is_luma {
- count += 1;
- }
- if (count < 2) && !has_left_blk && !has_top_blk && is_luma && (mb_x > 0) && (self.mb_info[mb_pos - 1].mb_type.get_ref_id() == ref_id) {
- dc_pred += dcs.data[dc_pos + dcs.stride - 1];
- count += 1;
- }
- if (count < 2) && blk_no == 2 {
- dc_pred += dcs.data[dc_pos - dcs.stride + 1];
+ // VP5 additionally tries the adjacent cache entries (the columns to
+ // the left and right of dc_idx) when fewer than two predictors were
+ // found. NOTE(review): this replaces several old GenericCache-based
+ // special cases with two uniform checks — confirm bit-exactness
+ // against VP5 reference streams.
+ if (count < 2) && (dc_ref[dc_idx - 1] == ref_id) {
+ dc_pred += dcs[dc_idx - 1];
 count += 1;
 }
- if (count < 2) && !has_left_blk && (mb_pos >= self.mb_w) && (self.mb_info[mb_pos - self.mb_w].mb_type.get_ref_id() == ref_id) {
- dc_pred += dcs.data[dc_pos - dcs.stride + 1];
- count += 1;
- }
- if (count < 2) && has_left_blk && (mb_pos > self.mb_w) && (mb_x < self.mb_w - 1) && (self.mb_info[mb_pos - self.mb_w + 1].mb_type.get_ref_id() == ref_id) {
- dc_pred += dcs.data[dc_pos - dcs.stride + 1];
+ if (count < 2) && (dc_ref[dc_idx + 1] == ref_id) {
+ dc_pred += dcs[dc_idx + 1];
 count += 1;
 }
 }
 dc_pred /= 2;
 }
 self.coeffs[blk_no][0] += dc_pred;
- self.last_dc[ref_id as usize][plane] = self.coeffs[blk_no][0];
- dcs.data[dc_pos] = self.coeffs[blk_no][0];
+
+ // Store the reconstructed DC and its reference ID so the blocks to the
+ // right and the row below can use them as predictors.
+ let dc = self.coeffs[blk_no][0];
+ if blk_no != 4 { // U and V share ref_c: skip the update on U (blk 4) so V (blk 5) still sees the row-above reference; V then writes it for the next row
+ dc_ref[dc_idx] = ref_id;
+ }
+ match blk_no {
+ 0 | 1 => {
+ self.dc_pred.ldc_y[0] = dc;
+ },
+ 2 | 3 => {
+ self.dc_pred.ldc_y[1] = dc;
+ },
+ 4 => {
+ self.dc_pred.ldc_u = dc;
+ },
+ _ => {
+ self.dc_pred.ldc_v = dc;
+ self.dc_pred.ref_left = ref_id; // left reference updated once per MB, after the last block
+ },
+ };
+ dcs[dc_idx] = dc;
+
+ self.last_dc[ref_id as usize][plane] = dc;
 self.coeffs[blk_no][0] = self.coeffs[blk_no][0].wrapping_mul(self.fstate.dc_quant);
 }
}