use nihav_core::codecs::*;
use nihav_codec_support::codecs::blockdsp;
use nihav_codec_support::codecs::blockdsp::*;
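
/// Pixel format for YUV 4:2:0 video with a full-resolution alpha plane,
/// stored as full-range (JPEG-style) YUV, as used by VP streams carrying alpha.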
pub const VP_YUVA420_FORMAT: NAPixelFormaton = NAPixelFormaton{
        model: ColorModel::YUV(YUVSubmodel::YUVJ),
        components: 4,
        comp_info: [
            Some(NAPixelChromaton{ h_ss: 0, v_ss: 0, packed: false, depth: 8, shift: 0, comp_offs: 0, next_elem: 1}),
            Some(NAPixelChromaton{ h_ss: 1, v_ss: 1, packed: false, depth: 8, shift: 0, comp_offs: 1, next_elem: 1}),
            Some(NAPixelChromaton{ h_ss: 1, v_ss: 1, packed: false, depth: 8, shift: 0, comp_offs: 2, next_elem: 1}),
            Some(NAPixelChromaton{ h_ss: 0, v_ss: 0, packed: false, depth: 8, shift: 0, comp_offs: 3, next_elem: 1}),
            None ],
        elem_size: 0, be: false, alpha: true, palette: false
    };
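
/// Macroblock coding type: intra, inter predicted from the previous frame,
/// or predicted from the golden frame.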
#[derive(Clone,Copy,Debug,PartialEq)]
pub enum VPMBType {
    Intra,
    InterNoMV,
    InterMV,
    InterNearest,
    InterNear,
    InterFourMV,
    GoldenNoMV,
    GoldenMV,
    GoldenNearest,
    GoldenNear,
}

pub const VP_REF_INTER: u8 = 1;
pub const VP_REF_GOLDEN: u8 = 2;

impl VPMBType {
    pub fn is_intra(self) -> bool { self == VPMBType::Intra }
    pub fn get_ref_id(self) -> u8 {
        match self {
            VPMBType::Intra         => 0,
            VPMBType::InterNoMV     |
            VPMBType::InterMV       |
            VPMBType::InterNearest  |
            VPMBType::InterNear     |
            VPMBType::InterFourMV   => VP_REF_INTER,
            _                       => VP_REF_GOLDEN,
        }
    }
}

impl Default for VPMBType {
    fn default() -> Self { VPMBType::Intra }
}
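
/// Reference frame storage: keeps the last decoded frame and the golden frame.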
pub struct VPShuffler {
    lastframe: Option<NAVideoBufferRef<u8>>,
    goldframe: Option<NAVideoBufferRef<u8>>,
}

impl VPShuffler {
    pub fn new() -> Self { VPShuffler { lastframe: None, goldframe: None } }
    pub fn clear(&mut self) { self.lastframe = None; self.goldframe = None; }
    pub fn add_frame(&mut self, buf: NAVideoBufferRef<u8>) {
        self.lastframe = Some(buf);
    }
    pub fn add_golden_frame(&mut self, buf: NAVideoBufferRef<u8>) {
        self.goldframe = Some(buf);
    }
    pub fn get_last(&mut self) -> Option<NAVideoBufferRef<u8>> {
        if let Some(ref frm) = self.lastframe {
            Some(frm.clone())
        } else {
            None
        }
    }
    pub fn get_golden(&mut self) -> Option<NAVideoBufferRef<u8>> {
        if let Some(ref frm) = self.goldframe {
            Some(frm.clone())
        } else {
            None
        }
    }
    pub fn has_refs(&self) -> bool {
        self.lastframe.is_some()
    }
}
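
// Base values and additional-bit probabilities used when decoding large
// DCT coefficient magnitudes in VP5/VP6-style coefficient coding.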
pub const VP56_COEF_BASE: [i16; 6] = [ 5, 7, 11, 19, 35, 67 ];
pub const VP56_COEF_ADD_PROBS: [[u8; 12]; 6] = [
    [ 159, 128,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0 ],
    [ 165, 145, 128,   0,   0,   0,   0,   0,   0,   0,   0,   0 ],
    [ 173, 148, 140, 128,   0,   0,   0,   0,   0,   0,   0,   0 ],
    [ 176, 155, 140, 135, 128,   0,   0,   0,   0,   0,   0,   0 ],
    [ 180, 157, 141, 134, 130, 128,   0,   0,   0,   0,   0,   0 ],
    [ 254, 254, 243, 230, 196, 177, 153, 140, 133, 130, 129, 128 ],
];
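
/// Boolean (range) decoder used by the VP5/VP6 family of bitstreams.
///
/// A minimal usage sketch (the input slice and probability values here are
/// made up for illustration):
///
/// ```ignore
/// let mut bc = BoolCoder::new(&data)?;  // needs at least four bytes of payload
/// let flag   = bc.read_bool();          // bit with probability 128 (i.e. 1/2)
/// let nbits  = bc.read_bits(4);         // four equiprobable bits
/// let coded  = bc.read_prob(200);       // bit with a non-uniform probability
/// ```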
pub struct BoolCoder<'a> {
    pub src:    &'a [u8],
    pos:        usize,
    value:      u32,
    range:      u32,
    bits:       i32,
}
impl<'a> BoolCoder<'a> {
    pub fn new(src: &'a [u8]) -> DecoderResult<Self> {
        if src.len() < 4 { return Err(DecoderError::ShortData); }
        let value = (u32::from(src[0]) << 24) | (u32::from(src[1]) << 16) | (u32::from(src[2]) << 8) | u32::from(src[3]);
        Ok(Self { src, pos: 4, value, range: 255, bits: 8 })
    }
    pub fn read_bool(&mut self) -> bool {
        self.read_prob(128)
    }
    pub fn read_prob(&mut self, prob: u8) -> bool {
        self.renorm();
        let split = 1 + (((self.range - 1) * u32::from(prob)) >> 8);
        let bit;
        if self.value < (split << 24) {
            self.range = split;
            bit = false;
        } else {
            self.range -= split;
            self.value -= split << 24;
            bit = true;
        }
        bit
    }
    pub fn read_bits(&mut self, bits: u8) -> u32 {
        let mut val = 0u32;
        for _ in 0..bits {
            val = (val << 1) | (self.read_prob(128) as u32);
        }
        val
    }
    pub fn read_byte(&mut self) -> u8 {
        let mut val = 0u8;
        for _ in 0..8 {
            val = (val << 1) | (self.read_prob(128) as u8);
        }
        val
    }
    pub fn read_sbits(&mut self, bits: u8) -> i32 {
        let mut val = if self.read_prob(128) { -1i32 } else { 0i32 };
        for _ in 1..bits {
            val = (val << 1) | (self.read_prob(128) as i32);
        }
        val
    }
    pub fn read_probability(&mut self) -> u8 {
        let val = self.read_bits(7) as u8;
        if val == 0 {
            1
        } else {
            val << 1
        }
    }
    fn renorm(&mut self) {
        let shift = self.range.leading_zeros() & 7;
        self.range <<= shift;
        self.value <<= shift;
        self.bits  -= shift as i32;
        if (self.bits <= 0) && (self.pos < self.src.len()) {
            self.value |= u32::from(self.src[self.pos]) << (-self.bits as u8);
            self.pos += 1;
            self.bits += 8;
        }
/*        while self.range < 0x80 {
            self.range <<= 1;
            self.value <<= 1;
            self.bits  -= 1;
            if (self.bits <= 0) && (self.pos < self.src.len()) {
                self.value |= u32::from(self.src[self.pos]);
                self.pos += 1;
                self.bits = 8;
            }
        }*/
    }
    pub fn skip_bytes(&mut self, nbytes: usize) {
        for _ in 0..nbytes {
            self.value <<= 8;
            if self.pos < self.src.len() {
                self.value |= u32::from(self.src[self.pos]);
                self.pos += 1;
            }
        }
    }
}
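
/// Rescales a probability with the given weights and clamps the result to the range 1..=maxval.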
#[allow(clippy::trivially_copy_pass_by_ref)]
pub fn rescale_prob(prob: u8, weights: &[i16; 2], maxval: i32) -> u8 {
    (((i32::from(prob) * i32::from(weights[0]) + 128) >> 8) + i32::from(weights[1])).min(maxval).max(1) as u8
}
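
/// Walks a binary probability tree with a `BoolCoder`: each internal node reads one bit
/// with the given probability and selects the corresponding subtree; a plain leaf yields its value.
///
/// Sketch of a two-level tree (the probabilities `prob0`/`prob1` are illustrative,
/// not taken from any real codec table):
///
/// ```ignore
/// let symbol = vp_tree!(bc, prob0,
///                       vp_tree!(bc, prob1, 0, 1),
///                       2);
/// ```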
macro_rules! vp_tree {
    ($bc: expr, $prob: expr, $node1: expr, $node2: expr) => {
        if !$bc.read_prob($prob) {
            $node1
        } else {
            $node2
        }
    };
    ($leaf: expr) => { $leaf }
}
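
// 16.16 fixed-point cosine constants for the VP3-style IDCT: CnSm = round(cos(n * PI / 16) * 65536).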
const C1S7: i32 = 64277;
const C2S6: i32 = 60547;
const C3S5: i32 = 54491;
const C4S4: i32 = 46341;
const C5S3: i32 = 36410;
const C6S2: i32 = 25080;
const C7S1: i32 = 12785;
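
/// Fixed-point multiply matching the 16.16 constants above: the product scaled down by 65536.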
fn mul16(a: i32, b: i32) -> i32 {
    (a * b) >> 16
}
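
// One 8-point IDCT pass: reads eight input expressions and writes eight outputs
// with the requested rounding bias, final shift and output type.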
macro_rules! idct_step {
    ($s0:expr, $s1:expr, $s2:expr, $s3:expr, $s4:expr, $s5:expr, $s6:expr, $s7:expr,
     $d0:expr, $d1:expr, $d2:expr, $d3:expr, $d4:expr, $d5:expr, $d6:expr, $d7:expr,
     $bias:expr, $shift:expr, $otype:ty) => {
        let t_a  = mul16(C1S7, i32::from($s1)) + mul16(C7S1, i32::from($s7));
        let t_b  = mul16(C7S1, i32::from($s1)) - mul16(C1S7, i32::from($s7));
        let t_c  = mul16(C3S5, i32::from($s3)) + mul16(C5S3, i32::from($s5));
        let t_d  = mul16(C3S5, i32::from($s5)) - mul16(C5S3, i32::from($s3));
        let t_a1 = mul16(C4S4, t_a - t_c);
        let t_b1 = mul16(C4S4, t_b - t_d);
        let t_c  = t_a + t_c;
        let t_d  = t_b + t_d;
        let t_e  = mul16(C4S4, i32::from($s0 + $s4)) + $bias;
        let t_f  = mul16(C4S4, i32::from($s0 - $s4)) + $bias;
        let t_g  = mul16(C2S6, i32::from($s2)) + mul16(C6S2, i32::from($s6));
        let t_h  = mul16(C6S2, i32::from($s2)) - mul16(C2S6, i32::from($s6));
        let t_e1 = t_e - t_g;
        let t_g  = t_e + t_g;
        let t_a  = t_f + t_a1;
        let t_f  = t_f - t_a1;
        let t_b  = t_b1 - t_h;
        let t_h  = t_b1 + t_h;

        $d0 = ((t_g + t_c) >> $shift) as $otype;
        $d7 = ((t_g - t_c) >> $shift) as $otype;
        $d1 = ((t_a + t_h) >> $shift) as $otype;
        $d2 = ((t_a - t_h) >> $shift) as $otype;
        $d3 = ((t_e1 + t_d) >> $shift) as $otype;
        $d4 = ((t_e1 - t_d) >> $shift) as $otype;
        $d5 = ((t_f + t_b) >> $shift) as $otype;
        $d6 = ((t_f - t_b) >> $shift) as $otype;
    }
}
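
/// Full 8x8 inverse DCT: one pass over the rows into a temporary buffer,
/// then a pass down the columns with rounding bias and final shift.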
pub fn vp_idct(coeffs: &mut [i16; 64]) {
    let mut tmp = [0i32; 64];
    for (src, dst) in coeffs.chunks(8).zip(tmp.chunks_mut(8)) {
        idct_step!(src[0], src[1], src[2], src[3], src[4], src[5], src[6], src[7],
                   dst[0], dst[1], dst[2], dst[3], dst[4], dst[5], dst[6], dst[7], 0, 0, i32);
    }
    let src = &tmp;
    let dst = coeffs;
    for i in 0..8 {
        idct_step!(src[0 * 8 + i], src[1 * 8 + i], src[2 * 8 + i], src[3 * 8 + i],
                   src[4 * 8 + i], src[5 * 8 + i], src[6 * 8 + i], src[7 * 8 + i],
                   dst[0 * 8 + i], dst[1 * 8 + i], dst[2 * 8 + i], dst[3 * 8 + i],
                   dst[4 * 8 + i], dst[5 * 8 + i], dst[6 * 8 + i], dst[7 * 8 + i], 8, 4, i16);
    }
}
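
/// DC-only inverse DCT: computes the single value produced by the DC coefficient
/// and fills the whole block with it.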
pub fn vp_idct_dc(coeffs: &mut [i16; 64]) {
    let dc = ((mul16(C4S4, mul16(C4S4, i32::from(coeffs[0]))) + 8) >> 4) as i16;
    for el in coeffs.iter_mut() {
        *el = dc;
    }
}
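
/// Dequantises the coefficients by multiplying them with the corresponding quantiser matrix entries.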
pub fn unquant(coeffs: &mut [i16; 64], qmat: &[i16; 64]) {
    for i in 1..64 {
        coeffs[i] = coeffs[i].wrapping_mul(qmat[i]);
    }
}
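
// The following helpers transform a block in place and write it to the frame:
// `vp_put_block*` store the result as intra data (with a +128 bias), `vp_add_block*`
// add the residue to the already-predicted pixels; `_ilace` variants address
// interlaced lines and `_dc` variants handle DC-only blocks.

/// IDCT the block and store it with a +128 bias at block position (bx, by) of the given plane.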
pub fn vp_put_block(coeffs: &mut [i16; 64], bx: usize, by: usize, plane: usize, frm: &mut NASimpleVideoFrame<u8>) {
    vp_idct(coeffs);
    let mut off = frm.offset[plane] + bx * 8 + by * 8 * frm.stride[plane];
    for y in 0..8 {
        for x in 0..8 {
            frm.data[off + x] = (coeffs[x + y * 8] + 128).min(255).max(0) as u8;
        }
        off += frm.stride[plane];
    }
}
pub fn vp_put_block_ilace(coeffs: &mut [i16; 64], bx: usize, by: usize, plane: usize, frm: &mut NASimpleVideoFrame<u8>) {
    vp_idct(coeffs);
    let mut off = frm.offset[plane] + bx * 8 + ((by & !1) * 8 + (by & 1)) * frm.stride[plane];
    for y in 0..8 {
        for x in 0..8 {
            frm.data[off + x] = (coeffs[x + y * 8] + 128).min(255).max(0) as u8;
        }
        off += frm.stride[plane] * 2;
    }
}
pub fn vp_put_block_dc(coeffs: &mut [i16; 64], bx: usize, by: usize, plane: usize, frm: &mut NASimpleVideoFrame<u8>) {
    vp_idct_dc(coeffs);
    let dc = (coeffs[0] + 128).min(255).max(0) as u8;
    let mut off = frm.offset[plane] + bx * 8 + by * 8 * frm.stride[plane];
    for _ in 0..8 {
        for x in 0..8 {
            frm.data[off + x] = dc;
        }
        off += frm.stride[plane];
    }
}
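
/// IDCT the block and add it as a residue to the pixels already present at block position (bx, by).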
pub fn vp_add_block(coeffs: &mut [i16; 64], bx: usize, by: usize, plane: usize, frm: &mut NASimpleVideoFrame<u8>) {
    vp_idct(coeffs);
    let mut off = frm.offset[plane] + bx * 8 + by * 8 * frm.stride[plane];
    for y in 0..8 {
        for x in 0..8 {
            frm.data[off + x] = (coeffs[x + y * 8] + i16::from(frm.data[off + x])).min(255).max(0) as u8;
        }
        off += frm.stride[plane];
    }
}
pub fn vp_add_block_ilace(coeffs: &mut [i16; 64], bx: usize, by: usize, plane: usize, frm: &mut NASimpleVideoFrame<u8>) {
    vp_idct(coeffs);
    let mut off = frm.offset[plane] + bx * 8 + ((by & !1) * 8 + (by & 1)) * frm.stride[plane];
    for y in 0..8 {
        for x in 0..8 {
            frm.data[off + x] = (coeffs[x + y * 8] + i16::from(frm.data[off + x])).min(255).max(0) as u8;
        }
        off += frm.stride[plane] * 2;
    }
}
pub fn vp_add_block_dc(coeffs: &mut [i16; 64], bx: usize, by: usize, plane: usize, frm: &mut NASimpleVideoFrame<u8>) {
    vp_idct_dc(coeffs);
    let dc = coeffs[0];
    let mut off = frm.offset[plane] + bx * 8 + by * 8 * frm.stride[plane];
    for _ in 0..8 {
        for x in 0..8 {
            frm.data[off + x] = (dc + i16::from(frm.data[off + x])).min(255).max(0) as u8;
        }
        off += frm.stride[plane];
    }
}
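
/// VP3-style loop filter: for `len` positions along the edge it computes a bounded
/// correction from the four samples straddling the block boundary and adjusts the
/// two samples closest to it.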
pub fn vp31_loop_filter(data: &mut [u8], mut off: usize, step: usize, stride: usize,
                        len: usize, loop_str: i16) {
    for _ in 0..len {
        let a = i16::from(data[off - step * 2]);
        let b = i16::from(data[off - step]);
        let c = i16::from(data[off]);
        let d = i16::from(data[off + step]);
        let mut diff = ((a - d) + 3 * (c - b) + 4) >> 3;
        if diff.abs() >= 2 * loop_str {
            diff = 0;
        } else if diff.abs() >= loop_str {
            if diff < 0 {
                diff = -diff - 2 * loop_str;
            } else {
                diff = -diff + 2 * loop_str;
            }
        }
        if diff != 0 {
            data[off - step] = (b + diff).max(0).min(255) as u8;
            data[off]        = (c - diff).max(0).min(255) as u8;
        }
        off += stride;
    }
}
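
/// Motion compensation for one 8x8 block. If the source position is aligned to the
/// 8x8 grid the block is copied directly; otherwise the needed area is assembled in
/// `mc_buf` with edge emulation, loop-filtered across the crossed block boundaries,
/// and then copied/interpolated into the destination frame.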
pub fn vp_copy_block(dst: &mut NASimpleVideoFrame<u8>, src: NAVideoBufferRef<u8>, comp: usize,
                     dx: usize, dy: usize, mv_x: i16, mv_y: i16,
                     preborder: usize, postborder: usize, loop_str: i16,
                     mode: usize, interp: &[BlkInterpFunc], mut mc_buf: NAVideoBufferRef<u8>)
{
    let sx = (dx as isize) + (mv_x as isize);
    let sy = (dy as isize) + (mv_y as isize);
    if ((sx | sy) & 7) == 0 {
        copy_block(dst, src, comp, dx, dy, mv_x, mv_y, 8, 8, preborder, postborder, mode, interp);
        return;
    }
    let pre  = preborder.max(2);
    let post = postborder.max(1);
    let bsize = 8 + pre + post;
    let src_x = sx - (pre as isize);
    let src_y = sy - (pre as isize);
    {
        let tmp_buf = NASimpleVideoFrame::from_video_buf(&mut mc_buf).unwrap();
        edge_emu(src.as_ref(), src_x, src_y, bsize, bsize, &mut tmp_buf.data[tmp_buf.offset[comp]..], tmp_buf.stride[comp], comp, 0);
//        copy_block(&mut tmp_buf, src, comp, 0, 0, src_x as i16, src_y as i16,
//                   bsize, bsize, 0, 0, 0, interp);
        if (sx & 7) != 0 {
            let foff = (8 - (sx & 7)) as usize;
            let off = pre + foff + tmp_buf.offset[comp];
            vp31_loop_filter(tmp_buf.data, off, 1, tmp_buf.stride[comp], bsize, loop_str);
        }
        if (sy & 7) != 0 {
            let foff = (8 - (sy & 7)) as usize;
            let off = (pre + foff) * tmp_buf.stride[comp] + tmp_buf.offset[comp];
            vp31_loop_filter(tmp_buf.data, off, tmp_buf.stride[comp], 1, bsize, loop_str);
        }
    }
    let dxoff = (pre as i16) - (dx as i16);
    let dyoff = (pre as i16) - (dy as i16);
    copy_block(dst, mc_buf, comp, dx, dy, dxoff, dyoff, 8, 8, preborder, postborder, mode, interp);
}
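
// Half-sample interpolation kernels for VP3-style motion compensation:
// plain copy, horizontal average, vertical average and the two diagonal averaging variants.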
fn vp3_interp00(dst: &mut [u8], dstride: usize, src: &[u8], sstride: usize, bw: usize, bh: usize)
{
    let mut didx = 0;
    let mut sidx = 0;
    for _ in 0..bh {
        dst[didx..][..bw].copy_from_slice(&src[sidx..][..bw]);
        didx += dstride;
        sidx += sstride;
    }
}
fn vp3_interp01(dst: &mut [u8], dstride: usize, src: &[u8], sstride: usize, bw: usize, bh: usize)
{
    let mut didx = 0;
    let mut sidx = 0;
    for _ in 0..bh {
        for x in 0..bw { dst[didx + x] = ((u16::from(src[sidx + x]) + u16::from(src[sidx + x + 1])) >> 1) as u8; }
        didx += dstride;
        sidx += sstride;
    }
}
fn vp3_interp10(dst: &mut [u8], dstride: usize, src: &[u8], sstride: usize, bw: usize, bh: usize)
{
    let mut didx = 0;
    let mut sidx = 0;
    for _ in 0..bh {
        for x in 0..bw { dst[didx + x] = ((u16::from(src[sidx + x]) + u16::from(src[sidx + x + sstride])) >> 1) as u8; }
        didx += dstride;
        sidx += sstride;
    }
}
fn vp3_interp1x(dst: &mut [u8], dstride: usize, src: &[u8], sstride: usize, bw: usize, bh: usize)
{
    let mut didx = 0;
    let mut sidx = 0;
    for _ in 0..bh {
        for x in 0..bw {
            dst[didx + x] = ((u16::from(src[sidx + x]) +
                              u16::from(src[sidx + x + sstride + 1])) >> 1) as u8;
        }
        didx += dstride;
        sidx += sstride;
    }
}
fn vp3_interp1y(dst: &mut [u8], dstride: usize, src: &[u8], sstride: usize, bw: usize, bh: usize)
{
    let mut didx = 0;
    let mut sidx = 0;
    for _ in 0..bh {
        for x in 0..bw {
            dst[didx + x] = ((u16::from(src[sidx + x + 1]) +
                              u16::from(src[sidx + x + sstride])) >> 1) as u8;
        }
        didx += dstride;
        sidx += sstride;
    }
}
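
/// Interpolation function table for VP3-family motion compensation,
/// intended for the `interp` argument of `vp_copy_block`/`copy_block`.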
pub const VP3_INTERP_FUNCS: &[blockdsp::BlkInterpFunc] = &[ vp3_interp00, vp3_interp01, vp3_interp10, vp3_interp1x, vp3_interp1y ];