check for missing reference frames in various decoders
[nihav.git] / nihav-duck / src / codecs / vpcommon.rs
1 use nihav_core::codecs::*;
2 use nihav_core::codecs::blockdsp::*;
3
#[derive(Clone,Copy,Debug,PartialEq)]
#[allow(dead_code)]
pub enum VPMBType {
    Intra,
    InterNoMV,
    InterMV,
    InterNearest,
    InterNear,
    InterFourMV,
    GoldenNoMV,
    GoldenMV,
    GoldenNearest,
    GoldenNear,
}

/// Reference-frame ID for macroblocks predicted from the previous frame.
pub const VP_REF_INTER: u8 = 1;
/// Reference-frame ID for macroblocks predicted from the golden frame.
pub const VP_REF_GOLDEN: u8 = 2;

#[allow(dead_code)]
impl VPMBType {
    /// Reports whether this macroblock type carries no inter prediction at all.
    pub fn is_intra(self) -> bool { self == VPMBType::Intra }
    /// Maps the macroblock type to the reference frame it predicts from:
    /// 0 for intra, `VP_REF_INTER` for last-frame types, `VP_REF_GOLDEN`
    /// for golden-frame types.
    pub fn get_ref_id(self) -> u8 {
        use VPMBType::*;
        match self {
            Intra => 0,
            InterNoMV | InterMV | InterNearest | InterNear | InterFourMV => VP_REF_INTER,
            GoldenNoMV | GoldenMV | GoldenNearest | GoldenNear => VP_REF_GOLDEN,
        }
    }
}

impl Default for VPMBType {
    /// Macroblocks start out as intra until the bitstream says otherwise.
    fn default() -> Self { VPMBType::Intra }
}
41
/// Holds the reference frames shared by the Duck VP-family decoders:
/// the most recently decoded frame and the long-term "golden" frame.
#[derive(Default)]
pub struct VPShuffler {
    lastframe: Option<NAVideoBufferRef<u8>>, // last decoded frame, used for ordinary inter prediction
    goldframe: Option<NAVideoBufferRef<u8>>, // golden frame, used for golden-frame prediction modes
}
47
48 impl VPShuffler {
49 pub fn new() -> Self { VPShuffler { lastframe: None, goldframe: None } }
50 pub fn clear(&mut self) { self.lastframe = None; self.goldframe = None; }
51 pub fn add_frame(&mut self, buf: NAVideoBufferRef<u8>) {
52 self.lastframe = Some(buf);
53 }
54 pub fn add_golden_frame(&mut self, buf: NAVideoBufferRef<u8>) {
55 self.goldframe = Some(buf);
56 }
57 pub fn get_last(&mut self) -> Option<NAVideoBufferRef<u8>> {
58 if let Some(ref frm) = self.lastframe {
59 Some(frm.clone())
60 } else {
61 None
62 }
63 }
64 pub fn get_golden(&mut self) -> Option<NAVideoBufferRef<u8>> {
65 if let Some(ref frm) = self.goldframe {
66 Some(frm.clone())
67 } else {
68 None
69 }
70 }
71 pub fn has_refs(&self) -> bool {
72 self.lastframe.is_some()
73 }
74 }
75
/// Base values for the large-magnitude coefficient categories; the decoded
/// extra bits are added on top of these (presumably VP5/VP6 coefficient
/// decoding — verify against the callers).
pub const VP56_COEF_BASE: [i16; 6] = [ 5, 7, 11, 19, 35, 67 ];
/// Per-category bit probabilities for reading the extra magnitude bits;
/// trailing zeroes mark unused entries for the shorter categories.
pub const VP56_COEF_ADD_PROBS: [[u8; 12]; 6] = [
    [ 159, 128,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0 ],
    [ 165, 145, 128,   0,   0,   0,   0,   0,   0,   0,   0,   0 ],
    [ 173, 148, 140, 128,   0,   0,   0,   0,   0,   0,   0,   0 ],
    [ 176, 155, 140, 135, 128,   0,   0,   0,   0,   0,   0,   0 ],
    [ 180, 157, 141, 134, 130, 128,   0,   0,   0,   0,   0,   0 ],
    [ 254, 254, 243, 230, 196, 177, 153, 140, 133, 130, 129, 128 ],
];
85
/// Boolean (binary arithmetic) decoder state as used by the VP-family codecs.
#[allow(dead_code)]
pub struct BoolCoder<'a> {
    pub src: &'a [u8],  // input bitstream
    pos: usize,         // index of the next byte to consume from `src`
    value: u32,         // current 32-bit decoding window (top bits are compared against the range)
    range: u32,         // current arithmetic-coder range, kept in 128..=255 by renorm()
    bits: i32,          // bits of headroom left in `value` before another byte must be shifted in
}
94
95 #[allow(dead_code)]
96 impl<'a> BoolCoder<'a> {
97 pub fn new(src: &'a [u8]) -> DecoderResult<Self> {
98 if src.len() < 3 { return Err(DecoderError::ShortData); }
99 let value = ((src[0] as u32) << 24) | ((src[1] as u32) << 16) | ((src[2] as u32) << 8) | (src[3] as u32);
100 Ok(Self { src, pos: 4, value, range: 255, bits: 8 })
101 }
102 pub fn read_bool(&mut self) -> bool {
103 self.read_prob(128)
104 }
105 pub fn read_prob(&mut self, prob: u8) -> bool {
106 self.renorm();
107 let split = 1 + (((self.range - 1) * (prob as u32)) >> 8);
108 let bit;
109 if self.value < (split << 24) {
110 self.range = split;
111 bit = false;
112 } else {
113 self.range -= split;
114 self.value -= split << 24;
115 bit = true;
116 }
117 bit
118 }
119 pub fn read_bits(&mut self, bits: u8) -> u32 {
120 let mut val = 0u32;
121 for _ in 0..bits {
122 val = (val << 1) | (self.read_prob(128) as u32);
123 }
124 val
125 }
126 pub fn read_byte(&mut self) -> u8 {
127 let mut val = 0u8;
128 for _ in 0..8 {
129 val = (val << 1) | (self.read_prob(128) as u8);
130 }
131 val
132 }
133 pub fn read_sbits(&mut self, bits: u8) -> i32 {
134 let mut val = if self.read_prob(128) { -1i32 } else { 0i32 };
135 for _ in 1..bits {
136 val = (val << 1) | (self.read_prob(128) as i32);
137 }
138 val
139 }
140 pub fn read_probability(&mut self) -> u8 {
141 let val = self.read_bits(7) as u8;
142 if val == 0 {
143 1
144 } else {
145 val << 1
146 }
147 }
148 fn renorm(&mut self) {
149 let shift = self.range.leading_zeros() & 7;
150 self.range <<= shift;
151 self.value <<= shift;
152 self.bits -= shift as i32;
153 if (self.bits <= 0) && (self.pos < self.src.len()) {
154 self.value |= (self.src[self.pos] as u32) << (-self.bits as u8);
155 self.pos += 1;
156 self.bits += 8;
157 }
158 /* while self.range < 0x80 {
159 self.range <<= 1;
160 self.value <<= 1;
161 self.bits -= 1;
162 if (self.bits <= 0) && (self.pos < self.src.len()) {
163 self.value |= self.src[self.pos] as u32;
164 self.pos += 1;
165 self.bits = 8;
166 }
167 }*/
168 }
169 pub fn skip_bytes(&mut self, nbytes: usize) {
170 for _ in 0..nbytes {
171 self.value <<= 8;
172 if self.pos < self.src.len() {
173 self.value |= self.src[self.pos] as u32;
174 self.pos += 1;
175 }
176 }
177 }
178 }
179
/// Rescales a probability by a linear weight pair: `prob * w0 / 256 + w1`,
/// with rounding, clamped into `1..=maxval`.
#[allow(dead_code)]
pub fn rescale_prob(prob: u8, weights: &[i16; 2], maxval: i32) -> u8 {
    let scaled = (i32::from(prob) * i32::from(weights[0]) + 128) >> 8;
    let biased = scaled + i32::from(weights[1]);
    // min-then-max (instead of clamp) so a degenerate maxval < 1 still yields 1
    biased.min(maxval).max(1) as u8
}
184
/// Walks one step of a binary decoding tree: reads a bit with probability
/// `$prob` from the boolean coder `$bc` and evaluates `$node1` on `false`,
/// `$node2` on `true`. The single-argument form is the leaf case, so calls
/// can be nested to express a whole tree.
#[macro_export]
macro_rules! vp_tree {
    ($bc: expr, $prob: expr, $node1: expr, $node2: expr) => {
        if !$bc.read_prob($prob) {
            $node1
        } else {
            $node2
        }
    };
    ($leaf: expr) => { $leaf }
}
196
// IDCT cosine constants, cos(n*pi/16) in 16.16 fixed point
// (e.g. cos(pi/16) * 65536 = 64277, cos(4*pi/16) * 65536 = 46341 = 65536/sqrt(2)).
const C1S7: i32 = 64277;
const C2S6: i32 = 60547;
const C3S5: i32 = 54491;
const C4S4: i32 = 46341;
const C5S3: i32 = 36410;
const C6S2: i32 = 25080;
const C7S1: i32 = 12785;

// 16.16 fixed-point multiply: full 32-bit product scaled back down by 2^16.
fn mul16(a: i32, b: i32) -> i32 {
    (a * b) >> 16
}
208
/// One 8-point 1-D inverse DCT pass (butterfly network) over inputs
/// `$s0..$s7`, writing `$d0..$d7`. `$bias` is the rounding offset added
/// before the final `>> $shift`, and `$otype` is the output element type —
/// this lets the same macro serve both the no-scaling row pass and the
/// rounded, down-shifted column pass of `vp_idct`.
macro_rules! idct_step {
    ($s0:expr, $s1:expr, $s2:expr, $s3:expr, $s4:expr, $s5:expr, $s6:expr, $s7:expr,
     $d0:expr, $d1:expr, $d2:expr, $d3:expr, $d4:expr, $d5:expr, $d6:expr, $d7:expr,
     $bias:expr, $shift:expr, $otype:ty) => {
        // odd-coefficient rotations
        let t_a  = mul16(C1S7, i32::from($s1)) + mul16(C7S1, i32::from($s7));
        let t_b  = mul16(C7S1, i32::from($s1)) - mul16(C1S7, i32::from($s7));
        let t_c  = mul16(C3S5, i32::from($s3)) + mul16(C5S3, i32::from($s5));
        let t_d  = mul16(C3S5, i32::from($s5)) - mul16(C5S3, i32::from($s3));
        let t_a1 = mul16(C4S4, t_a - t_c);
        let t_b1 = mul16(C4S4, t_b - t_d);
        let t_c  = t_a + t_c;
        let t_d  = t_b + t_d;
        // even-coefficient butterflies (bias is folded in here)
        let t_e  = mul16(C4S4, i32::from($s0 + $s4)) + $bias;
        let t_f  = mul16(C4S4, i32::from($s0 - $s4)) + $bias;
        let t_g  = mul16(C2S6, i32::from($s2)) + mul16(C6S2, i32::from($s6));
        let t_h  = mul16(C6S2, i32::from($s2)) - mul16(C2S6, i32::from($s6));
        let t_e1 = t_e - t_g;
        let t_g  = t_e + t_g;
        let t_a  = t_f + t_a1;
        let t_f  = t_f - t_a1;
        let t_b  = t_b1 - t_h;
        let t_h  = t_b1 + t_h;

        // final combine and scale
        $d0 = ((t_g  + t_c) >> $shift) as $otype;
        $d7 = ((t_g  - t_c) >> $shift) as $otype;
        $d1 = ((t_a  + t_h) >> $shift) as $otype;
        $d2 = ((t_a  - t_h) >> $shift) as $otype;
        $d3 = ((t_e1 + t_d) >> $shift) as $otype;
        $d4 = ((t_e1 - t_d) >> $shift) as $otype;
        $d5 = ((t_f  + t_b) >> $shift) as $otype;
        $d6 = ((t_f  - t_b) >> $shift) as $otype;
    }
}
242
/// Full 8x8 inverse DCT performed in place on `coeffs`.
///
/// First pass transforms each row into i32 temporaries with no bias or
/// shift; second pass transforms each column with a rounding bias of 8 and
/// a final `>> 4`, writing the result back as i16.
pub fn vp_idct(coeffs: &mut [i16; 64]) {
    let mut tmp = [0i32; 64];
    // row pass: full precision, no scaling yet
    for (src, dst) in coeffs.chunks(8).zip(tmp.chunks_mut(8)) {
        idct_step!(src[0], src[1], src[2], src[3], src[4], src[5], src[6], src[7],
                   dst[0], dst[1], dst[2], dst[3], dst[4], dst[5], dst[6], dst[7], 0, 0, i32);
    }
    let src = &tmp;
    let dst = coeffs;
    // column pass: round (bias 8) and scale down by 16 (shift 4)
    for i in 0..8 {
        idct_step!(src[0 * 8 + i], src[1 * 8 + i], src[2 * 8 + i], src[3 * 8 + i],
                   src[4 * 8 + i], src[5 * 8 + i], src[6 * 8 + i], src[7 * 8 + i],
                   dst[0 * 8 + i], dst[1 * 8 + i], dst[2 * 8 + i], dst[3 * 8 + i],
                   dst[4 * 8 + i], dst[5 * 8 + i], dst[6 * 8 + i], dst[7 * 8 + i], 8, 4, i16);
    }
}
258
259 pub fn vp_idct_dc(coeffs: &mut [i16; 64]) {
260 let dc = ((mul16(C4S4, mul16(C4S4, i32::from(coeffs[0]))) + 8) >> 4) as i16;
261 for i in 0..64 {
262 coeffs[i] = dc;
263 }
264 }
265
/// Dequantises a block in place by element-wise (wrapping) multiplication
/// with `qmat`. Coefficient 0 is deliberately left untouched — the DC value
/// is presumably scaled separately by the callers.
pub fn unquant(coeffs: &mut [i16; 64], qmat: &[i16; 64]) {
    for (coef, &q) in coeffs.iter_mut().zip(qmat.iter()).skip(1) {
        *coef = coef.wrapping_mul(q);
    }
}
271
272 pub fn vp_put_block(coeffs: &mut [i16; 64], bx: usize, by: usize, plane: usize, frm: &mut NASimpleVideoFrame<u8>) {
273 vp_idct(coeffs);
274 let mut off = frm.offset[plane] + bx * 8 + by * 8 * frm.stride[plane];
275 for y in 0..8 {
276 for x in 0..8 {
277 frm.data[off + x] = (coeffs[x + y * 8] + 128).min(255).max(0) as u8;
278 }
279 off += frm.stride[plane];
280 }
281 }
282
283 pub fn vp_put_block_ilace(coeffs: &mut [i16; 64], bx: usize, by: usize, plane: usize, frm: &mut NASimpleVideoFrame<u8>) {
284 vp_idct(coeffs);
285 let mut off = frm.offset[plane] + bx * 8 + ((by & !1) * 8 + (by & 1)) * frm.stride[plane];
286 for y in 0..8 {
287 for x in 0..8 {
288 frm.data[off + x] = (coeffs[x + y * 8] + 128).min(255).max(0) as u8;
289 }
290 off += frm.stride[plane] * 2;
291 }
292 }
293
294 pub fn vp_put_block_dc(coeffs: &mut [i16; 64], bx: usize, by: usize, plane: usize, frm: &mut NASimpleVideoFrame<u8>) {
295 vp_idct_dc(coeffs);
296 let dc = (coeffs[0] + 128).min(255).max(0) as u8;
297 let mut off = frm.offset[plane] + bx * 8 + by * 8 * frm.stride[plane];
298 for _ in 0..8 {
299 for x in 0..8 {
300 frm.data[off + x] = dc;
301 }
302 off += frm.stride[plane];
303 }
304 }
305
306 pub fn vp_add_block(coeffs: &mut [i16; 64], bx: usize, by: usize, plane: usize, frm: &mut NASimpleVideoFrame<u8>) {
307 vp_idct(coeffs);
308 let mut off = frm.offset[plane] + bx * 8 + by * 8 * frm.stride[plane];
309 for y in 0..8 {
310 for x in 0..8 {
311 frm.data[off + x] = (coeffs[x + y * 8] + (frm.data[off + x] as i16)).min(255).max(0) as u8;
312 }
313 off += frm.stride[plane];
314 }
315 }
316
317 pub fn vp_add_block_ilace(coeffs: &mut [i16; 64], bx: usize, by: usize, plane: usize, frm: &mut NASimpleVideoFrame<u8>) {
318 vp_idct(coeffs);
319 let mut off = frm.offset[plane] + bx * 8 + ((by & !1) * 8 + (by & 1)) * frm.stride[plane];
320 for y in 0..8 {
321 for x in 0..8 {
322 frm.data[off + x] = (coeffs[x + y * 8] + (frm.data[off + x] as i16)).min(255).max(0) as u8;
323 }
324 off += frm.stride[plane] * 2;
325 }
326 }
327
328 pub fn vp_add_block_dc(coeffs: &mut [i16; 64], bx: usize, by: usize, plane: usize, frm: &mut NASimpleVideoFrame<u8>) {
329 vp_idct_dc(coeffs);
330 let dc = coeffs[0];
331 let mut off = frm.offset[plane] + bx * 8 + by * 8 * frm.stride[plane];
332 for _ in 0..8 {
333 for x in 0..8 {
334 frm.data[off + x] = (dc + (frm.data[off + x] as i16)).min(255).max(0) as u8;
335 }
336 off += frm.stride[plane];
337 }
338 }
339
/// VP3-style deblocking filter applied along one edge.
///
/// For each of `len` positions (advancing by `stride`) it examines the four
/// pixels straddling the edge at distance `step` and adjusts the two inner
/// ones. The correction ramps down once it reaches `loop_str` and is
/// dropped entirely at `2 * loop_str`, so strong real edges are preserved.
pub fn vp31_loop_filter(data: &mut [u8], mut off: usize, step: usize, stride: usize,
                        len: usize, loop_str: i16) {
    for _ in 0..len {
        let p0 = i16::from(data[off - step * 2]);
        let p1 = i16::from(data[off - step]);
        let p2 = i16::from(data[off]);
        let p3 = i16::from(data[off + step]);
        let mut diff = ((p0 - p3) + 3 * (p2 - p1) + 4) >> 3;
        let adiff = diff.abs();
        if adiff >= 2 * loop_str {
            // too strong: treat as a genuine edge and leave it alone
            diff = 0;
        } else if adiff >= loop_str {
            // ramp the correction back down towards zero
            diff = if diff < 0 { -diff - 2 * loop_str } else { 2 * loop_str - diff };
        }
        if diff != 0 {
            data[off - step] = (p1 + diff).max(0).min(255) as u8;
            data[off]        = (p2 - diff).max(0).min(255) as u8;
        }

        off += stride;
    }
}
365
/// Motion-compensated copy of one 8x8 block from `src` into `dst`.
///
/// When the motion-compensated source position is 8-pixel aligned the block
/// is copied directly. Otherwise the source area (plus borders) is first
/// copied into the scratch buffer `mc_buf`, the VP3.1 loop filter is run
/// across the block-edge lines the copy straddles, and the filtered data is
/// then copied into `dst` using interpolation mode 0.
///
/// `preborder`/`postborder` are extra border pixels requested around the
/// block; `mode` selects the interpolation function from `interp`;
/// `loop_str` is the filter strength. NOTE(review): `mv_x`/`mv_y` appear to
/// be in whole-pixel units here, with the `& 7` test detecting crossings of
/// 8-pixel block boundaries — confirm against the callers.
pub fn vp_copy_block(dst: &mut NASimpleVideoFrame<u8>, src: NAVideoBufferRef<u8>, comp: usize,
                     dx: usize, dy: usize, mv_x: i16, mv_y: i16,
                     preborder: usize, postborder: usize, loop_str: i16,
                     mode: usize, interp: &[BlkInterpFunc], mut mc_buf: NAVideoBufferRef<u8>)
{
    let sx = (dx as isize) + (mv_x as isize);
    let sy = (dy as isize) + (mv_y as isize);
    // aligned source: plain copy, no filtering needed
    if ((sx | sy) & 7) == 0 {
        copy_block(dst, src, comp, dx, dy, mv_x, mv_y, 8, 8, preborder, postborder, mode, interp);
        return;
    }
    // need at least 2 pixels before and 1 after for the loop filter taps
    let pre  = preborder.max(2);
    let post = postborder.max(1);
    let bsize = 8 + pre + post;
    let src_x = sx - (pre as isize);
    let src_y = sy - (pre as isize);
    {
        // stage the source area (with borders) in the scratch buffer
        let mut tmp_buf = NASimpleVideoFrame::from_video_buf(&mut mc_buf).unwrap();
        copy_block(&mut tmp_buf, src, comp, 0, 0, src_x as i16, src_y as i16,
                   bsize, bsize, 0, 0, 0, interp);
        if (sy & 7) != 0 {
            // filter the horizontal block edge crossed by the copy
            let foff = (8 - (sy & 7)) as usize;
            let off  = (pre + foff) * tmp_buf.stride[comp];
            vp31_loop_filter(tmp_buf.data, off, tmp_buf.stride[comp], 1, bsize, loop_str);
        }
        if (sx & 7) != 0 {
            // filter the vertical block edge crossed by the copy
            let foff = (8 - (sx & 7)) as usize;
            let off  = pre + foff;
            vp31_loop_filter(tmp_buf.data, off, 1, tmp_buf.stride[comp], bsize, loop_str);
        }
    }
    // copy the filtered block from the scratch buffer into the destination
    let dxoff = (pre as i16) - (dx as i16);
    let dyoff = (pre as i16) - (dy as i16);
    copy_block(dst, mc_buf, comp, dx, dy, dxoff, dyoff, 8, 8, preborder, postborder, 0/* mode*/, interp);
}
401
/// Full-pel "interpolation": straight row-by-row copy of a `bw` x `bh`
/// block. Rows are copied wholesale with `copy_from_slice` (compiles down
/// to `memcpy`) instead of a per-byte loop.
fn vp3_interp00(dst: &mut [u8], dstride: usize, src: &[u8], sstride: usize, bw: usize, bh: usize)
{
    let mut didx = 0;
    let mut sidx = 0;
    for _ in 0..bh {
        dst[didx..][..bw].copy_from_slice(&src[sidx..][..bw]);
        didx += dstride;
        sidx += sstride;
    }
}
412
/// Horizontal half-pel interpolation: each output pixel is the truncated
/// average of a source pixel and its right-hand neighbour.
fn vp3_interp01(dst: &mut [u8], dstride: usize, src: &[u8], sstride: usize, bw: usize, bh: usize)
{
    for row in 0..bh {
        let srow = &src[row * sstride..];
        for (x, d) in dst[row * dstride..][..bw].iter_mut().enumerate() {
            *d = ((u16::from(srow[x]) + u16::from(srow[x + 1])) >> 1) as u8;
        }
    }
}
423
/// Vertical half-pel interpolation: each output pixel is the truncated
/// average of a source pixel and the pixel one line below it.
fn vp3_interp10(dst: &mut [u8], dstride: usize, src: &[u8], sstride: usize, bw: usize, bh: usize)
{
    for row in 0..bh {
        let srow = &src[row * sstride..];
        for (x, d) in dst[row * dstride..][..bw].iter_mut().enumerate() {
            *d = ((u16::from(srow[x]) + u16::from(srow[x + sstride])) >> 1) as u8;
        }
    }
}
434
/// Diagonal half-pel interpolation: each output pixel is the truncated
/// average of the 2x2 neighbourhood starting at the source pixel.
fn vp3_interp11(dst: &mut [u8], dstride: usize, src: &[u8], sstride: usize, bw: usize, bh: usize)
{
    for row in 0..bh {
        let srow = &src[row * sstride..];
        for (x, d) in dst[row * dstride..][..bw].iter_mut().enumerate() {
            let sum = u16::from(srow[x])
                    + u16::from(srow[x + 1])
                    + u16::from(srow[x + sstride])
                    + u16::from(srow[x + sstride + 1]);
            *d = (sum >> 2) as u8;
        }
    }
}
450
/// Interpolation function table: [0] full-pel copy, [1] horizontal average,
/// [2] vertical average, [3] 2x2 bilinear average.
pub const VP3_INTERP_FUNCS: &[blockdsp::BlkInterpFunc] = &[ vp3_interp00, vp3_interp01, vp3_interp10, vp3_interp11 ];
452