//! VP8 decoder (nihav-duck, src/codecs/vp8.rs).
1 use nihav_core::codecs::*;
2 use nihav_core::io::byteio::*;
3 use nihav_codec_support::codecs::{MV, ZERO_MV};
4 use super::vpcommon::*;
5 use super::vp78::*;
6 use super::vp78data::*;
7 use super::vp78dsp::*;
8 use super::vp8dsp::*;
9
/// Reference frame a macroblock is predicted from.
#[derive(Clone,Copy,PartialEq,Debug)]
enum VP8Ref {
    /// No reference — the macroblock is intra-coded.
    Intra,
    /// The previously decoded frame.
    Last,
    /// The golden (long-term) reference frame.
    Golden,
    /// The alternate reference frame.
    AltRef,
}
17
18 impl Default for VP8Ref {
19 fn default() -> Self { VP8Ref::Intra }
20 }
21
/// Holder for the three VP8 reference frames
/// (last decoded, golden and alternate reference).
#[derive(Default)]
pub struct VP8Shuffler {
    lastframe: Option<NAVideoBufferRef<u8>>,
    goldframe: Option<NAVideoBufferRef<u8>>,
    altframe: Option<NAVideoBufferRef<u8>>,
}
28
29 impl VP8Shuffler {
30 pub fn new() -> Self { Self::default() }
31 pub fn clear(&mut self) {
32 self.lastframe = None;
33 self.goldframe = None;
34 self.altframe = None;
35 }
36 pub fn add_frame(&mut self, buf: NAVideoBufferRef<u8>) {
37 self.lastframe = Some(buf);
38 }
39 pub fn add_golden_frame(&mut self, buf: NAVideoBufferRef<u8>) {
40 self.goldframe = Some(buf);
41 }
42 pub fn add_altref_frame(&mut self, buf: NAVideoBufferRef<u8>) {
43 self.altframe = Some(buf);
44 }
45 pub fn get_last(&mut self) -> Option<NAVideoBufferRef<u8>> {
46 if let Some(ref frm) = self.lastframe {
47 Some(frm.clone())
48 } else {
49 None
50 }
51 }
52 pub fn get_golden(&mut self) -> Option<NAVideoBufferRef<u8>> {
53 if let Some(ref frm) = self.goldframe {
54 Some(frm.clone())
55 } else {
56 None
57 }
58 }
59 pub fn get_altref(&mut self) -> Option<NAVideoBufferRef<u8>> {
60 if let Some(ref frm) = self.altframe {
61 Some(frm.clone())
62 } else {
63 None
64 }
65 }
66 pub fn has_refs(&self) -> bool {
67 self.lastframe.is_some()
68 }
69 }
70
/// Parameters shared by the sub-block coefficient decoder:
/// the (possibly per-segment) coefficient probabilities and the
/// dequantisation table for the current plane/coefficient class.
struct SBParams<'a> {
    coef_probs: &'a [[[[u8; 11]; 3]; 8]; 4],
    qmat: &'a [i16; 16],
}
75
/// DCT token tree used right after a zero coefficient: same shape as
/// `COEF_TREE` but without the EOB branch (EOB cannot follow a zero),
/// which is why `decode_subblock` reads it with the probability slice
/// shifted by one (`&probs[1..]`).
pub const COEF_NE_TREE: &[VPTreeDef<DCTToken>] = &[
    VPTreeDef::Value(DCTToken::Zero),   VPTreeDef::Index(2),
    VPTreeDef::Value(DCTToken::One),    VPTreeDef::Index(4),
    VPTreeDef::Index(6),                VPTreeDef::Index(10),
    VPTreeDef::Value(DCTToken::Two),    VPTreeDef::Index(8),
    VPTreeDef::Value(DCTToken::Three),  VPTreeDef::Value(DCTToken::Four),
    VPTreeDef::Index(12),               VPTreeDef::Index(14),
    VPTreeDef::Value(DCTToken::Cat1),   VPTreeDef::Value(DCTToken::Cat2),
    VPTreeDef::Index(16),               VPTreeDef::Index(18),
    VPTreeDef::Value(DCTToken::Cat3),   VPTreeDef::Value(DCTToken::Cat4),
    VPTreeDef::Value(DCTToken::Cat5),   VPTreeDef::Value(DCTToken::Cat6)
];
88
89 fn decode_subblock(bc: &mut BoolCoder, coeffs: &mut [i16; 16], ctype: usize, pctx: u8, sbparams: &SBParams) -> u8 {
90 const COEF_BANDS: [usize; 16] = [ 0, 1, 2, 3, 6, 4, 5, 6, 6, 6, 6, 6, 6, 6, 6, 7 ];
91
92 let mut has_nz = 0;
93 let start = if ctype != 0 { 0 } else { 1 };
94 *coeffs = [0; 16];
95 let mut cval = pctx as usize;
96 for idx in start..16 {
97 let probs = &sbparams.coef_probs[ctype][COEF_BANDS[idx]][cval];
98 let tok = if cval != 0 || idx == start {
99 bc.read_tree(COEF_TREE, probs)
100 } else {
101 bc.read_tree(COEF_NE_TREE, &probs[1..])
102 };
103 if tok == DCTToken::EOB { break; }
104 let level = expand_token(bc, tok);
105 coeffs[DEFAULT_SCAN_ORDER[idx]] = level.wrapping_mul(sbparams.qmat[idx]);
106 cval = level.abs().min(2) as usize;
107 has_nz |= cval;
108 }
109 if has_nz > 0 { 1 } else { 0 }
110 }
111
/// Per-macroblock state kept for neighbour prediction and loop filtering.
#[derive(Clone,Copy,Default)]
struct MBInfo {
    mb_type: VPMBType,  // coding type (intra / inter variants)
    ymode: PredMode,    // luma intra prediction mode
    uvmode: PredMode,   // chroma intra prediction mode
    loop_str: u8,       // loop filter strength for this MB
    inner_filt: bool,   // whether inner (sub-block) edges get filtered
    rframe: VP8Ref,     // reference frame used by inter MBs
}
121
/// Per-segment quantiser and loop-filter values; interpreted as
/// absolute values or deltas depending on `seg_feature_mode`.
#[derive(Clone,Copy,Default)]
struct Segment {
    quant: i8,
    lf: i8,
}
127
/// Snapshot of the adaptive probabilities, kept so they can be
/// restored after frames that do not refresh the entropy context.
#[derive(Default)]
struct SavedProbs {
    kf_ymode_prob: [u8; 4],
    kf_uvmode_prob: [u8; 3],

    coef_probs: [[[[u8; 11]; 3]; 8]; 4],
    mv_probs: [[u8; 19]; 2],

    segment_probs: [u8; 3],
}
138
/// Per-frame decoder state: header flags, adaptive probabilities and
/// segmentation / loop-filter parameters.
#[derive(Default)]
struct DecoderState {
    // Loop filter parameters from the frame header.
    lf_simple: bool,
    loop_filter_level: u8,
    loop_sharpness: u8,

    is_intra: bool,
    // Bitstream version; affects the MC filter choice and chroma MV
    // rounding in recon_inter_mb().
    version: u8,

    // Intra mode probabilities.
    kf_ymode_prob: [u8; 4],
    kf_uvmode_prob: [u8; 3],

    // Probabilities for intra/inter and reference frame selection.
    prob_intra_pred: u8,
    prob_last_pred: u8,
    prob_gold_pred: u8,
    // Sign bias for [golden, altref] — see get_frame_sign().
    sign_bias: [bool; 2],

    coef_probs: [[[[u8; 11]; 3]; 8]; 4],
    mv_probs: [[u8; 19]; 2],

    // Segmentation state.
    segmentation: bool,
    update_seg_map: bool,
    // When segmentation is active these carry the current MB's
    // segment id (used as qmat selector) and loop filter strength.
    force_quant: Option<u8>,
    force_loop_str: Option<u8>,
    segment_probs: [u8; 3],
    seg: [Segment; 4],
    // true: segment values are absolute; false: deltas.
    seg_feature_mode: bool,

    // Loop filter delta adjustments.
    lf_delta: bool,
    lf_frame_delta: [i8; 4],
    lf_mode_delta: [i8; 4],

    // Whether the current MB has a separate Y2 (luma DC) block.
    has_y2: bool,

    // Reusable intra prediction edge contexts per plane.
    ipred_ctx_y: IPredContext,
    ipred_ctx_u: IPredContext,
    ipred_ctx_v: IPredContext,
}
177
178 impl DecoderState {
179 fn reset(&mut self) {
180 const VP8_DEFAULT_MV_PROBS: [[u8; 19]; 2] = [
181 [ 162, 128, 225, 146, 172, 147, 214, 39, 156, 128, 129, 132, 75, 145, 178, 206, 239, 254, 254 ],
182 [ 164, 128, 204, 170, 119, 235, 140, 230, 228, 128, 130, 130, 74, 148, 180, 203, 236, 254, 254 ]
183 ];
184
185 self.kf_ymode_prob.copy_from_slice(Y_MODE_TREE_PROBS);
186 self.kf_uvmode_prob.copy_from_slice(UV_MODE_TREE_PROBS);
187 self.coef_probs.copy_from_slice(&DEFAULT_DCT_PROBS);
188 self.mv_probs.copy_from_slice(&VP8_DEFAULT_MV_PROBS);
189 self.segment_probs = [255; 3];
190 self.seg = [Segment::default(); 4];
191 }
192 fn restore(&mut self, dst: &SavedProbs) {
193 self.kf_ymode_prob = dst.kf_ymode_prob;
194 self.kf_uvmode_prob = dst.kf_uvmode_prob;
195 self.coef_probs = dst.coef_probs;
196 self.mv_probs = dst.mv_probs;
197 self.segment_probs = dst.segment_probs;
198 }
199 fn save(&self, dst: &mut SavedProbs) {
200 dst.kf_ymode_prob = self.kf_ymode_prob;
201 dst.kf_uvmode_prob = self.kf_uvmode_prob;
202 dst.coef_probs = self.coef_probs;
203 dst.mv_probs = self.mv_probs;
204 // dst.segment_probs = self.segment_probs;
205 }
206 }
207
/// Decodes one motion vector component (x or y).
///
/// `probs[0]` selects short vs. long form, `probs[1]` is the sign
/// probability, `probs[2..9]` drive the small-vector tree and
/// `probs[9..]` are the per-bit probabilities of the long form.
fn decode_mv_component(bc: &mut BoolCoder, probs: &[u8; 19]) -> i16 {
    // Transmission order of the long-form magnitude bits: the three low
    // bits first, then the high bits from MSB downwards; bit 3 is
    // handled separately below.
    const LONG_VECTOR_ORDER: [usize; 9] = [ 0, 1, 2, 9, 8, 7, 6, 5, 4 ];

    let val = if !bc.read_prob(probs[0]) {
            // Short form: magnitude 0..7 coded with a small tree.
            bc.read_tree(SMALL_MV_TREE, &probs[2..9])
        } else {
            let raw_probs = &probs[9..];
            let mut raw = 0;
            for ord in LONG_VECTOR_ORDER.iter() {
                raw |= (bc.read_prob(raw_probs[*ord]) as i16) << *ord;
            }
            // Bit 3 is coded explicitly only when one of the higher bits
            // (4..9, mask 0x3F0) is set; otherwise it is implied to be 1
            // so the long form never overlaps the short-form range 0..7.
            if (raw & 0x3F0) != 0 {
                raw |= (bc.read_prob(raw_probs[3]) as i16) << 3;
            } else {
                raw |= 1 << 3;
            }
            raw
        };
    // The sign bit is only present for non-zero magnitudes.
    if (val == 0) || !bc.read_prob(probs[1]) {
        val
    } else {
        -val
    }
}
232
/// VP8 decoder instance state.
struct VP8Decoder {
    info: NACodecInfoRef,

    // Reference frame storage.
    shuf: VP8Shuffler,
    width: usize,
    height: usize,
    mb_w: usize,
    mb_h: usize,
    // Per-MB decoding info, mb_w * mb_h entries.
    mb_info: Vec<MBInfo>,
    // Motion vectors at 4x4 granularity (4 per MB dimension).
    mvs: Vec<MV>,
    mv_stride: usize,

    // Intra prediction modes at 4x4 (luma) and MB (chroma) granularity.
    ymodes: Vec<PredMode>,
    ymode_stride: usize,
    uvmodes: Vec<PredMode>,
    uvmode_stride: usize,

    dstate: DecoderState,
    // Cached non-zero-coefficient contexts for coefficient decoding.
    pcache: PredCache,
    tmp_probs: SavedProbs,

    // Coefficient blocks: 16 luma + 4 U + 4 V + 1 Y2.
    coeffs: [[i16; 16]; 25],
    // Dequant tables: [0] frame-level, [1..=4] per segment;
    // inner index: 0 = luma, 1 = chroma, 2 = Y2.
    qmat: [[[i16; 16]; 3]; 5],

    // Scratch buffer for motion compensation with edge emulation.
    mc_buf: NAVideoBufferRef<u8>,

    // Per-MB segment ids.
    seg_map: Vec<u8>,
}
261
262 impl VP8Decoder {
    /// Creates a fresh decoder with empty per-frame buffers; the real
    /// sizes are set later by `set_dimensions()`.
    fn new() -> Self {
        // Scratch buffer used by the MC routines for edge-emulated
        // reads. NOTE(review): 128x128 presumably covers one padded
        // macroblock with interpolation margins — confirm against the
        // mc_block* helpers in vp78dsp.
        let vt = alloc_video_buffer(NAVideoInfo::new(128, 128, false, YUV420_FORMAT), 4).unwrap();
        let mc_buf = vt.get_vbuf().unwrap();
        Self {
            info: NACodecInfoRef::default(),

            shuf: VP8Shuffler::new(),
            width: 0,
            height: 0,
            mb_w: 0,
            mb_h: 0,
            mb_info: Vec::new(),
            mvs: Vec::new(),
            mv_stride: 0,

            ymodes: Vec::new(),
            ymode_stride: 0,
            uvmodes: Vec::new(),
            uvmode_stride: 0,

            dstate: DecoderState::default(),
            pcache: PredCache::new(),
            tmp_probs: SavedProbs::default(),

            coeffs: [[0; 16]; 25],
            qmat: [[[0; 16]; 3]; 5],

            mc_buf,

            seg_map: Vec::new(),
        }
    }
    /// Resizes all per-frame arrays for a new coded frame size.
    /// No-op when the dimensions are unchanged.
    fn set_dimensions(&mut self, width: usize, height: usize) {
        if (width == self.width) && (height == self.height) {
            return;
        }
        self.width = width;
        self.height = height;
        // Round up to whole macroblocks.
        self.mb_w = (self.width + 15) >> 4;
        self.mb_h = (self.height + 15) >> 4;
        self.mb_info.resize(self.mb_w * self.mb_h, MBInfo::default());
        // MVs and luma modes are stored at 4x4 granularity.
        self.mv_stride = self.mb_w * 4;
        self.mvs.resize(self.mv_stride * self.mb_h * 4, ZERO_MV);

        self.ymode_stride = self.mb_w * 4;
        self.uvmode_stride = self.mb_w;
        self.ymodes.resize(self.ymode_stride * self.mb_h * 4, PredMode::default());
        self.uvmodes.resize(self.uvmode_stride * self.mb_h, PredMode::default());

        self.pcache.resize(self.mb_w);

        // The segment map resets to segment 0 on resize.
        self.seg_map.clear();
        self.seg_map.resize(self.mb_w * self.mb_h, 0);
    }
    /// Parses the segmentation part of the frame header: whether the
    /// per-MB segment map gets updated, the per-segment quantiser and
    /// loop filter values, and the segment tree probabilities.
    fn update_segmentation(&mut self, bc: &mut BoolCoder) -> DecoderResult<()> {
        self.dstate.update_seg_map = bc.read_bool();
        if bc.read_bool() { // segment feature data is updated
            // true = absolute values, false = deltas (see set_cur_segment).
            self.dstate.seg_feature_mode = bc.read_bool();
            // Per-segment quantiser values: 7-bit magnitude plus sign.
            for seg in self.dstate.seg.iter_mut() {
                if bc.read_bool() {
                    let quant_upd_val = bc.read_bits(7) as i8;
                    let quant_upd_sign = bc.read_bool();
                    seg.quant = if !quant_upd_sign { quant_upd_val } else { -quant_upd_val };
                }
            }
            // Per-segment loop filter values: 6-bit magnitude plus sign.
            for seg in self.dstate.seg.iter_mut() {
                if bc.read_bool() {
                    let lf_upd_val = bc.read_bits(6) as i8;
                    let lf_upd_sign = bc.read_bool();
                    seg.lf = if !lf_upd_sign { lf_upd_val } else { -lf_upd_val };
                }
            }
        }
        if self.dstate.update_seg_map {
            // Stash the previous probabilities first; DecoderState::save()
            // deliberately does not copy segment_probs.
            self.tmp_probs.segment_probs = self.dstate.segment_probs;
            for prob in self.dstate.segment_probs.iter_mut() {
                if bc.read_bool() {
                    *prob = bc.read_byte();
                }
            }
        }
        Ok(())
    }
    /// Parses the loop filter delta adjustments: per-reference-frame
    /// and per-mode deltas applied on top of the base filter level.
    fn mb_lf_adjustments(&mut self, bc: &mut BoolCoder) -> DecoderResult<()> {
        self.dstate.lf_delta = bc.read_bool();
        if self.dstate.lf_delta {
            if bc.read_bool() { // the deltas are updated this frame
                // Each delta: 6-bit magnitude plus sign.
                for frame_delta in self.dstate.lf_frame_delta.iter_mut() {
                    if bc.read_bool() {
                        let delta_magn = bc.read_bits(6) as i8;
                        let delta_sign = bc.read_bool();
                        *frame_delta = if !delta_sign { delta_magn } else { -delta_magn };
                    }
                }
                for mode_delta in self.dstate.lf_mode_delta.iter_mut() {
                    if bc.read_bool() {
                        let delta_magn = bc.read_bits(6) as i8;
                        let delta_sign = bc.read_bool();
                        *mode_delta = if !delta_sign { delta_magn } else { -delta_magn };
                    }
                }
            }
        }
        Ok(())
    }
368 fn read_delta_quant(bc: &mut BoolCoder, y_ac_q: usize) -> DecoderResult<usize> {
369 if bc.read_bool() {
370 let delta = bc.read_bits(4) as usize;
371 if bc.read_bool() {
372 Ok(y_ac_q.saturating_sub(delta))
373 } else {
374 Ok((y_ac_q + delta).min(127))
375 }
376 } else {
377 Ok(y_ac_q)
378 }
379 }
    /// Parses the quantiser indices from the frame header: a 7-bit luma
    /// AC index plus optional deltas for the remaining classes, then
    /// rebuilds the dequantisation tables.
    fn quant_indices(&mut self, bc: &mut BoolCoder) -> DecoderResult<()> {
        let y_ac_q = bc.read_bits(7) as usize;
        let y_dc_q = Self::read_delta_quant(bc, y_ac_q)?;
        let y2_dc_q = Self::read_delta_quant(bc, y_ac_q)?;
        let y2_ac_q = Self::read_delta_quant(bc, y_ac_q)?;
        let uv_dc_q = Self::read_delta_quant(bc, y_ac_q)?;
        let uv_ac_q = Self::read_delta_quant(bc, y_ac_q)?;
        self.set_qmat(y_dc_q, y_ac_q, y2_dc_q, y2_ac_q, uv_dc_q, uv_ac_q);

        Ok(())
    }
391 fn read_dct_coef_prob_upd(&mut self, bc: &mut BoolCoder) -> DecoderResult<()> {
392 for i in 0..4 {
393 for j in 0..8 {
394 for k in 0..3 {
395 for l in 0..11 {
396 if bc.read_prob(DCT_UPDATE_PROBS[i][j][k][l]) {
397 self.dstate.coef_probs[i][j][k][l] = bc.read_byte();
398 }
399 }
400 }
401 }
402 }
403 Ok(())
404 }
    /// Updates the MV component probabilities; each entry has a fixed
    /// update probability gating whether a new value is coded.
    /// Note the values are read with `read_probability()` — unlike the
    /// DCT probabilities which are full bytes (presumably the MV values
    /// are coded in the 7-bit probability form; see BoolCoder).
    fn read_mv_prob_upd(&mut self, bc: &mut BoolCoder) -> DecoderResult<()> {
        const MV_UPDATE_PROBS: [[u8; 19]; 2] = [
            [ 237, 246, 253, 253, 254, 254, 254, 254, 254, 254, 254, 254, 254, 254, 250, 250, 252, 254, 254 ],
            [ 231, 243, 245, 253, 254, 254, 254, 254, 254, 254, 254, 254, 254, 254, 251, 251, 254, 254, 254 ]
        ];
        for comp in 0..2 {
            for i in 0..19 {
                if bc.read_prob(MV_UPDATE_PROBS[comp][i]) {
                    self.dstate.mv_probs[comp][i] = bc.read_probability();
                }
            }
        }
        Ok(())
    }
419 fn decode_mb_features(&mut self, bc: &mut BoolCoder, mb_idx: usize) -> DecoderResult<()> {
420 let segment_id = bc.read_tree(FEATURE_TREE, &self.dstate.segment_probs);
421 self.seg_map[mb_idx] = segment_id as u8;
422
423 Ok(())
424 }
    /// Activates the segment the current MB belongs to: records its id
    /// (used by decode_residue to select the per-segment qmat) and
    /// computes the effective loop filter strength.
    fn set_cur_segment(&mut self, mb_idx: usize) {
        // force_quant doubles as the "active segment id"; decode_residue
        // adds one to index into qmat[1..].
        self.dstate.force_quant = Some(self.seg_map[mb_idx]);
        let seg_id = self.seg_map[mb_idx] as usize;
        let segment = &self.dstate.seg[seg_id];
        let loop_str = if self.dstate.seg_feature_mode {
                // Absolute mode: the segment value is the filter level.
                segment.lf as u8
            } else {
                // Delta mode: add to the frame level, clamped to 0..63.
                (i16::from(self.dstate.loop_filter_level) + i16::from(segment.lf)).max(0).min(63) as u8
            };
        self.dstate.force_loop_str = Some(loop_str);
    }
    /// Decodes all coefficient blocks of one macroblock (optional Y2,
    /// 16 luma, 4+4 chroma), updates the non-zero prediction cache and
    /// applies the inverse transforms, leaving the spatial residue in
    /// `self.coeffs`. Returns `true` if anything was coded.
    fn decode_residue(&mut self, bc: &mut BoolCoder, mb_x: usize) -> bool {
        // qmat[0] holds the frame-level quantisers; with segmentation
        // active, qmat[seg_id + 1] is used instead.
        let qmat_idx = if let Some(idx) = self.dstate.force_quant { (idx as usize) + 1 } else { 0 };
        // Start with the Y2 quantisers (inner index 2).
        let mut sbparams = SBParams {
                qmat: &self.qmat[qmat_idx][2],
                coef_probs: &self.dstate.coef_probs,
            };
        let mut has_ac = [false; 25];
        let mut coded = false;
        let ytype;
        if self.dstate.has_y2 {
            // Y2 block: the luma DC coefficients are coded separately
            // as coefficient class 1.
            let pred = &self.pcache.y2_pred;
            let pidx = pred.xpos + mb_x;
            let pctx = self.pcache.y2_pred_left + pred.data[pidx - pred.stride];

            let has_nz = decode_subblock(bc, &mut self.coeffs[24], 1, pctx, &sbparams);
            self.pcache.y2_pred.data[pidx] = has_nz;
            self.pcache.y2_pred_left = has_nz;
            has_ac[24] = has_nz > 0;
            coded |= has_ac[24] | (self.coeffs[24][0] != 0);

            // Luma blocks then skip their DC (class 0).
            ytype = 0;
        } else {
            // No Y2 block: keep the above-context consistent by
            // propagating it, and code luma with DC included (class 3).
            let pred = &mut self.pcache.y2_pred;
            let pidx = pred.xpos + mb_x;
            pred.data[pidx] = pred.data[pidx - pred.stride];

            ytype = 3;
        }
        // 16 luma blocks with the luma quantisers (inner index 0).
        sbparams.qmat = &self.qmat[qmat_idx][0];
        for i in 0..16 {
            let bx = i & 3;
            let by = i >> 2;
            let pred = &self.pcache.y_pred;
            let pidx = pred.xpos + mb_x * 4 + bx + by * pred.stride;
            // Context = left neighbour non-zero flag + above one.
            let pctx = self.pcache.y_pred_left[by] + pred.data[pidx - pred.stride];

            let has_nz = decode_subblock(bc, &mut self.coeffs[i], ytype, pctx, &sbparams);
            self.pcache.y_pred.data[pidx] = has_nz;
            self.pcache.y_pred_left[by] = has_nz;
            has_ac[i] = has_nz > 0;
            coded |= has_ac[i] | (self.coeffs[i][0] != 0);
        }
        // 4 U blocks then 4 V blocks, coefficient class 2, chroma
        // quantisers (inner index 1).
        sbparams.qmat = &self.qmat[qmat_idx][1];
        for i in 16..20 {
            let bx = i & 1;
            let by = (i >> 1) & 1;
            let pred = &self.pcache.u_pred;
            let pidx = pred.xpos + mb_x * 2 + bx + by * pred.stride;
            let pctx = self.pcache.u_pred_left[by] + pred.data[pidx - pred.stride];

            let has_nz = decode_subblock(bc, &mut self.coeffs[i], 2, pctx, &sbparams);
            self.pcache.u_pred.data[pidx] = has_nz;
            self.pcache.u_pred_left[by] = has_nz;
            has_ac[i] = has_nz > 0;
            coded |= has_ac[i] | (self.coeffs[i][0] != 0);
        }
        for i in 20..24 {
            let bx = i & 1;
            let by = (i >> 1) & 1;
            let pred = &self.pcache.v_pred;
            let pidx = pred.xpos + mb_x * 2 + bx + by * pred.stride;
            let pctx = self.pcache.v_pred_left[by] + pred.data[pidx - pred.stride];

            let has_nz = decode_subblock(bc, &mut self.coeffs[i], 2, pctx, &sbparams);
            self.pcache.v_pred.data[pidx] = has_nz;
            self.pcache.v_pred_left[by] = has_nz;
            has_ac[i] = has_nz > 0;
            coded |= has_ac[i] | (self.coeffs[i][0] != 0);
        }

        if self.dstate.has_y2 {
            // Inverse WHT on the Y2 block and scatter its values into
            // the DC positions of the 16 luma blocks.
            let y2block = &mut self.coeffs[24];
            if has_ac[24] {
                iwht4x4(y2block);
            } else if y2block[0] != 0 {
                iwht4x4_dc(y2block);
            }
            for i in 0..16 {
                self.coeffs[i][0] = self.coeffs[24][i];
            }
        }
        // Inverse DCT; use the cheap DC-only variant when possible.
        for i in 0..24 {
            if has_ac[i] {
                idct4x4(&mut self.coeffs[i]);
            } else if self.coeffs[i][0] != 0 {
                idct4x4_dc(&mut self.coeffs[i]);
            }
        }

        coded
    }
527
528 fn set_single_qmat(qmat: &mut [[i16; 16]; 3], y_dc_q: usize, y_ac_q: usize, y2_dc_q: usize, y2_ac_q: usize, uv_dc_q: usize, uv_ac_q: usize) {
529 qmat[0][0] = DC_QUANTS[y_dc_q];
530 for i in 1..16 {
531 qmat[0][i] = AC_QUANTS[y_ac_q];
532 }
533 qmat[1][0] = DC_QUANTS[uv_dc_q].min(132);
534 for i in 1..16 {
535 qmat[1][i] = AC_QUANTS[uv_ac_q];
536 }
537 qmat[2][0] = DC_QUANTS[y2_dc_q] * 2;
538 for i in 1..16 {
539 qmat[2][i] = (i32::from(AC_QUANTS[y2_ac_q]) * 155 / 100).max(8) as i16;
540 }
541 }
    /// Rebuilds all dequantisation tables: index 0 from the frame-level
    /// indices and, when segmentation is enabled, indices 1..=4 for the
    /// four segments (absolute values or deltas on the luma AC index,
    /// depending on seg_feature_mode).
    fn set_qmat(&mut self, y_dc_q: usize, y_ac_q: usize, y2_dc_q: usize, y2_ac_q: usize, uv_dc_q: usize, uv_ac_q: usize) {
        Self::set_single_qmat(&mut self.qmat[0], y_dc_q, y_ac_q, y2_dc_q, y2_ac_q, uv_dc_q, uv_ac_q);
        if self.dstate.segmentation {
            for (qmat, seg) in self.qmat[1..].iter_mut().zip(self.dstate.seg.iter()) {
                let q = if self.dstate.seg_feature_mode {
                        // Absolute mode; negative values clamp to 0.
                        seg.quant.max(0) as usize
                    } else {
                        // Delta mode, clamped to the table range.
                        ((y_ac_q as i16) + i16::from(seg.quant)).max(0).min(127) as usize
                    };
                // Per-segment sets use a single index for every class.
                Self::set_single_qmat(qmat, q, q, q, q, q, q);
            }
        }
    }
555 fn fill_ymode(&mut self, mb_x: usize, mb_y: usize, ymode: PredMode) {
556 let mut iidx = mb_x * 4 + mb_y * 4 * self.ymode_stride;
557 for _ in 0..4 {
558 for x in 0..4 {
559 self.ymodes[iidx + x] = ymode;
560 }
561 iidx += self.ymode_stride;
562 }
563 }
564 fn fill_mv(&mut self, mb_x: usize, mb_y: usize, mv: MV) {
565 let mut iidx = mb_x * 4 + mb_y * 4 * self.mv_stride;
566 for _ in 0..4 {
567 for x in 0..4 {
568 self.mvs[iidx + x] = mv;
569 }
570 iidx += self.mb_w * 4;
571 }
572 }
573 fn get_frame_sign(&self, rframe: VP8Ref) -> bool {
574 match rframe {
575 VP8Ref::Golden => self.dstate.sign_bias[0],
576 VP8Ref::AltRef => self.dstate.sign_bias[1],
577 _ => false,
578 }
579 }
    /// Surveys the already-decoded neighbour MBs (above, left and
    /// above-left) and derives the MV candidates for the current MB.
    /// Returns the context-dependent inter mode probabilities together
    /// with the clipped `nearest`, `near` and "best" predicted MVs.
    fn find_mv_pred(&self, mb_x: usize, mb_y: usize, frm_sign: bool) -> ([u8; 4], MV, MV, MV) {
        // Mode probabilities indexed by the per-candidate vote counts.
        const VP8_MV_PROBS: [[u8; 4]; 6] = [
            [   7,   1,   1, 143 ],
            [  14,  18,  14, 107 ],
            [ 135,  64,  57,  68 ],
            [  60,  56, 128,  65 ],
            [ 159, 134, 128,  34 ],
            [ 234, 188, 128,  28 ]
        ];

        // Neighbour offsets (dx, dy, weight): above, left, above-left.
        const OFFS: [(u8, u8, u8); 3] = [(0, 1, 2), (1, 0, 2), (1, 1, 1)];
        let mut mvs = [ZERO_MV; 3];
        let mut mvc = [0; 3];
        let mut num_mv = 0;
        let mut split_w = 0;

        let mut nearest_mv = ZERO_MV;
        let mut near_mv = ZERO_MV;

        // Gather the distinct neighbour MVs with accumulated weights.
        for &(x, y, weight) in OFFS.iter() {
            let mv = if (x == 0 || mb_x > 0) && (y == 0 || mb_y > 0) {
                    let x = usize::from(x);
                    let y = usize::from(y);
                    let mb_idx = mb_x - x + (mb_y - y) * self.mb_w;
                    // Intra neighbours contribute nothing.
                    if self.mb_info[mb_idx].mb_type.is_intra() {
                        continue;
                    }
                    if self.mb_info[mb_idx].mb_type == VPMBType::InterFourMV {
                        split_w += weight;
                    }
                    let rsign = self.get_frame_sign(self.mb_info[mb_idx].rframe);
                    // Pick the sub-block MV of the neighbour adjacent to
                    // the current MB.
                    let mut mv_idx = mb_x * 4 + mb_y * 4 * self.mv_stride;
                    if y == 0 { // left
                        mv_idx += self.mv_stride * 3 - 1;
                    } else if x == 0 { // top
                        mv_idx -= self.mv_stride;
                        mv_idx += 3;
                    } else {
                        mv_idx -= self.mv_stride + 1;
                    }
                    // Mismatched sign bias flips the candidate MV.
                    if rsign == frm_sign {
                        self.mvs[mv_idx]
                    } else {
                        -self.mvs[mv_idx]
                    }
                } else {
                    continue;
                };
            // Merge identical candidates, accumulating their weights.
            let mut found = false;
            for i in 0..num_mv {
                if mvs[i] == mv {
                    mvc[i] += weight;
                    found = true;
                    break;
                }
            }
            if !found {
                mvs[num_mv] = mv;
                mvc[num_mv] = weight;
                num_mv += 1;
            }
        }

        // Sort the candidates by descending weight (tiny fixed-size
        // sorting networks for 2 and 3 entries).
        match num_mv {
            2 => {
                if mvc[0] < mvc[1] {
                    mvs.swap(0, 1);
                    mvc.swap(0, 1);
                }
            },
            3 => {
                if mvc[1] < mvc[2] {
                    mvs.swap(1, 2);
                    mvc.swap(1, 2);
                }
                if mvc[0] < mvc[1] {
                    mvs.swap(0, 1);
                    mvc.swap(0, 1);
                }
                if mvc[1] < mvc[2] {
                    mvs.swap(1, 2);
                    mvc.swap(1, 2);
                }
            },
            _ => {},
        };

        let mut best_mv = mvs[0];

        // Build the mode probability context: ct[0] = zero-MV weight,
        // ct[1]/ct[2] = nearest/near weights, ct[3] = split-MV weight.
        let mut ct = [0; 4];
        for (&mv, &count) in mvs[..num_mv].iter().zip(mvc.iter()) {
            if mv != ZERO_MV {
                if nearest_mv == ZERO_MV {
                    nearest_mv = mv;
                    if mvc[0] == count {
                        best_mv = mv;
                    }
                    ct[1] = count;
                } else {
                    near_mv = mv;
                    ct[2] = count;
                    break;
                }
            }
        }
        for (&mv, &count) in mvs[..num_mv].iter().zip(mvc.iter()) {
            if mv == ZERO_MV {
                ct[0] = count;
                break;
            }
        }
        ct[3] = split_w;
        let best_mv = self.clip_mv(best_mv, mb_x, mb_y);

        let mvprobs = [VP8_MV_PROBS[ct[0] as usize][0],
                       VP8_MV_PROBS[ct[1] as usize][1],
                       VP8_MV_PROBS[ct[2] as usize][2],
                       VP8_MV_PROBS[ct[3] as usize][3]];

        (mvprobs, self.clip_mv(nearest_mv, mb_x, mb_y), self.clip_mv(near_mv, mb_x, mb_y), best_mv)
    }
701 fn clip_mv(&self, mv: MV, mb_x: usize, mb_y: usize) -> MV {
702 let pos_x = (mb_x as i32) * 16 * 4;
703 let pos_y = (mb_y as i32) * 16 * 4;
704 let mv_x = (pos_x + i32::from(mv.x)).max(-16 * 4).min((self.mb_w as i32) * 16 * 4);
705 let mv_y = (pos_y + i32::from(mv.y)).max(-16 * 4).min((self.mb_h as i32) * 16 * 4);
706 MV {x: (mv_x - pos_x) as i16, y: (mv_y - pos_y) as i16 }
707 }
    /// Decodes the motion vector for one 4x4 sub-block in split-MV
    /// mode. The left and top neighbouring sub-block MVs select the
    /// probability set and may be reused directly.
    fn get_split_mv(&self, bc: &mut BoolCoder, mb_x: usize, mb_y: usize, bx: usize, by: usize, pred_mv: MV) -> MV {
        // Probability sets indexed by the left/top context below.
        const SUB_MV_REF_PROBS: [[u8; 3]; 5] = [
            [ 147, 136,  18 ],
            [ 106, 145,   1 ],
            [ 179, 121,   1 ],
            [ 223,   1,  34 ],
            [ 208,   1,   1 ]
        ];

        let mvidx = mb_x * 4 + bx + (mb_y * 4 + by) * self.mv_stride;
        let left_mv = if (mb_x > 0) || (bx > 0) {
                self.mvs[mvidx - 1]
            } else {
                ZERO_MV
            };
        let top_mv = if (mb_y > 0) || (by > 0) {
                self.mvs[mvidx - self.mv_stride]
            } else {
                ZERO_MV
            };

        // Context: 4 = both neighbours zero, 3 = equal non-zero,
        // 2 = top zero, 1 = left zero, 0 = distinct non-zero.
        let idx = if left_mv == top_mv {
                if left_mv == ZERO_MV {
                    4
                } else {
                    3
                }
            } else if top_mv == ZERO_MV {
                2
            } else if left_mv == ZERO_MV {
                1
            } else {
                0
            };
        let mode = bc.read_tree(SUB_MV_REF_TREE, &SUB_MV_REF_PROBS[idx]);
        match mode {
            SubMVRef::Left => left_mv,
            SubMVRef::Above => top_mv,
            SubMVRef::Zero => ZERO_MV,
            SubMVRef::New => {
                // A new MV delta: y component first, then x.
                let dmy = decode_mv_component(bc, &self.dstate.mv_probs[0]);
                let dmx = decode_mv_component(bc, &self.dstate.mv_probs[1]);
                pred_mv + MV{ x: dmx, y: dmy }
            },
        }
    }
    /// Decodes split-MV (four-MV) macroblock partitions and writes the
    /// per-4x4 motion vectors. The intermediate stores matter: each
    /// partition's MVs must be in `self.mvs` before the next partition
    /// is decoded, since get_split_mv() reads them as context.
    fn do_split_mv(&mut self, bc: &mut BoolCoder, mb_x: usize, mb_y: usize, pred_mv: MV) -> DecoderResult<()> {
        let split_mode = bc.read_tree(MV_SPLIT_MODE_TREE, &MV_SPLIT_MODE_PROBS);
        let mut mvidx = mb_x * 4 + mb_y * 4 * self.mv_stride;
        match split_mode {
            MVSplitMode::TopBottom => {
                let top_mv = self.get_split_mv(bc, mb_x, mb_y, 0, 0, pred_mv);
                // Fill rows 0-1 before decoding the bottom half so the
                // bottom MV sees the correct top context.
                for _ in 0..2 {
                    for x in 0..4 { self.mvs[mvidx + x] = top_mv; }
                    mvidx += self.mv_stride;
                }
                let bot_mv = self.get_split_mv(bc, mb_x, mb_y, 0, 2, pred_mv);
                for _ in 2..4 {
                    for x in 0..4 { self.mvs[mvidx + x] = bot_mv; }
                    mvidx += self.mv_stride;
                }
            },
            MVSplitMode::LeftRight => {
                let left_mv = self.get_split_mv(bc, mb_x, mb_y, 0, 0, pred_mv);
                // Pre-store the left half MV at column 1 so that the
                // right half (bx = 2) sees it as its left neighbour.
                self.mvs[mvidx + 1] = left_mv;
                let right_mv = self.get_split_mv(bc, mb_x, mb_y, 2, 0, pred_mv);
                for _ in 0..4 {
                    self.mvs[mvidx + 0] = left_mv;
                    self.mvs[mvidx + 1] = left_mv;
                    self.mvs[mvidx + 2] = right_mv;
                    self.mvs[mvidx + 3] = right_mv;
                    mvidx += self.mv_stride;
                }
            },
            MVSplitMode::Quarters => {
                // One MV per 8x8 quarter, replicated to its four 4x4
                // cells (right column and row below copied in place).
                for y in (0..4).step_by(2) {
                    for x in (0..4).step_by(2) {
                        self.mvs[mvidx + x] = self.get_split_mv(bc, mb_x, mb_y, x, y, pred_mv);
                        self.mvs[mvidx + x + 1] = self.mvs[mvidx + x];
                    }
                    for x in 0..4 {
                        self.mvs[mvidx + x + self.mv_stride] = self.mvs[mvidx + x];
                    }
                    mvidx += self.mv_stride * 2;
                }
            },
            MVSplitMode::Sixteenths => {
                // An individual MV for every 4x4 sub-block.
                for y in 0..4 {
                    for x in 0..4 {
                        self.mvs[mvidx + x] = self.get_split_mv(bc, mb_x, mb_y, x, y, pred_mv);
                    }
                    mvidx += self.mv_stride;
                }
            },
        };
        Ok(())
    }
805
    /// Adds the decoded residue in `self.coeffs` to the predicted
    /// pixels of one macroblock. Callers pass `do_luma = false` when
    /// the luma residue was already applied per 4x4 sub-block (B_PRED
    /// in recon_intra_mb); chroma is always added here.
    fn add_residue(&self, dframe: &mut NASimpleVideoFrame<u8>, mb_x: usize, mb_y: usize, do_luma: bool) {
        if do_luma {
            let ydst = &mut dframe.data[dframe.offset[0]..];
            let ystride = dframe.stride[0];
            let mut yoff = mb_x * 16 + mb_y * 16 * ystride;
            for y in 0..4 {
                for x in 0..4 {
                    add_coeffs4x4(ydst, yoff + x * 4, ystride, &self.coeffs[x + y * 4]);
                }
                yoff += 4 * ystride;
            }
        }
        let dst = &mut dframe.data[0..];
        let mut uoff = dframe.offset[1] + mb_x * 8 + mb_y * 8 * dframe.stride[1];
        let ustride = dframe.stride[1];
        let mut voff = dframe.offset[2] + mb_x * 8 + mb_y * 8 * dframe.stride[2];
        let vstride = dframe.stride[2];
        // Chroma blocks: coeffs[16..20] = U, coeffs[20..24] = V.
        for y in 0..2 {
            for x in 0..2 {
                add_coeffs4x4(dst, uoff + x * 4, ustride, &self.coeffs[16 + x + y * 2]);
                add_coeffs4x4(dst, voff + x * 4, vstride, &self.coeffs[20 + x + y * 2]);
            }
            uoff += ustride * 4;
            voff += vstride * 4;
        }
    }
832 fn recon_intra_mb(&mut self, dframe: &mut NASimpleVideoFrame<u8>, mb_x: usize, mb_y: usize, mb_coeff_skip: bool) -> DecoderResult<()> {
833 let mb_idx = mb_x + mb_y * self.mb_w;
834 let has_top = mb_y > 0;
835 let has_left = mb_x > 0;
836 let ydst = &mut dframe.data[dframe.offset[0]..];
837 let ystride = dframe.stride[0];
838 let mut yoff = mb_x * 16 + mb_y * 16 * ystride;
839 let ipred_ctx_y = &mut self.dstate.ipred_ctx_y;
840 ipred_ctx_y.has_top = has_top;
841 ipred_ctx_y.has_left = has_left;
842 let is_normal = self.mb_info[mb_idx].ymode != PredMode::BPred;
843 if is_normal {
844 ipred_ctx_y.fill(ydst, yoff, ystride, 16, 16);
845 if !has_top && self.mb_info[mb_idx].ymode == PredMode::VPred {
846 IPred16x16::ipred_const(ydst, yoff, ystride, 0x7F)
847 } else if !has_left && self.mb_info[mb_idx].ymode == PredMode::HPred {
848 IPred16x16::ipred_const(ydst, yoff, ystride, 0x81)
849 } else {
850 match self.mb_info[mb_idx].ymode {
851 PredMode::DCPred => IPred16x16::ipred_dc(ydst, yoff, ystride, ipred_ctx_y),
852 PredMode::HPred => IPred16x16::ipred_h (ydst, yoff, ystride, ipred_ctx_y),
853 PredMode::VPred => IPred16x16::ipred_v (ydst, yoff, ystride, ipred_ctx_y),
854 PredMode::TMPred => IPred16x16::ipred_tm(ydst, yoff, ystride, ipred_ctx_y),
855 _ => unreachable!(),
856 };
857 }
858 } else {
859 let mut iidx = mb_x * 4 + mb_y * 4 * self.ymode_stride;
860 let mut tr_save = [0x7Fu8; 16];
861 let tr_edge = if has_top { ydst[yoff - ystride + 15] } else { 0x7F };
862 for y in 0..4 {
863 for x in 0..4 {
864 ipred_ctx_y.has_left = has_left || x > 0;
865 let bmode = self.ymodes[iidx + x];
866 let cur_yoff = yoff + x * 4;
867 let has_tr = ipred_ctx_y.has_top && ((x < 3) || ((y == 0) && (mb_y < self.mb_w - 1)));
868 let has_dl = ipred_ctx_y.has_left && (x == 0) && (y < 3);
869 ipred_ctx_y.fill(ydst, cur_yoff, ystride,
870 if has_tr { 8 } else { 4 },
871 if has_dl { 8 } else { 4 });
872
873 if !has_top && y == 0 && (has_left || x > 0) && bmode != PredMode::TMPred {
874 ipred_ctx_y.top = [0x7F; 16];
875 ipred_ctx_y.tl = 0x7F;
876 }
877 if !has_left && x == 0 && (has_top || y > 0) && bmode != PredMode::TMPred {
878 ipred_ctx_y.left = [0x81; 16];
879 ipred_ctx_y.tl = 0x81;
880 }
881 if !has_left && !has_top && x == 0 && y == 0 && bmode != PredMode::DCPred {
882 ipred_ctx_y.top = [0x7F; 16];
883 ipred_ctx_y.left = [0x81; 16];
884 ipred_ctx_y.tl = 0x7F;
885 }
886
887 if !has_tr {
888 for i in 0..4 {
889 ipred_ctx_y.top[i + 4] = tr_save[x * 4 + i];
890 }
891 } else {
892 for i in 0..4 {
893 tr_save[x * 4 + i] = ipred_ctx_y.top[i + 4];
894 }
895 }
896 if (mb_x == self.mb_w - 1) && has_top && (x == 3) {
897 for i in 0..4 {
898 ipred_ctx_y.top[i + 4] = tr_edge;
899 }
900 }
901 match bmode {
902 PredMode::DCPred => IPred4x4::ipred_dc(ydst, cur_yoff, ystride, ipred_ctx_y),
903 PredMode::TMPred => IPred4x4::ipred_tm(ydst, cur_yoff, ystride, ipred_ctx_y),
904 PredMode::HPred => IPred4x4::ipred_he(ydst, cur_yoff, ystride, ipred_ctx_y),
905 PredMode::VPred => IPred4x4::ipred_ve(ydst, cur_yoff, ystride, ipred_ctx_y),
906 PredMode::LDPred => IPred4x4::ipred_ld(ydst, cur_yoff, ystride, ipred_ctx_y),
907 PredMode::RDPred => IPred4x4::ipred_rd(ydst, cur_yoff, ystride, ipred_ctx_y),
908 PredMode::VRPred => IPred4x4::ipred_vr(ydst, cur_yoff, ystride, ipred_ctx_y),
909 PredMode::VLPred => IPred4x4::ipred_vl(ydst, cur_yoff, ystride, ipred_ctx_y),
910 PredMode::HDPred => IPred4x4::ipred_hd(ydst, cur_yoff, ystride, ipred_ctx_y),
911 PredMode::HUPred => IPred4x4::ipred_hu(ydst, cur_yoff, ystride, ipred_ctx_y),
912 _ => unreachable!(),
913 };
914 if !mb_coeff_skip {
915 add_coeffs4x4(ydst, cur_yoff, ystride, &self.coeffs[x + y * 4]);
916 }
917 }
918 ipred_ctx_y.has_top = true;
919 yoff += 4 * ystride;
920 iidx += self.ymode_stride;
921 }
922 }
923 let dst = &mut dframe.data[0..];
924 let uoff = dframe.offset[1] + mb_x * 8 + mb_y * 8 * dframe.stride[1];
925 let ustride = dframe.stride[1];
926 let voff = dframe.offset[2] + mb_x * 8 + mb_y * 8 * dframe.stride[2];
927 let vstride = dframe.stride[2];
928 let ipred_ctx_u = &mut self.dstate.ipred_ctx_u;
929 let ipred_ctx_v = &mut self.dstate.ipred_ctx_v;
930 ipred_ctx_u.has_top = has_top;
931 ipred_ctx_v.has_top = has_top;
932 ipred_ctx_u.has_left = has_left;
933 ipred_ctx_v.has_left = has_left;
934 ipred_ctx_u.fill(dst, uoff, ustride, 8, 8);
935 ipred_ctx_v.fill(dst, voff, vstride, 8, 8);
936
937 if !has_top && self.mb_info[mb_idx].uvmode == PredMode::VPred {
938 IPred8x8::ipred_const(dst, uoff, ustride, 0x7F);
939 IPred8x8::ipred_const(dst, voff, vstride, 0x7F);
940 } else if !has_left && self.mb_info[mb_idx].uvmode == PredMode::HPred {
941 IPred8x8::ipred_const(dst, uoff, ustride, 0x81);
942 IPred8x8::ipred_const(dst, voff, vstride, 0x81);
943 } else {
944 match self.mb_info[mb_idx].uvmode {
945 PredMode::DCPred => {
946 IPred8x8::ipred_dc(dst, uoff, ustride, ipred_ctx_u);
947 IPred8x8::ipred_dc(dst, voff, vstride, ipred_ctx_v);
948 },
949 PredMode::HPred => {
950 IPred8x8::ipred_h(dst, uoff, ustride, ipred_ctx_u);
951 IPred8x8::ipred_h(dst, voff, vstride, ipred_ctx_v);
952 },
953 PredMode::VPred => {
954 IPred8x8::ipred_v(dst, uoff, ustride, ipred_ctx_u);
955 IPred8x8::ipred_v(dst, voff, vstride, ipred_ctx_v);
956 },
957 PredMode::TMPred => {
958 IPred8x8::ipred_tm(dst, uoff, ustride, ipred_ctx_u);
959 IPred8x8::ipred_tm(dst, voff, vstride, ipred_ctx_v);
960 },
961 _ => unreachable!(),
962 };
963 }
964 if !mb_coeff_skip {
965 self.add_residue(dframe, mb_x, mb_y, is_normal);
966 }
967 Ok(())
968 }
    /// Reconstructs an inter-coded macroblock: motion compensation for
    /// luma (one 16x16 block or sixteen 4x4 blocks in split-MV mode)
    /// and chroma, then the residue unless the MB is skipped.
    /// Bitstream version 0 uses the standard MC filters, later versions
    /// the bilinear ones; version 3 additionally rounds chroma MVs to
    /// full-pel.
    fn recon_inter_mb(&mut self, dframe: &mut NASimpleVideoFrame<u8>, mb_x: usize, mb_y: usize, mb_coeff_skip: bool, rframe: VP8Ref) {
        let refframe = match rframe {
                VP8Ref::Last   => self.shuf.get_last(),
                VP8Ref::Golden => self.shuf.get_golden(),
                VP8Ref::AltRef => self.shuf.get_altref(),
                VP8Ref::Intra  => unreachable!(),
            }.unwrap();
        let single_mv = self.mb_info[mb_x + mb_y * self.mb_w].mb_type != VPMBType::InterFourMV;
        let mut iidx = mb_x * 4 + mb_y * 4 * self.mv_stride;
        let mut mc_buf = self.mc_buf.get_data_mut().unwrap();

        // Luma MC. MVs are stored in quarter-pel units; luma is
        // compensated at eighth-pel precision, hence the `* 2`.
        let dst = &mut dframe.data[0..];
        let ystride = dframe.stride[0];
        let mut yoff = dframe.offset[0] + mb_x * 16 + mb_y * 16 * ystride;
        if single_mv {
            if self.dstate.version == 0 {
                mc_block16x16(dst, yoff, ystride, mb_x * 16, mb_y * 16,
                              self.mvs[iidx].x * 2, self.mvs[iidx].y * 2, refframe.clone(), 0, &mut mc_buf);
            } else {
                mc_block16x16_bilin(dst, yoff, ystride, mb_x * 16, mb_y * 16,
                                    self.mvs[iidx].x * 2, self.mvs[iidx].y * 2, refframe.clone(), 0, &mut mc_buf);
            }
        } else {
            // Split-MV: compensate each 4x4 sub-block with its own MV.
            for y in 0..4 {
                for x in 0..4 {
                    if self.dstate.version == 0 {
                        mc_block4x4(dst, yoff + x * 4, ystride, mb_x * 16 + x * 4, mb_y * 16 + y * 4,
                                    self.mvs[iidx + x].x * 2, self.mvs[iidx + x].y * 2, refframe.clone(), 0, &mut mc_buf);
                    } else {
                        mc_block4x4_bilin(dst, yoff + x * 4, ystride, mb_x * 16 + x * 4, mb_y * 16 + y * 4,
                                          self.mvs[iidx + x].x * 2, self.mvs[iidx + x].y * 2, refframe.clone(), 0, &mut mc_buf);
                    }
                }
                yoff += 4 * ystride;
                iidx += self.mv_stride;
            }
        }

        // Chroma MC at half resolution; MVs are used as-is (the plane
        // is half-sized, so quarter-pel luma units become eighth-pel
        // chroma units).
        let mut iidx = mb_x * 4 + mb_y * 4 * self.mv_stride;
        let mut uoff = dframe.offset[1] + mb_x * 8 + mb_y * 8 * dframe.stride[1];
        let ustride = dframe.stride[1];
        let mut voff = dframe.offset[2] + mb_x * 8 + mb_y * 8 * dframe.stride[2];
        let vstride = dframe.stride[2];
        if single_mv {
            let mut chroma_mv = self.mvs[iidx];

            if self.dstate.version == 0 {
                mc_block8x8(dst, uoff, ustride, mb_x * 8, mb_y * 8, chroma_mv.x, chroma_mv.y, refframe.clone(), 1, &mut mc_buf);
                mc_block8x8(dst, voff, vstride, mb_x * 8, mb_y * 8, chroma_mv.x, chroma_mv.y, refframe, 2, &mut mc_buf);
            } else {
                // Version 3 rounds chroma MVs to full-pel positions.
                if self.dstate.version == 3 {
                    chroma_mv.x &= !7;
                    chroma_mv.y &= !7;
                }
                mc_block8x8_bilin(dst, uoff, ustride, mb_x * 8, mb_y * 8, chroma_mv.x, chroma_mv.y, refframe.clone(), 1, &mut mc_buf);
                mc_block8x8_bilin(dst, voff, vstride, mb_x * 8, mb_y * 8, chroma_mv.x, chroma_mv.y, refframe, 2, &mut mc_buf);
            }
        } else {
            // Split-MV chroma: each 4x4 chroma block uses the rounded
            // average of the four corresponding luma MVs.
            for y in 0..2 {
                for x in 0..2 {
                    let mut chroma_mv = self.mvs[iidx + x * 2] + self.mvs[iidx + x * 2 + 1]
                                      + self.mvs[iidx + x * 2 + self.mv_stride]
                                      + self.mvs[iidx + x * 2 + self.mv_stride + 1];
                    // Round-to-nearest, away-from-zero-aware bias before
                    // dividing the component sums by four.
                    if chroma_mv.x < 0 {
                        chroma_mv.x += 1;
                    } else {
                        chroma_mv.x += 2;
                    }
                    if chroma_mv.y < 0 {
                        chroma_mv.y += 1;
                    } else {
                        chroma_mv.y += 2;
                    }
                    chroma_mv.x >>= 2;
                    chroma_mv.y >>= 2;

                    if self.dstate.version == 3 {
                        chroma_mv.x &= !7;
                        chroma_mv.y &= !7;
                    }

                    if self.dstate.version == 0 {
                        mc_block4x4(dst, uoff + x * 4, ustride, mb_x * 8 + x * 4, mb_y * 8 + y * 4,
                                    chroma_mv.x, chroma_mv.y, refframe.clone(), 1, &mut mc_buf);
                        mc_block4x4(dst, voff + x * 4, vstride, mb_x * 8 + x * 4, mb_y * 8 + y * 4,
                                    chroma_mv.x, chroma_mv.y, refframe.clone(), 2, &mut mc_buf);
                    } else {
                        mc_block4x4_bilin(dst, uoff + x * 4, ustride, mb_x * 8 + x * 4, mb_y * 8 + y * 4,
                                          chroma_mv.x, chroma_mv.y, refframe.clone(), 1, &mut mc_buf);
                        mc_block4x4_bilin(dst, voff + x * 4, vstride, mb_x * 8 + x * 4, mb_y * 8 + y * 4,
                                          chroma_mv.x, chroma_mv.y, refframe.clone(), 2, &mut mc_buf);
                    }
                }
                uoff += ustride * 4;
                voff += vstride * 4;
                iidx += 2 * self.mv_stride;
            }
        }
        if !mb_coeff_skip {
            self.add_residue(dframe, mb_x, mb_y, true);
        }
    }
    fn loop_filter_mb(&mut self, dframe: &mut NASimpleVideoFrame<u8>, mb_x: usize, mb_y: usize, loop_str: u8, filter_inner: bool) {
        // Applies the in-loop deblocking filter to one 16x16 macroblock.
        // `loop_str` is the per-MB filter level (0..63), `filter_inner`
        // selects whether the interior 4-pixel block edges are filtered too
        // (skipped MBs without coefficients keep their interior untouched).
        //
        // High-edge-variance thresholds indexed by [frame type][loop_str]:
        // row 0 is used for inter frames, row 1 for intra frames.
        const HIGH_EDGE_VAR_THR: [[u8; 64]; 2] = [
          [
            0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1,
            1, 1, 1, 1, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
            2, 2, 2, 2, 2, 2, 2, 2, 3, 3, 3, 3, 3, 3, 3, 3,
            3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3
          ], [
            0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1,
            1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
            1, 1, 1, 1, 1, 1, 1, 1, 2, 2, 2, 2, 2, 2, 2, 2,
            2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2
          ]];

        // Interior edge limit: the sharpness setting both caps and shifts
        // down the filter level (but never below 1).
        let inner_thr = if self.dstate.loop_sharpness == 0 {
                i16::from(loop_str)
            } else {
                let bound1 = i16::from(9 - self.dstate.loop_sharpness);
                let shift = (self.dstate.loop_sharpness + 3) >> 2;
                (i16::from(loop_str) >> shift).min(bound1).max(1)
            };
        // Edge limits: interior block edges use blk_thr, macroblock edges
        // use the slightly looser edge_thr.
        let blk_thr = i16::from(loop_str) * 2 + inner_thr;
        let edge_thr = blk_thr + 4;
        let hev_thr = i16::from(HIGH_EDGE_VAR_THR[if self.dstate.is_intra { 1 } else { 0 }][loop_str as usize]);

        let ystride = dframe.stride[0];
        let ustride = dframe.stride[1];
        let vstride = dframe.stride[2];
        let ypos = dframe.offset[0] + mb_x * 16 + mb_y * 16 * ystride;
        let upos = dframe.offset[1] + mb_x * 8 + mb_y * 8 * ustride;
        let vpos = dframe.offset[2] + mb_x * 8 + mb_y * 8 * vstride;

        // The simple profile filter uses the same (cheaper) kernel for both
        // macroblock edges and interior edges, and only touches luma.
        let (loop_edge, loop_inner) = if self.dstate.lf_simple {
                (simple_loop_filter as LoopFilterFunc, simple_loop_filter as LoopFilterFunc)
            } else {
                (normal_loop_filter_edge as LoopFilterFunc, normal_loop_filter_inner as LoopFilterFunc)
            };

        // Vertical edges first: left macroblock edge...
        if mb_x > 0 {
            loop_edge(dframe.data, ypos, 1, ystride, 16, edge_thr, inner_thr, hev_thr);
            if !self.dstate.lf_simple {
                loop_edge(dframe.data, upos, 1, ustride, 8, edge_thr, inner_thr, hev_thr);
                loop_edge(dframe.data, vpos, 1, vstride, 8, edge_thr, inner_thr, hev_thr);
            }
        }
        // ...then the three interior luma columns (one interior column for chroma).
        if filter_inner {
            for x in 1..4 {
                loop_inner(dframe.data, ypos + x * 4, 1, ystride, 16, blk_thr, inner_thr, hev_thr);
            }
            if !self.dstate.lf_simple {
                loop_inner(dframe.data, upos + 4, 1, ustride, 8, blk_thr, inner_thr, hev_thr);
                loop_inner(dframe.data, vpos + 4, 1, vstride, 8, blk_thr, inner_thr, hev_thr);
            }
        }

        // Horizontal edges second: top macroblock edge...
        if mb_y > 0 {
            loop_edge(dframe.data, ypos, ystride, 1, 16, edge_thr, inner_thr, hev_thr);
            if !self.dstate.lf_simple {
                loop_edge(dframe.data, upos, ustride, 1, 8, edge_thr, inner_thr, hev_thr);
                loop_edge(dframe.data, vpos, vstride, 1, 8, edge_thr, inner_thr, hev_thr);
            }
        }
        // ...then the interior rows.
        if filter_inner {
            for y in 1..4 {
                loop_inner(dframe.data, ypos + y * 4 * ystride, ystride, 1, 16, blk_thr, inner_thr, hev_thr);
            }
            if !self.dstate.lf_simple {
                loop_inner(dframe.data, upos + 4 * ustride, ustride, 1, 8, blk_thr, inner_thr, hev_thr);
                loop_inner(dframe.data, vpos + 4 * vstride, vstride, 1, 8, blk_thr, inner_thr, hev_thr);
            }
        }
    }
1143 }
1144
1145 impl NADecoder for VP8Decoder {
1146 fn init(&mut self, supp: &mut NADecoderSupport, info: NACodecInfoRef) -> DecoderResult<()> {
1147 if let NACodecTypeInfo::Video(vinfo) = info.get_properties() {
1148 let fmt = YUV420_FORMAT;
1149 let myvinfo = NAVideoInfo::new(vinfo.get_width(), vinfo.get_height(), false, fmt);
1150 let myinfo = NACodecTypeInfo::Video(myvinfo);
1151 self.info = NACodecInfo::new_ref(info.get_name(), myinfo, info.get_extradata()).into_ref();
1152
1153 supp.pool_u8.set_dec_bufs(5);
1154 supp.pool_u8.prealloc_video(NAVideoInfo::new(myvinfo.get_width(), myvinfo.get_height(), false, vinfo.get_format()), 4)?;
1155 self.set_dimensions(myvinfo.get_width(), myvinfo.get_height());
1156 Ok(())
1157 } else {
1158 Err(DecoderError::InvalidData)
1159 }
1160 }
1161 #[allow(clippy::cognitive_complexity)]
1162 fn decode(&mut self, supp: &mut NADecoderSupport, pkt: &NAPacket) -> DecoderResult<NAFrameRef> {
1163 let src = pkt.get_buffer();
1164 validate!(src.len() > 4);
1165
1166 let frame_tag = read_u24le(src.as_slice())?;
1167 self.dstate.is_intra = (frame_tag & 1) == 0;
1168 self.dstate.version = ((frame_tag >> 1) & 7) as u8;
1169 validate!(self.dstate.version <= 3);
1170 let _show_frame = ((frame_tag >> 4) & 1) != 0;
1171 let part1_off = if self.dstate.is_intra { 10 } else { 3 };
1172 let part2_off = (frame_tag >> 5) as usize;
1173 validate!(src.len() >= part2_off && part2_off > part1_off);
1174
1175 if self.dstate.is_intra {
1176 validate!(src.len() > 10);
1177 let marker = read_u24be(&src[3..6])?;
1178 validate!(marker == 0x9D012A);
1179 let width_ = read_u16le(&src[6..8])?;
1180 let height_ = read_u16le(&src[8..10])?;
1181 let width = ((width_ + 1) & 0x3FFE) as usize;
1182 let height = ((height_ + 1) & 0x3FFE) as usize;
1183 // let hscale = width_ >> 14;
1184 // let vscale = height_ >> 14;
1185
1186 validate!((width > 0) && (height > 0));
1187 self.set_dimensions(width, height);
1188
1189 self.dstate.reset();
1190 } else {
1191 if !self.shuf.has_refs() {
1192 return Err(DecoderError::MissingReference);
1193 }
1194 }
1195
1196 let mut bc = BoolCoder::new(&src[part1_off..][..part2_off])?;
1197
1198 if self.dstate.is_intra {
1199 let _color_space = bc.read_bool();
1200 let _clamping_type = bc.read_bool();
1201 }
1202
1203 self.dstate.segmentation = bc.read_bool();
1204 if self.dstate.segmentation {
1205 self.update_segmentation(&mut bc)?;
1206 } else {
1207 self.dstate.update_seg_map = false;
1208 self.dstate.force_quant = None;
1209 self.dstate.force_loop_str = None;
1210 }
1211
1212 self.dstate.lf_simple = bc.read_bool();
1213 self.dstate.loop_filter_level = bc.read_bits(6) as u8;
1214 self.dstate.loop_sharpness = bc.read_bits(3) as u8;
1215
1216 self.mb_lf_adjustments(&mut bc)?;
1217
1218 let num_partitions = 1 << bc.read_bits(2);
1219
1220 self.quant_indices(&mut bc)?;
1221
1222 let (keep_probs, update_last, update_gf, update_ar) = if self.dstate.is_intra {
1223 let refresh_entropy_probs = bc.read_bool();
1224 (refresh_entropy_probs, true, 4, 4)
1225 } else {
1226 let refresh_golden_frame = bc.read_bool();
1227 let refresh_alternate_frame = bc.read_bool();
1228 let copy_to_golden = if !refresh_golden_frame {
1229 bc.read_bits(2)
1230 } else { 4 };
1231 validate!(copy_to_golden != 3);
1232 let copy_to_altref = if !refresh_alternate_frame {
1233 bc.read_bits(2)
1234 } else { 4 };
1235 validate!(copy_to_altref != 3);
1236 self.dstate.sign_bias[0] = bc.read_bool();
1237 self.dstate.sign_bias[1] = bc.read_bool();
1238 let refresh_entropy_probs = bc.read_bool();
1239 let refresh_last = bc.read_bool();
1240 (refresh_entropy_probs, refresh_last, copy_to_golden, copy_to_altref)
1241 };
1242
1243 if !keep_probs {
1244 self.dstate.save(&mut self.tmp_probs);
1245 }
1246
1247 self.read_dct_coef_prob_upd(&mut bc)?;
1248
1249 let mb_no_coeff_skip = bc.read_bool();
1250 let prob_skip_false = bc.read_byte();
1251
1252 if !self.dstate.is_intra {
1253 self.dstate.prob_intra_pred = bc.read_byte();
1254 self.dstate.prob_last_pred = bc.read_byte();
1255 self.dstate.prob_gold_pred = bc.read_byte();
1256 if bc.read_bool() {
1257 for i in 0..4 {
1258 self.dstate.kf_ymode_prob[i] = bc.read_byte();
1259 }
1260 }
1261 if bc.read_bool() {
1262 for i in 0..3 {
1263 self.dstate.kf_uvmode_prob[i] = bc.read_byte();
1264 }
1265 }
1266 self.read_mv_prob_upd(&mut bc)?;
1267 }
1268
1269 let mut data_start = part1_off + part2_off + (num_partitions - 1) * 3;
1270 let mut part_offs = [0; 8];
1271 validate!(data_start <= src.len());
1272 let mut size = src.len() - data_start;
1273 for i in 0..num_partitions - 1 {
1274 let len = read_u24le(&src[part1_off + part2_off + i * 3..][..3])? as usize;
1275 validate!(size >= len);
1276 part_offs[i] = data_start;
1277 data_start += len;
1278 size -= len;
1279 }
1280 part_offs[num_partitions - 1] = data_start;
1281 for start in part_offs[num_partitions..].iter_mut() {
1282 *start = data_start;
1283 }
1284 let mut bc_src = unsafe {
1285 let mut arr: [BoolCoder; 8] = std::mem::MaybeUninit::uninit().assume_init();
1286 for (bc, &off) in arr.iter_mut().zip(part_offs.iter()) {
1287 std::ptr::write(bc, BoolCoder::new(&src[off..]).unwrap());
1288 }
1289 arr
1290 };
1291
1292 let vinfo = NAVideoInfo::new(self.width, self.height, false, YUV420_FORMAT);
1293 let ret = supp.pool_u8.get_free();
1294 if ret.is_none() {
1295 return Err(DecoderError::AllocError);
1296 }
1297 let mut buf = ret.unwrap();
1298 if buf.get_info() != vinfo {
1299 self.shuf.clear();
1300 supp.pool_u8.reset();
1301 supp.pool_u8.prealloc_video(vinfo, 4)?;
1302 let ret = supp.pool_u8.get_free();
1303 if ret.is_none() {
1304 return Err(DecoderError::AllocError);
1305 }
1306 buf = ret.unwrap();
1307 }
1308 let mut dframe = NASimpleVideoFrame::from_video_buf(&mut buf).unwrap();
1309
1310 let mut mb_idx = 0;
1311 self.pcache.reset();
1312 let mut rframe = VP8Ref::Last;
1313 let loop_filter = self.dstate.version != 3 && self.dstate.loop_filter_level > 0;
1314 for mb_y in 0..self.mb_h {
1315 let bc_main = &mut bc_src[mb_y & (num_partitions - 1)];
1316 for mb_x in 0..self.mb_w {
1317 if self.dstate.update_seg_map {
1318 self.decode_mb_features(&mut bc, mb_idx)?;
1319 }
1320 if self.dstate.segmentation {
1321 self.set_cur_segment(mb_idx);
1322 }
1323 let mb_coeff_skip = if mb_no_coeff_skip {
1324 bc.read_prob(prob_skip_false)
1325 } else { false };
1326 self.dstate.has_y2 = true;
1327 if self.dstate.is_intra {
1328 let ymode = bc.read_tree(KF_Y_MODE_TREE, KF_Y_MODE_TREE_PROBS);
1329 if ymode == PredMode::BPred {
1330 self.dstate.has_y2 = false;
1331 let mut iidx = mb_x * 4 + mb_y * 4 * self.ymode_stride;
1332 for y in 0..4 {
1333 for x in 0..4 {
1334 let top_mode = if (y > 0) || (mb_y > 0) {
1335 self.ymodes[iidx + x - self.ymode_stride]
1336 } else {
1337 PredMode::DCPred
1338 };
1339 let left_mode = if (x > 0) || (mb_x > 0) {
1340 self.ymodes[iidx + x - 1]
1341 } else {
1342 PredMode::DCPred
1343 };
1344 let top_idx = top_mode.to_b_index();
1345 let left_idx = left_mode.to_b_index();
1346 let bmode = bc.read_tree(B_MODE_TREE, &KF_B_MODE_TREE_PROBS[top_idx][left_idx]);
1347 self.ymodes[iidx + x] = bmode;
1348 }
1349 iidx += self.ymode_stride;
1350 }
1351 } else {
1352 self.fill_ymode(mb_x, mb_y, ymode);
1353 }
1354 let uvmode = bc.read_tree(UV_MODE_TREE, KF_UV_MODE_TREE_PROBS);
1355 self.mb_info[mb_idx].mb_type = VPMBType::Intra;
1356 self.mb_info[mb_idx].ymode = ymode;
1357 self.mb_info[mb_idx].uvmode = uvmode;
1358 self.mb_info[mb_idx].rframe = VP8Ref::Intra;
1359 } else if !bc.read_prob(self.dstate.prob_intra_pred) {
1360 let ymode = bc.read_tree(Y_MODE_TREE, &self.dstate.kf_ymode_prob);
1361 if ymode == PredMode::BPred {
1362 self.dstate.has_y2 = false;
1363 let mut iidx = mb_x * 4 + mb_y * 4 * self.ymode_stride;
1364 for _y in 0..4 {
1365 for x in 0..4 {
1366 let bmode = bc.read_tree(B_MODE_TREE, B_MODE_TREE_PROBS);
1367 self.ymodes[iidx + x] = bmode;
1368 }
1369 iidx += self.ymode_stride;
1370 }
1371 } else {
1372 self.fill_ymode(mb_x, mb_y, PredMode::Inter);
1373 }
1374 let uvmode = bc.read_tree(UV_MODE_TREE, &self.dstate.kf_uvmode_prob);
1375 self.mb_info[mb_idx].mb_type = VPMBType::Intra;
1376 self.mb_info[mb_idx].ymode = ymode;
1377 self.mb_info[mb_idx].uvmode = uvmode;
1378 self.mb_info[mb_idx].rframe = VP8Ref::Intra;
1379 self.fill_mv(mb_x, mb_y, ZERO_MV);
1380 } else {
1381 rframe = if !bc.read_prob(self.dstate.prob_last_pred) {
1382 VP8Ref::Last
1383 } else if !bc.read_prob(self.dstate.prob_gold_pred) {
1384 VP8Ref::Golden
1385 } else {
1386 VP8Ref::AltRef
1387 };
1388
1389 let frm_sign = self.get_frame_sign(rframe);
1390 let (mvprobs, nearest_mv, near_mv, pred_mv) = self.find_mv_pred(mb_x, mb_y, frm_sign);
1391 let mbtype = bc.read_tree(MV_REF_TREE, &mvprobs);
1392
1393 match mbtype {
1394 VPMBType::InterNearest => {
1395 self.fill_mv(mb_x, mb_y, nearest_mv);
1396 },
1397 VPMBType::InterNear => {
1398 self.fill_mv(mb_x, mb_y, near_mv);
1399 },
1400 VPMBType::InterNoMV => {
1401 self.fill_mv(mb_x, mb_y, ZERO_MV);
1402 },
1403 VPMBType::InterMV => {
1404 let dmy = decode_mv_component(&mut bc, &self.dstate.mv_probs[0]);
1405 let dmx = decode_mv_component(&mut bc, &self.dstate.mv_probs[1]);
1406 let new_mv = pred_mv + MV{ x: dmx, y: dmy };
1407 self.fill_mv(mb_x, mb_y, new_mv);
1408 },
1409 VPMBType::InterFourMV => {
1410 self.dstate.has_y2 = false;
1411 self.do_split_mv(&mut bc, mb_x, mb_y, pred_mv)?;
1412 },
1413 _ => unreachable!(),
1414 };
1415
1416 self.fill_ymode(mb_x, mb_y, PredMode::Inter);
1417 self.mb_info[mb_idx].mb_type = mbtype;
1418 self.mb_info[mb_idx].ymode = PredMode::Inter;
1419 self.mb_info[mb_idx].uvmode = PredMode::Inter;
1420 self.mb_info[mb_idx].rframe = rframe;
1421 }
1422 let has_coeffs = if !mb_coeff_skip {
1423 self.decode_residue(bc_main, mb_x)
1424 } else {
1425 let y2_left = self.pcache.y2_pred_left;
1426 let y2_top = self.pcache.y2_pred.data[self.pcache.y2_pred.xpos + mb_x - self.pcache.y2_pred.stride];
1427 self.pcache.reset_left();
1428 if !self.dstate.has_y2 {
1429 self.pcache.y2_pred_left = y2_left;
1430 self.pcache.y2_pred.data[self.pcache.y2_pred.xpos + mb_x] = y2_top;
1431 }
1432 false
1433 };
1434 match self.mb_info[mb_idx].mb_type {
1435 VPMBType::Intra => {
1436 self.recon_intra_mb(&mut dframe, mb_x, mb_y, mb_coeff_skip)?;
1437 },
1438 _ => {
1439 self.recon_inter_mb(&mut dframe, mb_x, mb_y, mb_coeff_skip, rframe);
1440 },
1441 }
1442 if loop_filter {
1443 if let Some(loop_str) = self.dstate.force_loop_str {
1444 self.mb_info[mb_idx].loop_str = loop_str;
1445 } else {
1446 self.mb_info[mb_idx].loop_str = self.dstate.loop_filter_level;
1447 }
1448 if self.dstate.lf_delta {
1449 let mut loop_str = self.mb_info[mb_idx].loop_str as i8;
1450 let idx = match self.mb_info[mb_idx].rframe {
1451 VP8Ref::Intra => 0,
1452 VP8Ref::Last => 1,
1453 VP8Ref::Golden => 2,
1454 VP8Ref::AltRef => 3,
1455 };
1456 loop_str += self.dstate.lf_frame_delta[idx];
1457 let idx = match self.mb_info[mb_idx].mb_type {
1458 VPMBType::Intra => 0,
1459 VPMBType::InterNoMV => 1,
1460 VPMBType::InterFourMV => 3,
1461 _ => 2,
1462 };
1463 if self.mb_info[mb_idx].mb_type != VPMBType::Intra || self.mb_info[mb_idx].ymode == PredMode::BPred {
1464 loop_str += self.dstate.lf_mode_delta[idx];
1465 }
1466 self.mb_info[mb_idx].loop_str = loop_str.max(0).min(63) as u8;
1467 }
1468 self.mb_info[mb_idx].inner_filt = has_coeffs || (self.mb_info[mb_idx].ymode == PredMode::BPred) || (self.mb_info[mb_idx].mb_type == VPMBType::InterFourMV);
1469 }
1470 mb_idx += 1;
1471 }
1472 self.pcache.update_row();
1473 self.pcache.reset_left();
1474 }
1475 if loop_filter {
1476 let mut mb_idx = 0;
1477 for mb_y in 0..self.mb_h {
1478 for mb_x in 0..self.mb_w {
1479 let loop_str = self.mb_info[mb_idx].loop_str;
1480 if loop_str > 0 {
1481 self.loop_filter_mb(&mut dframe, mb_x, mb_y, loop_str, self.mb_info[mb_idx].inner_filt);
1482 }
1483 mb_idx += 1;
1484 }
1485 }
1486 }
1487
1488 if !keep_probs {
1489 self.dstate.restore(&self.tmp_probs);
1490 }
1491
1492 match update_ar {
1493 1 => {
1494 let last = self.shuf.get_last().unwrap();
1495 self.shuf.add_altref_frame(last);
1496 },
1497 2 => {
1498 let golden = self.shuf.get_golden().unwrap();
1499 self.shuf.add_altref_frame(golden);
1500 },
1501 _ => {},
1502 };
1503 match update_gf {
1504 4 => self.shuf.add_golden_frame(buf.clone()),
1505 1 => {
1506 let last = self.shuf.get_last().unwrap();
1507 self.shuf.add_golden_frame(last);
1508 },
1509 2 => {
1510 let altref = self.shuf.get_altref().unwrap();
1511 self.shuf.add_golden_frame(altref);
1512 },
1513 _ => {},
1514 };
1515 if update_ar == 4 {
1516 self.shuf.add_altref_frame(buf.clone());
1517 }
1518 if update_last {
1519 self.shuf.add_frame(buf.clone());
1520 }
1521
1522 let mut frm = NAFrame::new_from_pkt(pkt, self.info.clone(), NABufferType::Video(buf));
1523 frm.set_keyframe(self.dstate.is_intra);
1524 frm.set_frame_type(if self.dstate.is_intra { FrameType::I } else { FrameType::P });
1525 Ok(frm.into_ref())
1526 }
1527 fn flush(&mut self) {
1528 self.shuf.clear();
1529 }
1530 }
1531
1532 impl NAOptionHandler for VP8Decoder {
1533 fn get_supported_options(&self) -> &[NAOptionDefinition] { &[] }
1534 fn set_options(&mut self, _options: &[NAOption]) { }
1535 fn query_option_value(&self, _name: &str) -> Option<NAValue> { None }
1536 }
1537
1538 pub fn get_decoder() -> Box<dyn NADecoder + Send> {
1539 Box::new(VP8Decoder::new())
1540 }
1541
#[cfg(test)]
mod test {
    // Regression tests decoding the libvpx "vp80-00-comprehensive" conformance
    // streams and comparing per-run MD5 hashes of the decoded output.
    // The commented-out tests reference streams with no stored hash yet.
    use nihav_core::codecs::RegisteredDecoders;
    use nihav_core::demuxers::RegisteredDemuxers;
    use nihav_codec_support::test::dec_video::*;
    use crate::duck_register_all_decoders;
    use crate::duck_register_all_demuxers;

    // Decodes the named IVF file with the registered VP8 decoder and asserts
    // the output hash matches the stored reference value.
    fn test_vp8_core(name: &str, hash: [u32; 4]) {
        let mut dmx_reg = RegisteredDemuxers::new();
        duck_register_all_demuxers(&mut dmx_reg);
        let mut dec_reg = RegisteredDecoders::new();
        duck_register_all_decoders(&mut dec_reg);

        test_decoding("ivf", "vp8", name, None, &dmx_reg,
                      &dec_reg, ExpectedTestResult::MD5(hash));
    }

    #[test]
    fn test_vp8_01() {
        test_vp8_core("assets/Duck/VP8/vp80-00-comprehensive-001.ivf",
                      [0xfad12607, 0x4e1bd536, 0x3d43b9d1, 0xcadddb71]);
    }
    #[test]
    fn test_vp8_02() {
        test_vp8_core("assets/Duck/VP8/vp80-00-comprehensive-002.ivf",
                      [0x182f03dd, 0x264ebac0, 0x4e24c7c9, 0x499d7cdb]);
    }
    #[test]
    fn test_vp8_03() {
        test_vp8_core("assets/Duck/VP8/vp80-00-comprehensive-003.ivf",
                      [0xe5fe668b, 0x03390002, 0x2c3eb0ba, 0x76a44bd1]);
    }
    #[test]
    fn test_vp8_04() {
        test_vp8_core("assets/Duck/VP8/vp80-00-comprehensive-004.ivf",
                      [0x95097ce9, 0x808c1d47, 0xe03f99c4, 0x8ad111ec]);
    }
    #[test]
    fn test_vp8_05() {
        test_vp8_core("assets/Duck/VP8/vp80-00-comprehensive-005.ivf",
                      [0x0f469e4f, 0xd1dea533, 0xe5580688, 0xb2d242ff]);
    }
    /*#[test]
    fn test_vp8_06() {
        test_vp8_core("assets/Duck/VP8/vp80-00-comprehensive-006.ivf",
                      [0;4]);
    }*/
    #[test]
    fn test_vp8_07() {
        test_vp8_core("assets/Duck/VP8/vp80-00-comprehensive-007.ivf",
                      [0x92526913, 0xd89b6a9b, 0x00f2d602, 0xdef08bce]);
    }
    #[test]
    fn test_vp8_08() {
        test_vp8_core("assets/Duck/VP8/vp80-00-comprehensive-008.ivf",
                      [0x1676d1eb, 0x19bd175e, 0xc5bb10f5, 0xd49f24f1]);
    }
    #[test]
    fn test_vp8_09() {
        test_vp8_core("assets/Duck/VP8/vp80-00-comprehensive-009.ivf",
                      [0x19201a2d, 0x535bd82f, 0x41c1a565, 0x8def5379]);
    }
    #[test]
    fn test_vp8_10() {
        test_vp8_core("assets/Duck/VP8/vp80-00-comprehensive-010.ivf",
                      [0x61d05919, 0xa9883d9f, 0x215eb3f2, 0xdb63eb13]);
    }
    #[test]
    fn test_vp8_11() {
        test_vp8_core("assets/Duck/VP8/vp80-00-comprehensive-011.ivf",
                      [0x1a0afe5e, 0x70512a03, 0x323a8f11, 0x76bcf022]);
    }
    #[test]
    fn test_vp8_12() {
        test_vp8_core("assets/Duck/VP8/vp80-00-comprehensive-012.ivf",
                      [0x4ea997c8, 0x0dc2087e, 0x6deec81f, 0x1ecf6668]);
    }
    #[test]
    fn test_vp8_13() {
        test_vp8_core("assets/Duck/VP8/vp80-00-comprehensive-013.ivf",
                      [0x93169305, 0xd3054327, 0xbe3cc074, 0xf0773a75]);
    }
    /*#[test]
    fn test_vp8_14() {
        test_vp8_core("assets/Duck/VP8/vp80-00-comprehensive-014.ivf",
                      [0;4]);
    }*/
    #[test]
    fn test_vp8_15() {
        test_vp8_core("assets/Duck/VP8/vp80-00-comprehensive-015.ivf",
                      [0x23b9cc58, 0x2e344726, 0xe76cda09, 0x2b416bcf]);
    }
    #[test]
    fn test_vp8_16() {
        test_vp8_core("assets/Duck/VP8/vp80-00-comprehensive-016.ivf",
                      [0x55e889d2, 0x2f99718c, 0xf6936d55, 0xf8ade12b]);
    }
    #[test]
    fn test_vp8_17() {
        test_vp8_core("assets/Duck/VP8/vp80-00-comprehensive-017.ivf",
                      [0x95a68ffb, 0x228d1d8c, 0x6ee54f16, 0xa10fb9eb]);
    }
}
1646
// DC dequantization factors indexed by quantizer index 0..127
// (VP8 dequantization tables; see RFC 6386 section 14.1).
const DC_QUANTS: [i16; 128] = [
     4,   5,   6,   7,   8,   9,  10,  10,
    11,  12,  13,  14,  15,  16,  17,  17,
    18,  19,  20,  20,  21,  21,  22,  22,
    23,  23,  24,  25,  25,  26,  27,  28,
    29,  30,  31,  32,  33,  34,  35,  36,
    37,  37,  38,  39,  40,  41,  42,  43,
    44,  45,  46,  46,  47,  48,  49,  50,
    51,  52,  53,  54,  55,  56,  57,  58,
    59,  60,  61,  62,  63,  64,  65,  66,
    67,  68,  69,  70,  71,  72,  73,  74,
    75,  76,  76,  77,  78,  79,  80,  81,
    82,  83,  84,  85,  86,  87,  88,  89,
    91,  93,  95,  96,  98, 100, 101, 102,
   104, 106, 108, 110, 112, 114, 116, 118,
   122, 124, 126, 128, 130, 132, 134, 136,
   138, 140, 143, 145, 148, 151, 154, 157
];
1665
// AC dequantization factors indexed by quantizer index 0..127
// (VP8 dequantization tables; see RFC 6386 section 14.1).
const AC_QUANTS: [i16; 128] = [
     4,   5,   6,   7,   8,   9,  10,  11,
    12,  13,  14,  15,  16,  17,  18,  19,
    20,  21,  22,  23,  24,  25,  26,  27,
    28,  29,  30,  31,  32,  33,  34,  35,
    36,  37,  38,  39,  40,  41,  42,  43,
    44,  45,  46,  47,  48,  49,  50,  51,
    52,  53,  54,  55,  56,  57,  58,  60,
    62,  64,  66,  68,  70,  72,  74,  76,
    78,  80,  82,  84,  86,  88,  90,  92,
    94,  96,  98, 100, 102, 104, 106, 108,
   110, 112, 114, 116, 119, 122, 125, 128,
   131, 134, 137, 140, 143, 146, 149, 152,
   155, 158, 161, 164, 167, 170, 173, 177,
   181, 185, 189, 193, 197, 201, 205, 209,
   213, 217, 221, 225, 229, 234, 239, 245,
   249, 254, 259, 264, 269, 274, 279, 284
];