add LinePack decoder
[nihav.git] / nihav-duck / src / codecs / vp8.rs
CommitLineData
d0d21988
KS
1use nihav_core::codecs::*;
2use nihav_core::io::byteio::*;
3use nihav_codec_support::codecs::{MV, ZERO_MV};
4use super::vpcommon::*;
5use super::vp78::*;
6use super::vp78data::*;
7use super::vp78dsp::*;
8use super::vp8dsp::*;
9
/// Reference frame a macroblock is predicted from.
#[derive(Clone,Copy,PartialEq,Debug,Default)]
enum VP8Ref {
    /// Macroblock is intra-coded (no reference frame used).
    #[default]
    Intra,
    /// The previous (last decoded) frame.
    Last,
    /// The golden reference frame.
    Golden,
    /// The alternate reference frame.
    AltRef,
}
18
d0d21988
KS
/// Storage for the three reference frames used by VP8 inter prediction.
#[derive(Default)]
pub struct VP8Shuffler {
    /// Most recently decoded frame.
    lastframe: Option<NAVideoBufferRef<u8>>,
    /// Golden reference frame.
    goldframe: Option<NAVideoBufferRef<u8>>,
    /// Alternate reference frame.
    altframe: Option<NAVideoBufferRef<u8>>,
}
25
26impl VP8Shuffler {
27 pub fn new() -> Self { Self::default() }
28 pub fn clear(&mut self) {
29 self.lastframe = None;
30 self.goldframe = None;
31 self.altframe = None;
32 }
33 pub fn add_frame(&mut self, buf: NAVideoBufferRef<u8>) {
34 self.lastframe = Some(buf);
35 }
36 pub fn add_golden_frame(&mut self, buf: NAVideoBufferRef<u8>) {
37 self.goldframe = Some(buf);
38 }
39 pub fn add_altref_frame(&mut self, buf: NAVideoBufferRef<u8>) {
40 self.altframe = Some(buf);
41 }
42 pub fn get_last(&mut self) -> Option<NAVideoBufferRef<u8>> {
e6aaad5c 43 self.lastframe.as_ref().cloned()
d0d21988
KS
44 }
45 pub fn get_golden(&mut self) -> Option<NAVideoBufferRef<u8>> {
e6aaad5c 46 self.goldframe.as_ref().cloned()
d0d21988
KS
47 }
48 pub fn get_altref(&mut self) -> Option<NAVideoBufferRef<u8>> {
e6aaad5c 49 self.altframe.as_ref().cloned()
d0d21988
KS
50 }
51 pub fn has_refs(&self) -> bool {
52 self.lastframe.is_some()
53 }
54}
55
/// Parameters shared by all coefficient subblock decoding calls:
/// the current DCT token probabilities and the dequantisation matrix.
struct SBParams<'a> {
    coef_probs: &'a [[[[u8; 11]; 3]; 8]; 4],
    qmat: &'a [i16; 16],
}
60
/// Coefficient token tree without the end-of-block branch.
/// It is used for the token following a zero token, where EOB cannot
/// occur; the matching probability slice has to skip the EOB probability
/// (see `decode_subblock()` which passes `&probs[1..]`).
pub const COEF_NE_TREE: &[VPTreeDef<DCTToken>] = &[
    VPTreeDef::Value(DCTToken::Zero),   VPTreeDef::Index(2),
    VPTreeDef::Value(DCTToken::One),    VPTreeDef::Index(4),
    VPTreeDef::Index(6),                VPTreeDef::Index(10),
    VPTreeDef::Value(DCTToken::Two),    VPTreeDef::Index(8),
    VPTreeDef::Value(DCTToken::Three),  VPTreeDef::Value(DCTToken::Four),
    VPTreeDef::Index(12),               VPTreeDef::Index(14),
    VPTreeDef::Value(DCTToken::Cat1),   VPTreeDef::Value(DCTToken::Cat2),
    VPTreeDef::Index(16),               VPTreeDef::Index(18),
    VPTreeDef::Value(DCTToken::Cat3),   VPTreeDef::Value(DCTToken::Cat4),
    VPTreeDef::Value(DCTToken::Cat5),   VPTreeDef::Value(DCTToken::Cat6)
];
73
/// Decodes one 4x4 block of DCT coefficients from the boolean coder.
///
/// `ctype` selects the probability class: 1 is used for the Y2 block and
/// 2 for chroma; luma blocks use 0 when their DC is carried in the Y2
/// block (decoding then starts at coefficient 1) and 3 otherwise.
/// `pctx` is the non-zero context (0..2) derived from neighbouring blocks.
/// Dequantised coefficients are stored in `coeffs` at the positions given
/// by the default scan order. Returns 1 if any non-zero coefficient was
/// decoded, 0 otherwise (the value feeds the prediction context cache).
fn decode_subblock(bc: &mut BoolCoder, coeffs: &mut [i16; 16], ctype: usize, pctx: u8, sbparams: &SBParams) -> u8 {
    // maps scan position to the coefficient probability band
    const COEF_BANDS: [usize; 16] = [ 0, 1, 2, 3, 6, 4, 5, 6, 6, 6, 6, 6, 6, 6, 6, 7 ];

    let mut has_nz = 0;
    // class 0 (luma with separate Y2 DC) skips the DC coefficient
    let start = if ctype != 0 { 0 } else { 1 };
    *coeffs = [0; 16];
    let mut cval = pctx as usize;
    for idx in start..16 {
        let probs = &sbparams.coef_probs[ctype][COEF_BANDS[idx]][cval];
        let tok = if cval != 0 || idx == start {
                bc.read_tree(COEF_TREE, probs)
            } else {
                // right after a zero token EOB cannot occur, so use the
                // tree without the EOB branch and skip its probability
                bc.read_tree(COEF_NE_TREE, &probs[1..])
            };
        if tok == DCTToken::EOB { break; }
        let level = expand_token(bc, tok);
        // dequantise and store at the scan-order-mapped position
        coeffs[DEFAULT_SCAN_ORDER[idx]] = level.wrapping_mul(sbparams.qmat[idx]);
        // context for the next token: clipped coefficient magnitude (0..2)
        cval = level.abs().min(2) as usize;
        has_nz |= cval;
    }
    if has_nz > 0 { 1 } else { 0 }
}
96
/// Decoded per-macroblock information.
#[derive(Clone,Copy,Default)]
struct MBInfo {
    /// Macroblock coding type.
    mb_type: VPMBType,
    /// Luma prediction mode.
    ymode: PredMode,
    /// Chroma prediction mode.
    uvmode: PredMode,
    /// Loop filter strength for this macroblock.
    loop_str: u8,
    /// Whether inner (subblock) edges should be filtered as well.
    inner_filt: bool,
    /// Reference frame used for inter prediction.
    rframe: VP8Ref,
}
106
/// Per-segment adjustments; values are absolute or deltas depending on
/// the segment feature mode (see `set_qmat()` / `set_cur_segment()`).
#[derive(Clone,Copy,Default)]
struct Segment {
    /// Quantiser value or delta.
    quant: i8,
    /// Loop filter strength value or delta.
    lf: i8,
}
112
/// Backup storage for probability tables so they can be restored later
/// (see `DecoderState::save()` / `DecoderState::restore()`).
#[derive(Default)]
struct SavedProbs {
    kf_ymode_prob: [u8; 4],
    kf_uvmode_prob: [u8; 3],

    coef_probs: [[[[u8; 11]; 3]; 8]; 4],
    mv_probs: [[u8; 19]; 2],

    // stashed by update_segmentation() before the probabilities are updated
    segment_probs: [u8; 3],
}
123
/// Per-frame decoder state: header flags, probability tables, segment
/// data and intra prediction contexts.
#[derive(Default)]
struct DecoderState {
    /// Simple (true) or normal loop filter selected in the frame header.
    lf_simple: bool,
    loop_filter_level: u8,
    loop_sharpness: u8,

    /// Whether the current frame is intra-coded.
    is_intra: bool,
    /// Bitstream version; selects MC filters (0 = six-tap, else bilinear)
    /// and chroma MV handling (version 3 clears sub-pel bits).
    version: u8,

    kf_ymode_prob: [u8; 4],
    kf_uvmode_prob: [u8; 3],

    // reference-frame selection probabilities
    prob_intra_pred: u8,
    prob_last_pred: u8,
    prob_gold_pred: u8,
    /// MV sign bias for golden ([0]) and altref ([1]) frames
    /// (see `get_frame_sign()`).
    sign_bias: [bool; 2],

    coef_probs: [[[[u8; 11]; 3]; 8]; 4],
    mv_probs: [[u8; 19]; 2],

    segmentation: bool,
    update_seg_map: bool,
    /// Current segment id; used in `decode_residue()` as qmat slot + 1.
    force_quant: Option<u8>,
    /// Loop filter strength derived for the current segment.
    force_loop_str: Option<u8>,
    /// Segment map tree probabilities.
    segment_probs: [u8; 3],
    seg: [Segment; 4],
    /// true = segment features are absolute values, false = deltas.
    seg_feature_mode: bool,

    lf_delta: bool,
    lf_frame_delta: [i8; 4],
    lf_mode_delta: [i8; 4],

    /// Whether the current macroblock has a Y2 (luma DC) block.
    has_y2: bool,

    // intra prediction contexts for the three planes
    ipred_ctx_y: IPredContext,
    ipred_ctx_u: IPredContext,
    ipred_ctx_v: IPredContext,
}
162
impl DecoderState {
    /// Resets all probability tables and segment data to the VP8 defaults.
    fn reset(&mut self) {
        const VP8_DEFAULT_MV_PROBS: [[u8; 19]; 2] = [
            [ 162, 128, 225, 146, 172, 147, 214,  39, 156, 128, 129, 132,  75, 145, 178, 206, 239, 254, 254 ],
            [ 164, 128, 204, 170, 119, 235, 140, 230, 228, 128, 130, 130,  74, 148, 180, 203, 236, 254, 254 ]
        ];

        self.kf_ymode_prob.copy_from_slice(Y_MODE_TREE_PROBS);
        self.kf_uvmode_prob.copy_from_slice(UV_MODE_TREE_PROBS);
        self.coef_probs.copy_from_slice(&DEFAULT_DCT_PROBS);
        self.mv_probs.copy_from_slice(&VP8_DEFAULT_MV_PROBS);
        self.segment_probs = [255; 3];
        self.seg = [Segment::default(); 4];
    }
    /// Restores probabilities from a previously taken snapshot
    /// (`dst` is the *source* of the data here).
    fn restore(&mut self, dst: &SavedProbs) {
        self.kf_ymode_prob  = dst.kf_ymode_prob;
        self.kf_uvmode_prob = dst.kf_uvmode_prob;
        self.coef_probs     = dst.coef_probs;
        self.mv_probs       = dst.mv_probs;
        self.segment_probs  = dst.segment_probs;
    }
    /// Saves the current probabilities into `dst`.
    /// Segment probabilities are deliberately not saved here: they are
    /// stashed in `update_segmentation()` right before being updated.
    fn save(&self, dst: &mut SavedProbs) {
        dst.kf_ymode_prob  = self.kf_ymode_prob;
        dst.kf_uvmode_prob = self.kf_uvmode_prob;
        dst.coef_probs     = self.coef_probs;
        dst.mv_probs       = self.mv_probs;
// dst.segment_probs = self.segment_probs;
    }
}
192
/// Decodes a single motion vector component.
///
/// Probability array layout: [0] = short/long selector, [1] = sign,
/// [2..9] = short value tree probabilities, [9..19] = per-bit
/// probabilities of a long value.
fn decode_mv_component(bc: &mut BoolCoder, probs: &[u8; 19]) -> i16 {
    // order in which the bits of a long value are coded (bit 3 is last)
    const LONG_VECTOR_ORDER: [usize; 9] = [ 0, 1, 2, 9, 8, 7, 6, 5, 4 ];

    let val = if !bc.read_prob(probs[0]) {
            // short value (below 8), coded with a tree
            bc.read_tree(SMALL_MV_TREE, &probs[2..9])
        } else {
            // long value, coded as explicit bits
            let raw_probs = &probs[9..];
            let mut raw = 0;
            for ord in LONG_VECTOR_ORDER.iter() {
                raw |= (bc.read_prob(raw_probs[*ord]) as i16) << *ord;
            }
            if (raw & 0x3F0) != 0 {
                // some bit above bit 3 is set -> bit 3 is coded explicitly
                raw |= (bc.read_prob(raw_probs[3]) as i16) << 3;
            } else {
                // otherwise bit 3 is implicitly one (smaller values would
                // have been coded as a short value)
                raw |= 1 << 3;
            }
            raw
        };
    // the sign is coded only for non-zero values
    if (val == 0) || !bc.read_prob(probs[1]) {
        val
    } else {
        -val
    }
}
217
/// VP8 decoder context.
struct VP8Decoder {
    info: NACodecInfoRef,

    /// Reference frame storage.
    shuf: VP8Shuffler,
    width: usize,
    height: usize,
    /// Frame size in macroblocks.
    mb_w: usize,
    mb_h: usize,
    /// Per-macroblock decoded information.
    mb_info: Vec<MBInfo>,
    /// Motion vectors on a 4x4-block grid (one row of blocks = mv_stride).
    mvs: Vec<MV>,
    mv_stride: usize,

    /// Per-4x4-block luma prediction modes (for B_PRED neighbours).
    ymodes: Vec<PredMode>,
    ymode_stride: usize,
    /// Per-macroblock chroma prediction modes.
    uvmodes: Vec<PredMode>,
    uvmode_stride: usize,

    dstate: DecoderState,
    /// Non-zero coefficient contexts for the current macroblock row.
    pcache: PredCache,
    /// Probability backup used for restore-after-frame handling.
    tmp_probs: SavedProbs,

    /// Coefficients: 16 luma blocks, 4 U, 4 V and the Y2 block at [24].
    coeffs: [[i16; 16]; 25],
    /// Dequant matrices: slot 0 frame-wide, slots 1..5 per segment;
    /// inner index 0 = luma, 1 = chroma, 2 = Y2.
    qmat: [[[i16; 16]; 3]; 5],

    /// Scratch buffer used by the motion compensation routines.
    mc_buf: NAVideoBufferRef<u8>,

    /// Per-macroblock segment ids.
    seg_map: Vec<u8>,
}
246
247impl VP8Decoder {
248 fn new() -> Self {
249 let vt = alloc_video_buffer(NAVideoInfo::new(128, 128, false, YUV420_FORMAT), 4).unwrap();
250 let mc_buf = vt.get_vbuf().unwrap();
251 Self {
252 info: NACodecInfoRef::default(),
253
254 shuf: VP8Shuffler::new(),
255 width: 0,
256 height: 0,
257 mb_w: 0,
258 mb_h: 0,
259 mb_info: Vec::new(),
260 mvs: Vec::new(),
261 mv_stride: 0,
262
263 ymodes: Vec::new(),
264 ymode_stride: 0,
265 uvmodes: Vec::new(),
266 uvmode_stride: 0,
267
268 dstate: DecoderState::default(),
269 pcache: PredCache::new(),
270 tmp_probs: SavedProbs::default(),
271
272 coeffs: [[0; 16]; 25],
273 qmat: [[[0; 16]; 3]; 5],
274
275 mc_buf,
276
277 seg_map: Vec::new(),
278 }
279 }
280 fn set_dimensions(&mut self, width: usize, height: usize) {
281 if (width == self.width) && (height == self.height) {
282 return;
283 }
284 self.width = width;
285 self.height = height;
286 self.mb_w = (self.width + 15) >> 4;
287 self.mb_h = (self.height + 15) >> 4;
288 self.mb_info.resize(self.mb_w * self.mb_h, MBInfo::default());
289 self.mv_stride = self.mb_w * 4;
290 self.mvs.resize(self.mv_stride * self.mb_h * 4, ZERO_MV);
291
292 self.ymode_stride = self.mb_w * 4;
293 self.uvmode_stride = self.mb_w;
294 self.ymodes.resize(self.ymode_stride * self.mb_h * 4, PredMode::default());
295 self.uvmodes.resize(self.uvmode_stride * self.mb_h, PredMode::default());
296
297 self.pcache.resize(self.mb_w);
298
299 self.seg_map.clear();
300 self.seg_map.resize(self.mb_w * self.mb_h, 0);
301 }
    /// Parses the segmentation part of the frame header: optional
    /// per-segment quantiser and loop filter adjustments plus the
    /// segment map tree probabilities.
    fn update_segmentation(&mut self, bc: &mut BoolCoder) -> DecoderResult<()> {
        self.dstate.update_seg_map = bc.read_bool();
        if bc.read_bool() {
            // per-segment feature data follows;
            // mode: true = absolute values, false = deltas
            self.dstate.seg_feature_mode = bc.read_bool();
            for seg in self.dstate.seg.iter_mut() {
                if bc.read_bool() {
                    // quantiser adjustment: 7-bit magnitude plus sign
                    let quant_upd_val = bc.read_bits(7) as i8;
                    let quant_upd_sign = bc.read_bool();
                    seg.quant = if !quant_upd_sign { quant_upd_val } else { -quant_upd_val };
                }
            }
            for seg in self.dstate.seg.iter_mut() {
                if bc.read_bool() {
                    // loop filter adjustment: 6-bit magnitude plus sign
                    let lf_upd_val = bc.read_bits(6) as i8;
                    let lf_upd_sign = bc.read_bool();
                    seg.lf = if !lf_upd_sign { lf_upd_val } else { -lf_upd_val };
                }
            }
        }
        if self.dstate.update_seg_map {
            // stash the old tree probabilities so they can be restored later
            self.tmp_probs.segment_probs = self.dstate.segment_probs;
            for prob in self.dstate.segment_probs.iter_mut() {
                if bc.read_bool() {
                    *prob = bc.read_byte();
                }
            }
        }
        Ok(())
    }
    /// Parses the loop filter delta section of the frame header:
    /// per-reference-frame and per-mode filter strength adjustments
    /// (each a 6-bit magnitude plus sign, updated only when flagged).
    fn mb_lf_adjustments(&mut self, bc: &mut BoolCoder) -> DecoderResult<()> {
        self.dstate.lf_delta = bc.read_bool();
        if self.dstate.lf_delta {
            if bc.read_bool() {
                // deltas indexed by reference frame type
                for frame_delta in self.dstate.lf_frame_delta.iter_mut() {
                    if bc.read_bool() {
                        let delta_magn = bc.read_bits(6) as i8;
                        let delta_sign = bc.read_bool();
                        *frame_delta = if !delta_sign { delta_magn } else { -delta_magn };
                    }
                }
                // deltas indexed by macroblock mode
                for mode_delta in self.dstate.lf_mode_delta.iter_mut() {
                    if bc.read_bool() {
                        let delta_magn = bc.read_bits(6) as i8;
                        let delta_sign = bc.read_bool();
                        *mode_delta = if !delta_sign { delta_magn } else { -delta_magn };
                    }
                }
            }
        }
        Ok(())
    }
353 fn read_delta_quant(bc: &mut BoolCoder, y_ac_q: usize) -> DecoderResult<usize> {
354 if bc.read_bool() {
355 let delta = bc.read_bits(4) as usize;
356 if bc.read_bool() {
357 Ok(y_ac_q.saturating_sub(delta))
358 } else {
359 Ok((y_ac_q + delta).min(127))
360 }
361 } else {
362 Ok(y_ac_q)
363 }
364 }
    /// Parses the quantiser section of the frame header: a 7-bit luma AC
    /// index followed by optional deltas for the other five quantisers,
    /// then rebuilds the dequantisation matrices.
    fn quant_indices(&mut self, bc: &mut BoolCoder) -> DecoderResult<()> {
        let y_ac_q  = bc.read_bits(7) as usize;
        let y_dc_q  = Self::read_delta_quant(bc, y_ac_q)?;
        let y2_dc_q = Self::read_delta_quant(bc, y_ac_q)?;
        let y2_ac_q = Self::read_delta_quant(bc, y_ac_q)?;
        let uv_dc_q = Self::read_delta_quant(bc, y_ac_q)?;
        let uv_ac_q = Self::read_delta_quant(bc, y_ac_q)?;
        self.set_qmat(y_dc_q, y_ac_q, y2_dc_q, y2_ac_q, uv_dc_q, uv_ac_q);

        Ok(())
    }
376 fn read_dct_coef_prob_upd(&mut self, bc: &mut BoolCoder) -> DecoderResult<()> {
377 for i in 0..4 {
378 for j in 0..8 {
379 for k in 0..3 {
380 for l in 0..11 {
381 if bc.read_prob(DCT_UPDATE_PROBS[i][j][k][l]) {
382 self.dstate.coef_probs[i][j][k][l] = bc.read_byte();
383 }
384 }
385 }
386 }
387 }
388 Ok(())
389 }
    /// Conditionally updates the motion vector probability tables; each
    /// probability has its own update chance in MV_UPDATE_PROBS.
    fn read_mv_prob_upd(&mut self, bc: &mut BoolCoder) -> DecoderResult<()> {
        const MV_UPDATE_PROBS: [[u8; 19]; 2] = [
            [ 237, 246, 253, 253, 254, 254, 254, 254, 254, 254, 254, 254, 254, 254, 250, 250, 252, 254, 254 ],
            [ 231, 243, 245, 253, 254, 254, 254, 254, 254, 254, 254, 254, 254, 254, 251, 251, 254, 254, 254 ]
        ];
        for comp in 0..2 {
            for i in 0..19 {
                if bc.read_prob(MV_UPDATE_PROBS[comp][i]) {
                    // note: MV probabilities are read via read_probability()
                    // instead of a raw byte as the DCT update does
                    self.dstate.mv_probs[comp][i] = bc.read_probability();
                }
            }
        }
        Ok(())
    }
404 fn decode_mb_features(&mut self, bc: &mut BoolCoder, mb_idx: usize) -> DecoderResult<()> {
405 let segment_id = bc.read_tree(FEATURE_TREE, &self.dstate.segment_probs);
406 self.seg_map[mb_idx] = segment_id as u8;
407
408 Ok(())
409 }
    /// Activates the segment of the given macroblock: selects the
    /// per-segment quantiser set and computes the loop filter strength.
    fn set_cur_segment(&mut self, mb_idx: usize) {
        // force_quant stores the segment id; decode_residue() uses it
        // as qmat slot index + 1
        self.dstate.force_quant = Some(self.seg_map[mb_idx]);
        let seg_id = self.seg_map[mb_idx] as usize;
        let segment = &self.dstate.seg[seg_id];
        let loop_str = if self.dstate.seg_feature_mode {
                // absolute mode (value assumed non-negative here —
                // NOTE(review): a negative lf would wrap in the cast)
                segment.lf as u8
            } else {
                // delta mode: adjust the frame-level strength, clamp to 0..=63
                (i16::from(self.dstate.loop_filter_level) + i16::from(segment.lf)).max(0).min(63) as u8
            };
        self.dstate.force_loop_str = Some(loop_str);
    }
    /// Decodes all coefficients of the current macroblock, updates the
    /// non-zero contexts and applies the inverse transforms.
    /// Returns `true` if any block had coded coefficients.
    fn decode_residue(&mut self, bc: &mut BoolCoder, mb_x: usize) -> bool {
        // qmat slot 0 is the frame-wide set, slots 1..5 are per segment
        let qmat_idx = if let Some(idx) = self.dstate.force_quant { (idx as usize) + 1 } else { 0 };
        let mut sbparams = SBParams {
                qmat: &self.qmat[qmat_idx][2],
                coef_probs: &self.dstate.coef_probs,
            };
        let mut has_ac = [false; 25];
        let mut coded = false;
        let ytype;
        if self.dstate.has_y2 {
            // decode the Y2 block (WHT of the luma DCs), coefficient class 1
            let pred = &self.pcache.y2_pred;
            let pidx = pred.xpos + mb_x;
            let pctx = self.pcache.y2_pred_left + pred.data[pidx - pred.stride];

            let has_nz = decode_subblock(bc, &mut self.coeffs[24], 1, pctx, &sbparams);
            self.pcache.y2_pred.data[pidx] = has_nz;
            self.pcache.y2_pred_left = has_nz;
            has_ac[24] = has_nz > 0;
            coded |= has_ac[24] | (self.coeffs[24][0] != 0);

            // luma blocks are then decoded without their DC coefficients
            ytype = 0;
        } else {
            // no Y2 block: propagate the context from the row above and
            // decode luma blocks with DC included
            let pred = &mut self.pcache.y2_pred;
            let pidx = pred.xpos + mb_x;
            pred.data[pidx] = pred.data[pidx - pred.stride];

            ytype = 3;
        }
        // sixteen luma blocks in raster order
        sbparams.qmat = &self.qmat[qmat_idx][0];
        for i in 0..16 {
            let bx = i & 3;
            let by = i >> 2;
            let pred = &self.pcache.y_pred;
            let pidx = pred.xpos + mb_x * 4 + bx + by * pred.stride;
            let pctx = self.pcache.y_pred_left[by] + pred.data[pidx - pred.stride];

            let has_nz = decode_subblock(bc, &mut self.coeffs[i], ytype, pctx, &sbparams);
            self.pcache.y_pred.data[pidx] = has_nz;
            self.pcache.y_pred_left[by] = has_nz;
            has_ac[i] = has_nz > 0;
            coded |= has_ac[i] | (self.coeffs[i][0] != 0);
        }
        // four U blocks followed by four V blocks, coefficient class 2
        sbparams.qmat = &self.qmat[qmat_idx][1];
        for i in 16..20 {
            let bx = i & 1;
            let by = (i >> 1) & 1;
            let pred = &self.pcache.u_pred;
            let pidx = pred.xpos + mb_x * 2 + bx + by * pred.stride;
            let pctx = self.pcache.u_pred_left[by] + pred.data[pidx - pred.stride];

            let has_nz = decode_subblock(bc, &mut self.coeffs[i], 2, pctx, &sbparams);
            self.pcache.u_pred.data[pidx] = has_nz;
            self.pcache.u_pred_left[by] = has_nz;
            has_ac[i] = has_nz > 0;
            coded |= has_ac[i] | (self.coeffs[i][0] != 0);
        }
        for i in 20..24 {
            let bx = i & 1;
            let by = (i >> 1) & 1;
            let pred = &self.pcache.v_pred;
            let pidx = pred.xpos + mb_x * 2 + bx + by * pred.stride;
            let pctx = self.pcache.v_pred_left[by] + pred.data[pidx - pred.stride];

            let has_nz = decode_subblock(bc, &mut self.coeffs[i], 2, pctx, &sbparams);
            self.pcache.v_pred.data[pidx] = has_nz;
            self.pcache.v_pred_left[by] = has_nz;
            has_ac[i] = has_nz > 0;
            coded |= has_ac[i] | (self.coeffs[i][0] != 0);
        }

        if self.dstate.has_y2 {
            // inverse WHT on the Y2 block and distribute its outputs as
            // the DC coefficients of the sixteen luma blocks
            let y2block = &mut self.coeffs[24];
            if has_ac[24] {
                iwht4x4(y2block);
            } else if y2block[0] != 0 {
                iwht4x4_dc(y2block);
            }
            for i in 0..16 {
                self.coeffs[i][0] = self.coeffs[24][i];
            }
        }
        // inverse DCT with a DC-only shortcut when no AC was coded
        for i in 0..24 {
            if has_ac[i] {
                idct4x4(&mut self.coeffs[i]);
            } else if self.coeffs[i][0] != 0 {
                idct4x4_dc(&mut self.coeffs[i]);
            }
        }

        coded
    }
512
513 fn set_single_qmat(qmat: &mut [[i16; 16]; 3], y_dc_q: usize, y_ac_q: usize, y2_dc_q: usize, y2_ac_q: usize, uv_dc_q: usize, uv_ac_q: usize) {
514 qmat[0][0] = DC_QUANTS[y_dc_q];
515 for i in 1..16 {
516 qmat[0][i] = AC_QUANTS[y_ac_q];
517 }
518 qmat[1][0] = DC_QUANTS[uv_dc_q].min(132);
519 for i in 1..16 {
520 qmat[1][i] = AC_QUANTS[uv_ac_q];
521 }
522 qmat[2][0] = DC_QUANTS[y2_dc_q] * 2;
523 for i in 1..16 {
524 qmat[2][i] = (i32::from(AC_QUANTS[y2_ac_q]) * 155 / 100).max(8) as i16;
525 }
526 }
527 fn set_qmat(&mut self, y_dc_q: usize, y_ac_q: usize, y2_dc_q: usize, y2_ac_q: usize, uv_dc_q: usize, uv_ac_q: usize) {
528 Self::set_single_qmat(&mut self.qmat[0], y_dc_q, y_ac_q, y2_dc_q, y2_ac_q, uv_dc_q, uv_ac_q);
529 if self.dstate.segmentation {
530 for (qmat, seg) in self.qmat[1..].iter_mut().zip(self.dstate.seg.iter()) {
531 let q = if self.dstate.seg_feature_mode {
532 seg.quant.max(0) as usize
533 } else {
534 ((y_ac_q as i16) + i16::from(seg.quant)).max(0).min(127) as usize
535 };
536 Self::set_single_qmat(qmat, q, q, q, q, q, q);
537 }
538 }
539 }
540 fn fill_ymode(&mut self, mb_x: usize, mb_y: usize, ymode: PredMode) {
541 let mut iidx = mb_x * 4 + mb_y * 4 * self.ymode_stride;
542 for _ in 0..4 {
543 for x in 0..4 {
544 self.ymodes[iidx + x] = ymode;
545 }
546 iidx += self.ymode_stride;
547 }
548 }
549 fn fill_mv(&mut self, mb_x: usize, mb_y: usize, mv: MV) {
550 let mut iidx = mb_x * 4 + mb_y * 4 * self.mv_stride;
551 for _ in 0..4 {
552 for x in 0..4 {
553 self.mvs[iidx + x] = mv;
554 }
555 iidx += self.mb_w * 4;
556 }
557 }
558 fn get_frame_sign(&self, rframe: VP8Ref) -> bool {
559 match rframe {
560 VP8Ref::Golden => self.dstate.sign_bias[0],
561 VP8Ref::AltRef => self.dstate.sign_bias[1],
562 _ => false,
563 }
564 }
    /// Gathers motion vector candidates from the neighbouring macroblocks
    /// and derives the MV mode context.
    ///
    /// Returns the MV mode tree probabilities, the "nearest" and "near"
    /// candidate vectors and the best prediction vector, all clipped to
    /// the allowed area around the frame.
    fn find_mv_pred(&self, mb_x: usize, mb_y: usize, frm_sign: bool) -> ([u8; 4], MV, MV, MV) {
        // mode probabilities indexed by the candidate context counts
        const VP8_MV_PROBS: [[u8; 4]; 6] = [
            [   7,   1,   1, 143 ],
            [  14,  18,  14, 107 ],
            [ 135,  64,  57,  68 ],
            [  60,  56, 128,  65 ],
            [ 159, 134, 128,  34 ],
            [ 234, 188, 128,  28 ]
        ];

        // (x offset, y offset, weight): above, left, top-left neighbours
        const OFFS: [(u8, u8, u8); 3] = [(0, 1, 2), (1, 0, 2), (1, 1, 1)];
        let mut mvs = [ZERO_MV; 3];
        let mut mvc = [0; 3];
        let mut num_mv = 0;
        let mut split_w = 0;

        let mut nearest_mv = ZERO_MV;
        let mut near_mv = ZERO_MV;

        for &(x, y, weight) in OFFS.iter() {
            // skip neighbours outside the frame
            let mv = if (x == 0 || mb_x > 0) && (y == 0 || mb_y > 0) {
                    let x = usize::from(x);
                    let y = usize::from(y);
                    let mb_idx = mb_x - x + (mb_y - y) * self.mb_w;
                    // intra-coded neighbours contribute no candidates
                    if self.mb_info[mb_idx].mb_type.is_intra() {
                        continue;
                    }
                    // split-MV neighbours add their weight to the split context
                    if self.mb_info[mb_idx].mb_type == VPMBType::InterFourMV {
                        split_w += weight;
                    }
                    let rsign = self.get_frame_sign(self.mb_info[mb_idx].rframe);
                    // pick the neighbour's sub-MV closest to this macroblock
                    let mut mv_idx = mb_x * 4 + mb_y * 4 * self.mv_stride;
                    if y == 0 { // left
                        mv_idx += self.mv_stride * 3 - 1;
                    } else if x == 0 { // top
                        mv_idx -= self.mv_stride;
                        mv_idx += 3;
                    } else { // top-left
                        mv_idx -= self.mv_stride + 1;
                    }
                    // vectors from frames with a different sign bias are negated
                    if rsign == frm_sign {
                        self.mvs[mv_idx]
                    } else {
                        -self.mvs[mv_idx]
                    }
                } else {
                    continue;
                };
            // merge equal candidates by accumulating their weights
            let mut found = false;
            for i in 0..num_mv {
                if mvs[i] == mv {
                    mvc[i] += weight;
                    found = true;
                    break;
                }
            }
            if !found {
                mvs[num_mv] = mv;
                mvc[num_mv] = weight;
                num_mv += 1;
            }
        }

        // sort the candidates by descending weight
        match num_mv {
            2 => {
                if mvc[0] < mvc[1] {
                    mvs.swap(0, 1);
                    mvc.swap(0, 1);
                }
            },
            3 => {
                if mvc[1] < mvc[2] {
                    mvs.swap(1, 2);
                    mvc.swap(1, 2);
                }
                if mvc[0] < mvc[1] {
                    mvs.swap(0, 1);
                    mvc.swap(0, 1);
                }
                if mvc[1] < mvc[2] {
                    mvs.swap(1, 2);
                    mvc.swap(1, 2);
                }
            },
            _ => {},
        };

        let mut best_mv = mvs[0];

        // context counts: [0] = zero-MV weight, [1] = nearest MV weight,
        // [2] = near MV weight, [3] = split-MV weight
        let mut ct = [0; 4];
        for (&mv, &count) in mvs[..num_mv].iter().zip(mvc.iter()) {
            if mv != ZERO_MV {
                if nearest_mv == ZERO_MV {
                    // first non-zero candidate becomes "nearest"
                    nearest_mv = mv;
                    if mvc[0] == count {
                        best_mv = mv;
                    }
                    ct[1] = count;
                } else {
                    // second non-zero candidate becomes "near"
                    near_mv = mv;
                    ct[2] = count;
                    break;
                }
            }
        }
        for (&mv, &count) in mvs[..num_mv].iter().zip(mvc.iter()) {
            if mv == ZERO_MV {
                ct[0] = count;
                break;
            }
        }
        ct[3] = split_w;
        let best_mv = self.clip_mv(best_mv, mb_x, mb_y);

        let mvprobs = [VP8_MV_PROBS[ct[0] as usize][0],
                       VP8_MV_PROBS[ct[1] as usize][1],
                       VP8_MV_PROBS[ct[2] as usize][2],
                       VP8_MV_PROBS[ct[3] as usize][3]];

        (mvprobs, self.clip_mv(nearest_mv, mb_x, mb_y), self.clip_mv(near_mv, mb_x, mb_y), best_mv)
    }
686 fn clip_mv(&self, mv: MV, mb_x: usize, mb_y: usize) -> MV {
687 let pos_x = (mb_x as i32) * 16 * 4;
688 let pos_y = (mb_y as i32) * 16 * 4;
689 let mv_x = (pos_x + i32::from(mv.x)).max(-16 * 4).min((self.mb_w as i32) * 16 * 4);
690 let mv_y = (pos_y + i32::from(mv.y)).max(-16 * 4).min((self.mb_h as i32) * 16 * 4);
691 MV {x: (mv_x - pos_x) as i16, y: (mv_y - pos_y) as i16 }
692 }
    /// Decodes the motion vector for one 4x4 block of a split-MV
    /// macroblock.
    ///
    /// The sub-MV mode (left/above/zero/new) is coded with a context
    /// derived from the left and top neighbouring vectors; since `mvs`
    /// is a frame-wide per-4x4-block array, the neighbours may belong to
    /// adjacent macroblocks.
    fn get_split_mv(&self, bc: &mut BoolCoder, mb_x: usize, mb_y: usize, bx: usize, by: usize, pred_mv: MV) -> MV {
        const SUB_MV_REF_PROBS: [[u8; 3]; 5] = [
            [ 147, 136,  18 ],
            [ 106, 145,   1 ],
            [ 179, 121,   1 ],
            [ 223,   1,  34 ],
            [ 208,   1,   1 ]
        ];

        let mvidx = mb_x * 4 + bx + (mb_y * 4 + by) * self.mv_stride;
        let left_mv = if (mb_x > 0) || (bx > 0) {
                self.mvs[mvidx - 1]
            } else {
                ZERO_MV
            };
        let top_mv = if (mb_y > 0) || (by > 0) {
                self.mvs[mvidx - self.mv_stride]
            } else {
                ZERO_MV
            };

        // context: 4 = both neighbours zero, 3 = equal non-zero neighbours,
        // 2 = only top zero, 1 = only left zero, 0 = distinct non-zero
        let idx = if left_mv == top_mv {
                if left_mv == ZERO_MV {
                    4
                } else {
                    3
                }
            } else if top_mv == ZERO_MV {
                2
            } else if left_mv == ZERO_MV {
                1
            } else {
                0
            };
        let mode = bc.read_tree(SUB_MV_REF_TREE, &SUB_MV_REF_PROBS[idx]);
        match mode {
            SubMVRef::Left => left_mv,
            SubMVRef::Above => top_mv,
            SubMVRef::Zero => ZERO_MV,
            SubMVRef::New => {
                // explicit delta against the prediction vector;
                // the vertical component is coded first
                let dmy = decode_mv_component(bc, &self.dstate.mv_probs[0]);
                let dmx = decode_mv_component(bc, &self.dstate.mv_probs[1]);
                pred_mv + MV{ x: dmx, y: dmy }
            },
        }
    }
    /// Decodes the motion vectors of a split-MV macroblock and fills the
    /// per-4x4-block MV array.
    ///
    /// Entries are written to `self.mvs` *before* decoding the following
    /// sub-vectors because get_split_mv() reads already-decoded
    /// neighbours back from that same array.
    fn do_split_mv(&mut self, bc: &mut BoolCoder, mb_x: usize, mb_y: usize, pred_mv: MV) -> DecoderResult<()> {
        let split_mode = bc.read_tree(MV_SPLIT_MODE_TREE, &MV_SPLIT_MODE_PROBS);
        let mut mvidx = mb_x * 4 + mb_y * 4 * self.mv_stride;
        match split_mode {
            MVSplitMode::TopBottom => {
                // two vectors: top half then bottom half; the top rows are
                // stored first so the bottom vector sees them as neighbours
                let top_mv = self.get_split_mv(bc, mb_x, mb_y, 0, 0, pred_mv);
                for _ in 0..2 {
                    for x in 0..4 { self.mvs[mvidx + x] = top_mv; }
                    mvidx += self.mv_stride;
                }
                let bot_mv = self.get_split_mv(bc, mb_x, mb_y, 0, 2, pred_mv);
                for _ in 2..4 {
                    for x in 0..4 { self.mvs[mvidx + x] = bot_mv; }
                    mvidx += self.mv_stride;
                }
            },
            MVSplitMode::LeftRight => {
                let left_mv = self.get_split_mv(bc, mb_x, mb_y, 0, 0, pred_mv);
                // store the left vector so the right half (bx == 2) sees
                // it as its left neighbour while being decoded
                self.mvs[mvidx + 1] = left_mv;
                let right_mv = self.get_split_mv(bc, mb_x, mb_y, 2, 0, pred_mv);
                for _ in 0..4 {
                    self.mvs[mvidx + 0] = left_mv;
                    self.mvs[mvidx + 1] = left_mv;
                    self.mvs[mvidx + 2] = right_mv;
                    self.mvs[mvidx + 3] = right_mv;
                    mvidx += self.mv_stride;
                }
            },
            MVSplitMode::Quarters => {
                // one vector per 8x8 quarter, replicated into its 2x2 blocks
                for y in (0..4).step_by(2) {
                    for x in (0..4).step_by(2) {
                        self.mvs[mvidx + x] = self.get_split_mv(bc, mb_x, mb_y, x, y, pred_mv);
                        self.mvs[mvidx + x + 1] = self.mvs[mvidx + x];
                    }
                    for x in 0..4 {
                        self.mvs[mvidx + x + self.mv_stride] = self.mvs[mvidx + x];
                    }
                    mvidx += self.mv_stride * 2;
                }
            },
            MVSplitMode::Sixteenths => {
                // an individual vector for every 4x4 block
                for y in 0..4 {
                    for x in 0..4 {
                        self.mvs[mvidx + x] = self.get_split_mv(bc, mb_x, mb_y, x, y, pred_mv);
                    }
                    mvidx += self.mv_stride;
                }
            },
        };
        Ok(())
    }
790
791 fn add_residue(&self, dframe: &mut NASimpleVideoFrame<u8>, mb_x: usize, mb_y: usize, do_luma: bool) {
792 if do_luma {
793 let ydst = &mut dframe.data[dframe.offset[0]..];
794 let ystride = dframe.stride[0];
795 let mut yoff = mb_x * 16 + mb_y * 16 * ystride;
796 for y in 0..4 {
797 for x in 0..4 {
798 add_coeffs4x4(ydst, yoff + x * 4, ystride, &self.coeffs[x + y * 4]);
799 }
800 yoff += 4 * ystride;
801 }
802 }
803 let dst = &mut dframe.data[0..];
804 let mut uoff = dframe.offset[1] + mb_x * 8 + mb_y * 8 * dframe.stride[1];
805 let ustride = dframe.stride[1];
806 let mut voff = dframe.offset[2] + mb_x * 8 + mb_y * 8 * dframe.stride[2];
807 let vstride = dframe.stride[2];
808 for y in 0..2 {
809 for x in 0..2 {
810 add_coeffs4x4(dst, uoff + x * 4, ustride, &self.coeffs[16 + x + y * 2]);
811 add_coeffs4x4(dst, voff + x * 4, vstride, &self.coeffs[20 + x + y * 2]);
812 }
813 uoff += ustride * 4;
814 voff += vstride * 4;
815 }
816 }
817 fn recon_intra_mb(&mut self, dframe: &mut NASimpleVideoFrame<u8>, mb_x: usize, mb_y: usize, mb_coeff_skip: bool) -> DecoderResult<()> {
818 let mb_idx = mb_x + mb_y * self.mb_w;
819 let has_top = mb_y > 0;
820 let has_left = mb_x > 0;
821 let ydst = &mut dframe.data[dframe.offset[0]..];
822 let ystride = dframe.stride[0];
823 let mut yoff = mb_x * 16 + mb_y * 16 * ystride;
824 let ipred_ctx_y = &mut self.dstate.ipred_ctx_y;
825 ipred_ctx_y.has_top = has_top;
826 ipred_ctx_y.has_left = has_left;
827 let is_normal = self.mb_info[mb_idx].ymode != PredMode::BPred;
828 if is_normal {
829 ipred_ctx_y.fill(ydst, yoff, ystride, 16, 16);
830 if !has_top && self.mb_info[mb_idx].ymode == PredMode::VPred {
831 IPred16x16::ipred_const(ydst, yoff, ystride, 0x7F)
832 } else if !has_left && self.mb_info[mb_idx].ymode == PredMode::HPred {
833 IPred16x16::ipred_const(ydst, yoff, ystride, 0x81)
834 } else {
835 match self.mb_info[mb_idx].ymode {
836 PredMode::DCPred => IPred16x16::ipred_dc(ydst, yoff, ystride, ipred_ctx_y),
837 PredMode::HPred => IPred16x16::ipred_h (ydst, yoff, ystride, ipred_ctx_y),
838 PredMode::VPred => IPred16x16::ipred_v (ydst, yoff, ystride, ipred_ctx_y),
839 PredMode::TMPred => IPred16x16::ipred_tm(ydst, yoff, ystride, ipred_ctx_y),
840 _ => unreachable!(),
841 };
842 }
843 } else {
844 let mut iidx = mb_x * 4 + mb_y * 4 * self.ymode_stride;
845 let mut tr_save = [0x7Fu8; 16];
846 let tr_edge = if has_top { ydst[yoff - ystride + 15] } else { 0x7F };
847 for y in 0..4 {
848 for x in 0..4 {
849 ipred_ctx_y.has_left = has_left || x > 0;
850 let bmode = self.ymodes[iidx + x];
851 let cur_yoff = yoff + x * 4;
852 let has_tr = ipred_ctx_y.has_top && ((x < 3) || ((y == 0) && (mb_y < self.mb_w - 1)));
853 let has_dl = ipred_ctx_y.has_left && (x == 0) && (y < 3);
854 ipred_ctx_y.fill(ydst, cur_yoff, ystride,
855 if has_tr { 8 } else { 4 },
856 if has_dl { 8 } else { 4 });
857
858 if !has_top && y == 0 && (has_left || x > 0) && bmode != PredMode::TMPred {
859 ipred_ctx_y.top = [0x7F; 16];
860 ipred_ctx_y.tl = 0x7F;
861 }
862 if !has_left && x == 0 && (has_top || y > 0) && bmode != PredMode::TMPred {
863 ipred_ctx_y.left = [0x81; 16];
864 ipred_ctx_y.tl = 0x81;
865 }
866 if !has_left && !has_top && x == 0 && y == 0 && bmode != PredMode::DCPred {
867 ipred_ctx_y.top = [0x7F; 16];
868 ipred_ctx_y.left = [0x81; 16];
869 ipred_ctx_y.tl = 0x7F;
870 }
871
872 if !has_tr {
873 for i in 0..4 {
874 ipred_ctx_y.top[i + 4] = tr_save[x * 4 + i];
875 }
876 } else {
877 for i in 0..4 {
878 tr_save[x * 4 + i] = ipred_ctx_y.top[i + 4];
879 }
880 }
881 if (mb_x == self.mb_w - 1) && has_top && (x == 3) {
882 for i in 0..4 {
883 ipred_ctx_y.top[i + 4] = tr_edge;
884 }
885 }
886 match bmode {
887 PredMode::DCPred => IPred4x4::ipred_dc(ydst, cur_yoff, ystride, ipred_ctx_y),
888 PredMode::TMPred => IPred4x4::ipred_tm(ydst, cur_yoff, ystride, ipred_ctx_y),
889 PredMode::HPred => IPred4x4::ipred_he(ydst, cur_yoff, ystride, ipred_ctx_y),
890 PredMode::VPred => IPred4x4::ipred_ve(ydst, cur_yoff, ystride, ipred_ctx_y),
891 PredMode::LDPred => IPred4x4::ipred_ld(ydst, cur_yoff, ystride, ipred_ctx_y),
892 PredMode::RDPred => IPred4x4::ipred_rd(ydst, cur_yoff, ystride, ipred_ctx_y),
893 PredMode::VRPred => IPred4x4::ipred_vr(ydst, cur_yoff, ystride, ipred_ctx_y),
894 PredMode::VLPred => IPred4x4::ipred_vl(ydst, cur_yoff, ystride, ipred_ctx_y),
895 PredMode::HDPred => IPred4x4::ipred_hd(ydst, cur_yoff, ystride, ipred_ctx_y),
896 PredMode::HUPred => IPred4x4::ipred_hu(ydst, cur_yoff, ystride, ipred_ctx_y),
897 _ => unreachable!(),
898 };
899 if !mb_coeff_skip {
900 add_coeffs4x4(ydst, cur_yoff, ystride, &self.coeffs[x + y * 4]);
901 }
902 }
903 ipred_ctx_y.has_top = true;
904 yoff += 4 * ystride;
905 iidx += self.ymode_stride;
906 }
907 }
908 let dst = &mut dframe.data[0..];
909 let uoff = dframe.offset[1] + mb_x * 8 + mb_y * 8 * dframe.stride[1];
910 let ustride = dframe.stride[1];
911 let voff = dframe.offset[2] + mb_x * 8 + mb_y * 8 * dframe.stride[2];
912 let vstride = dframe.stride[2];
913 let ipred_ctx_u = &mut self.dstate.ipred_ctx_u;
914 let ipred_ctx_v = &mut self.dstate.ipred_ctx_v;
915 ipred_ctx_u.has_top = has_top;
916 ipred_ctx_v.has_top = has_top;
917 ipred_ctx_u.has_left = has_left;
918 ipred_ctx_v.has_left = has_left;
919 ipred_ctx_u.fill(dst, uoff, ustride, 8, 8);
920 ipred_ctx_v.fill(dst, voff, vstride, 8, 8);
921
922 if !has_top && self.mb_info[mb_idx].uvmode == PredMode::VPred {
923 IPred8x8::ipred_const(dst, uoff, ustride, 0x7F);
924 IPred8x8::ipred_const(dst, voff, vstride, 0x7F);
925 } else if !has_left && self.mb_info[mb_idx].uvmode == PredMode::HPred {
926 IPred8x8::ipred_const(dst, uoff, ustride, 0x81);
927 IPred8x8::ipred_const(dst, voff, vstride, 0x81);
928 } else {
929 match self.mb_info[mb_idx].uvmode {
930 PredMode::DCPred => {
931 IPred8x8::ipred_dc(dst, uoff, ustride, ipred_ctx_u);
932 IPred8x8::ipred_dc(dst, voff, vstride, ipred_ctx_v);
933 },
934 PredMode::HPred => {
935 IPred8x8::ipred_h(dst, uoff, ustride, ipred_ctx_u);
936 IPred8x8::ipred_h(dst, voff, vstride, ipred_ctx_v);
937 },
938 PredMode::VPred => {
939 IPred8x8::ipred_v(dst, uoff, ustride, ipred_ctx_u);
940 IPred8x8::ipred_v(dst, voff, vstride, ipred_ctx_v);
941 },
942 PredMode::TMPred => {
943 IPred8x8::ipred_tm(dst, uoff, ustride, ipred_ctx_u);
944 IPred8x8::ipred_tm(dst, voff, vstride, ipred_ctx_v);
945 },
946 _ => unreachable!(),
947 };
948 }
949 if !mb_coeff_skip {
950 self.add_residue(dframe, mb_x, mb_y, is_normal);
951 }
952 Ok(())
953 }
    /// Reconstructs an inter-coded macroblock: motion-compensates luma and
    /// chroma from the selected reference frame and adds the residue
    /// unless the macroblock is coefficient-skipped.
    fn recon_inter_mb(&mut self, dframe: &mut NASimpleVideoFrame<u8>, mb_x: usize, mb_y: usize, mb_coeff_skip: bool, rframe: VP8Ref) {
        let refframe = match rframe {
                VP8Ref::Last   => self.shuf.get_last(),
                VP8Ref::Golden => self.shuf.get_golden(),
                VP8Ref::AltRef => self.shuf.get_altref(),
                VP8Ref::Intra  => unreachable!(),
            }.unwrap();
        let single_mv = self.mb_info[mb_x + mb_y * self.mb_w].mb_type != VPMBType::InterFourMV;
        let mut iidx = mb_x * 4 + mb_y * 4 * self.mv_stride;
        let mc_buf = self.mc_buf.get_data_mut().unwrap();

        // luma: version 0 uses the six-tap filter, later versions bilinear;
        // MV components are doubled for the luma MC routines
        let dst = &mut dframe.data[0..];
        let ystride = dframe.stride[0];
        let mut yoff = dframe.offset[0] + mb_x * 16 + mb_y * 16 * ystride;
        if single_mv {
            if self.dstate.version == 0 {
                mc_block16x16(dst, yoff, ystride, mb_x * 16, mb_y * 16,
                              self.mvs[iidx].x * 2, self.mvs[iidx].y * 2, refframe.clone(), 0, mc_buf);
            } else {
                mc_block16x16_bilin(dst, yoff, ystride, mb_x * 16, mb_y * 16,
                                    self.mvs[iidx].x * 2, self.mvs[iidx].y * 2, refframe.clone(), 0, mc_buf);
            }
        } else {
            // four-MV macroblock: every 4x4 luma block has its own vector
            for y in 0..4 {
                for x in 0..4 {
                    if self.dstate.version == 0 {
                        mc_block4x4(dst, yoff + x * 4, ystride, mb_x * 16 + x * 4, mb_y * 16 + y * 4,
                                    self.mvs[iidx + x].x * 2, self.mvs[iidx + x].y * 2, refframe.clone(), 0, mc_buf);
                    } else {
                        mc_block4x4_bilin(dst, yoff + x * 4, ystride, mb_x * 16 + x * 4, mb_y * 16 + y * 4,
                                          self.mvs[iidx + x].x * 2, self.mvs[iidx + x].y * 2, refframe.clone(), 0, mc_buf);
                    }
                }
                yoff += 4 * ystride;
                iidx += self.mv_stride;
            }
        }

        let mut iidx = mb_x * 4 + mb_y * 4 * self.mv_stride;
        let mut uoff = dframe.offset[1] + mb_x * 8 + mb_y * 8 * dframe.stride[1];
        let ustride = dframe.stride[1];
        let mut voff = dframe.offset[2] + mb_x * 8 + mb_y * 8 * dframe.stride[2];
        let vstride = dframe.stride[2];
        if single_mv {
            // chroma reuses the luma vector directly
            let mut chroma_mv = self.mvs[iidx];

            if self.dstate.version == 0 {
                mc_block8x8(dst, uoff, ustride, mb_x * 8, mb_y * 8, chroma_mv.x, chroma_mv.y, refframe.clone(), 1, mc_buf);
                mc_block8x8(dst, voff, vstride, mb_x * 8, mb_y * 8, chroma_mv.x, chroma_mv.y, refframe, 2, mc_buf);
            } else {
                if self.dstate.version == 3 {
                    // version 3 clears the sub-pel bits of chroma vectors
                    chroma_mv.x &= !7;
                    chroma_mv.y &= !7;
                }
                mc_block8x8_bilin(dst, uoff, ustride, mb_x * 8, mb_y * 8, chroma_mv.x, chroma_mv.y, refframe.clone(), 1, mc_buf);
                mc_block8x8_bilin(dst, voff, vstride, mb_x * 8, mb_y * 8, chroma_mv.x, chroma_mv.y, refframe, 2, mc_buf);
            }
        } else {
            // four-MV: each 4x4 chroma block uses the rounded average of
            // the four luma vectors covering its area
            for y in 0..2 {
                for x in 0..2 {
                    let mut chroma_mv = self.mvs[iidx + x * 2] + self.mvs[iidx + x * 2 + 1]
                                      + self.mvs[iidx + x * 2 + self.mv_stride]
                                      + self.mvs[iidx + x * 2 + self.mv_stride + 1];
                    // rounding offset depends on the sign before the shift
                    if chroma_mv.x < 0 {
                        chroma_mv.x += 1;
                    } else {
                        chroma_mv.x += 2;
                    }
                    if chroma_mv.y < 0 {
                        chroma_mv.y += 1;
                    } else {
                        chroma_mv.y += 2;
                    }
                    chroma_mv.x >>= 2;
                    chroma_mv.y >>= 2;

                    if self.dstate.version == 3 {
                        // version 3 clears the sub-pel bits of chroma vectors
                        chroma_mv.x &= !7;
                        chroma_mv.y &= !7;
                    }

                    if self.dstate.version == 0 {
                        mc_block4x4(dst, uoff + x * 4, ustride, mb_x * 8 + x * 4, mb_y * 8 + y * 4,
                                    chroma_mv.x, chroma_mv.y, refframe.clone(), 1, mc_buf);
                        mc_block4x4(dst, voff + x * 4, vstride, mb_x * 8 + x * 4, mb_y * 8 + y * 4,
                                    chroma_mv.x, chroma_mv.y, refframe.clone(), 2, mc_buf);
                    } else {
                        mc_block4x4_bilin(dst, uoff + x * 4, ustride, mb_x * 8 + x * 4, mb_y * 8 + y * 4,
                                          chroma_mv.x, chroma_mv.y, refframe.clone(), 1, mc_buf);
                        mc_block4x4_bilin(dst, voff + x * 4, vstride, mb_x * 8 + x * 4, mb_y * 8 + y * 4,
                                          chroma_mv.x, chroma_mv.y, refframe.clone(), 2, mc_buf);
                    }
                }
                uoff += ustride * 4;
                voff += vstride * 4;
                iidx += 2 * self.mv_stride;
            }
        }
        if !mb_coeff_skip {
            self.add_residue(dframe, mb_x, mb_y, true);
        }
    }
    /// Applies the VP8 in-loop deblocking filter to one reconstructed macroblock.
    ///
    /// `loop_str` is the per-macroblock filter level (0..=63) computed during the
    /// decoding pass; `filter_inner` selects whether the 4-pixel sub-block edges
    /// inside the macroblock are filtered in addition to the macroblock borders.
    fn loop_filter_mb(&mut self, dframe: &mut NASimpleVideoFrame<u8>, mb_x: usize, mb_y: usize, loop_str: u8, filter_inner: bool) {
        // High-edge-variance thresholds indexed by filter level;
        // row 1 is used for intra frames, row 0 for inter frames.
        const HIGH_EDGE_VAR_THR: [[u8; 64]; 2] = [
            [
                0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1,
                1, 1, 1, 1, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
                2, 2, 2, 2, 2, 2, 2, 2, 3, 3, 3, 3, 3, 3, 3, 3,
                3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3
            ], [
                0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1,
                1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
                1, 1, 1, 1, 1, 1, 1, 1, 2, 2, 2, 2, 2, 2, 2, 2,
                2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2
            ]];

        // Interior limit: the raw filter level, optionally attenuated and
        // clamped (but kept >= 1) when loop sharpness is non-zero.
        let inner_thr = if self.dstate.loop_sharpness == 0 {
                i16::from(loop_str)
            } else {
                let bound1 = i16::from(9 - self.dstate.loop_sharpness);
                let shift = (self.dstate.loop_sharpness + 3) >> 2;
                (i16::from(loop_str) >> shift).min(bound1).max(1)
            };
        // Edge limits: sub-block edges use a tighter threshold than the
        // macroblock edges (edge_thr = blk_thr + 4).
        let blk_thr = i16::from(loop_str) * 2 + inner_thr;
        let edge_thr = blk_thr + 4;
        let hev_thr = i16::from(HIGH_EDGE_VAR_THR[if self.dstate.is_intra { 1 } else { 0 }][loop_str as usize]);

        let ystride = dframe.stride[0];
        let ustride = dframe.stride[1];
        let vstride = dframe.stride[2];
        let ypos = dframe.offset[0] + mb_x * 16 + mb_y * 16 * ystride;
        let upos = dframe.offset[1] + mb_x * 8 + mb_y * 8 * ustride;
        let vpos = dframe.offset[2] + mb_x * 8 + mb_y * 8 * vstride;

        // The simple filter uses the same routine for edge and inner
        // filtering and is applied to luma only (see the lf_simple checks below).
        let (loop_edge, loop_inner) = if self.dstate.lf_simple {
                (simple_loop_filter as LoopFilterFunc, simple_loop_filter as LoopFilterFunc)
            } else {
                (normal_loop_filter_edge as LoopFilterFunc, normal_loop_filter_inner as LoopFilterFunc)
            };

        // Vertical edges: left macroblock border (skipped for the first column)...
        if mb_x > 0 {
            loop_edge(dframe.data, ypos, 1, ystride, 16, edge_thr, inner_thr, hev_thr);
            if !self.dstate.lf_simple {
                loop_edge(dframe.data, upos, 1, ustride, 8, edge_thr, inner_thr, hev_thr);
                loop_edge(dframe.data, vpos, 1, vstride, 8, edge_thr, inner_thr, hev_thr);
            }
        }
        // ...then the three internal 4-pixel column boundaries (one for chroma).
        if filter_inner {
            for x in 1..4 {
                loop_inner(dframe.data, ypos + x * 4, 1, ystride, 16, blk_thr, inner_thr, hev_thr);
            }
            if !self.dstate.lf_simple {
                loop_inner(dframe.data, upos + 4, 1, ustride, 8, blk_thr, inner_thr, hev_thr);
                loop_inner(dframe.data, vpos + 4, 1, vstride, 8, blk_thr, inner_thr, hev_thr);
            }
        }

        // Horizontal edges: top macroblock border (skipped for the first row)...
        if mb_y > 0 {
            loop_edge(dframe.data, ypos, ystride, 1, 16, edge_thr, inner_thr, hev_thr);
            if !self.dstate.lf_simple {
                loop_edge(dframe.data, upos, ustride, 1, 8, edge_thr, inner_thr, hev_thr);
                loop_edge(dframe.data, vpos, vstride, 1, 8, edge_thr, inner_thr, hev_thr);
            }
        }
        // ...then the internal 4-pixel row boundaries.
        if filter_inner {
            for y in 1..4 {
                loop_inner(dframe.data, ypos + y * 4 * ystride, ystride, 1, 16, blk_thr, inner_thr, hev_thr);
            }
            if !self.dstate.lf_simple {
                loop_inner(dframe.data, upos + 4 * ustride, ustride, 1, 8, blk_thr, inner_thr, hev_thr);
                loop_inner(dframe.data, vpos + 4 * vstride, vstride, 1, 8, blk_thr, inner_thr, hev_thr);
            }
        }
    }
1128}
1129
1130impl NADecoder for VP8Decoder {
1131 fn init(&mut self, supp: &mut NADecoderSupport, info: NACodecInfoRef) -> DecoderResult<()> {
1132 if let NACodecTypeInfo::Video(vinfo) = info.get_properties() {
1133 let fmt = YUV420_FORMAT;
1134 let myvinfo = NAVideoInfo::new(vinfo.get_width(), vinfo.get_height(), false, fmt);
1135 let myinfo = NACodecTypeInfo::Video(myvinfo);
1136 self.info = NACodecInfo::new_ref(info.get_name(), myinfo, info.get_extradata()).into_ref();
1137
1138 supp.pool_u8.set_dec_bufs(5);
1139 supp.pool_u8.prealloc_video(NAVideoInfo::new(myvinfo.get_width(), myvinfo.get_height(), false, vinfo.get_format()), 4)?;
1140 self.set_dimensions(myvinfo.get_width(), myvinfo.get_height());
1141 Ok(())
1142 } else {
1143 Err(DecoderError::InvalidData)
1144 }
1145 }
1146 #[allow(clippy::cognitive_complexity)]
1147 fn decode(&mut self, supp: &mut NADecoderSupport, pkt: &NAPacket) -> DecoderResult<NAFrameRef> {
1148 let src = pkt.get_buffer();
1149 validate!(src.len() > 4);
1150
1151 let frame_tag = read_u24le(src.as_slice())?;
1152 self.dstate.is_intra = (frame_tag & 1) == 0;
1153 self.dstate.version = ((frame_tag >> 1) & 7) as u8;
1154 validate!(self.dstate.version <= 3);
1155 let _show_frame = ((frame_tag >> 4) & 1) != 0;
1156 let part1_off = if self.dstate.is_intra { 10 } else { 3 };
1157 let part2_off = (frame_tag >> 5) as usize;
1158 validate!(src.len() >= part2_off && part2_off > part1_off);
1159
1160 if self.dstate.is_intra {
1161 validate!(src.len() > 10);
1162 let marker = read_u24be(&src[3..6])?;
1163 validate!(marker == 0x9D012A);
1164 let width_ = read_u16le(&src[6..8])?;
1165 let height_ = read_u16le(&src[8..10])?;
1166 let width = ((width_ + 1) & 0x3FFE) as usize;
1167 let height = ((height_ + 1) & 0x3FFE) as usize;
1168// let hscale = width_ >> 14;
1169// let vscale = height_ >> 14;
1170
1171 validate!((width > 0) && (height > 0));
1172 self.set_dimensions(width, height);
1173
1174 self.dstate.reset();
1175 } else {
1176 if !self.shuf.has_refs() {
1177 return Err(DecoderError::MissingReference);
1178 }
1179 }
1180
1181 let mut bc = BoolCoder::new(&src[part1_off..][..part2_off])?;
1182
1183 if self.dstate.is_intra {
1184 let _color_space = bc.read_bool();
1185 let _clamping_type = bc.read_bool();
1186 }
1187
1188 self.dstate.segmentation = bc.read_bool();
1189 if self.dstate.segmentation {
1190 self.update_segmentation(&mut bc)?;
1191 } else {
1192 self.dstate.update_seg_map = false;
1193 self.dstate.force_quant = None;
1194 self.dstate.force_loop_str = None;
1195 }
1196
1197 self.dstate.lf_simple = bc.read_bool();
1198 self.dstate.loop_filter_level = bc.read_bits(6) as u8;
1199 self.dstate.loop_sharpness = bc.read_bits(3) as u8;
1200
1201 self.mb_lf_adjustments(&mut bc)?;
1202
1203 let num_partitions = 1 << bc.read_bits(2);
1204
1205 self.quant_indices(&mut bc)?;
1206
1207 let (keep_probs, update_last, update_gf, update_ar) = if self.dstate.is_intra {
1208 let refresh_entropy_probs = bc.read_bool();
1209 (refresh_entropy_probs, true, 4, 4)
1210 } else {
1211 let refresh_golden_frame = bc.read_bool();
1212 let refresh_alternate_frame = bc.read_bool();
1213 let copy_to_golden = if !refresh_golden_frame {
1214 bc.read_bits(2)
1215 } else { 4 };
1216 validate!(copy_to_golden != 3);
1217 let copy_to_altref = if !refresh_alternate_frame {
1218 bc.read_bits(2)
1219 } else { 4 };
1220 validate!(copy_to_altref != 3);
1221 self.dstate.sign_bias[0] = bc.read_bool();
1222 self.dstate.sign_bias[1] = bc.read_bool();
1223 let refresh_entropy_probs = bc.read_bool();
1224 let refresh_last = bc.read_bool();
1225 (refresh_entropy_probs, refresh_last, copy_to_golden, copy_to_altref)
1226 };
1227
1228 if !keep_probs {
1229 self.dstate.save(&mut self.tmp_probs);
1230 }
1231
1232 self.read_dct_coef_prob_upd(&mut bc)?;
1233
1234 let mb_no_coeff_skip = bc.read_bool();
1235 let prob_skip_false = bc.read_byte();
1236
1237 if !self.dstate.is_intra {
1238 self.dstate.prob_intra_pred = bc.read_byte();
1239 self.dstate.prob_last_pred = bc.read_byte();
1240 self.dstate.prob_gold_pred = bc.read_byte();
1241 if bc.read_bool() {
1242 for i in 0..4 {
1243 self.dstate.kf_ymode_prob[i] = bc.read_byte();
1244 }
1245 }
1246 if bc.read_bool() {
1247 for i in 0..3 {
1248 self.dstate.kf_uvmode_prob[i] = bc.read_byte();
1249 }
1250 }
1251 self.read_mv_prob_upd(&mut bc)?;
1252 }
1253
1254 let mut data_start = part1_off + part2_off + (num_partitions - 1) * 3;
1255 let mut part_offs = [0; 8];
1256 validate!(data_start <= src.len());
1257 let mut size = src.len() - data_start;
1258 for i in 0..num_partitions - 1 {
1259 let len = read_u24le(&src[part1_off + part2_off + i * 3..][..3])? as usize;
1260 validate!(size >= len);
1261 part_offs[i] = data_start;
1262 data_start += len;
1263 size -= len;
1264 }
1265 part_offs[num_partitions - 1] = data_start;
1266 for start in part_offs[num_partitions..].iter_mut() {
1267 *start = data_start;
1268 }
2954519d
KS
1269 let mut bc_src = Vec::new();
1270 for &off in part_offs.iter() {
1271 bc_src.push(BoolCoder::new(&src[off..]).unwrap());
1272 }
d0d21988
KS
1273
1274 let vinfo = NAVideoInfo::new(self.width, self.height, false, YUV420_FORMAT);
1275 let ret = supp.pool_u8.get_free();
1276 if ret.is_none() {
1277 return Err(DecoderError::AllocError);
1278 }
1279 let mut buf = ret.unwrap();
1280 if buf.get_info() != vinfo {
1281 self.shuf.clear();
1282 supp.pool_u8.reset();
1283 supp.pool_u8.prealloc_video(vinfo, 4)?;
1284 let ret = supp.pool_u8.get_free();
1285 if ret.is_none() {
1286 return Err(DecoderError::AllocError);
1287 }
1288 buf = ret.unwrap();
1289 }
1290 let mut dframe = NASimpleVideoFrame::from_video_buf(&mut buf).unwrap();
1291
1292 let mut mb_idx = 0;
1293 self.pcache.reset();
1294 let mut rframe = VP8Ref::Last;
1295 let loop_filter = self.dstate.version != 3 && self.dstate.loop_filter_level > 0;
1296 for mb_y in 0..self.mb_h {
1297 let bc_main = &mut bc_src[mb_y & (num_partitions - 1)];
1298 for mb_x in 0..self.mb_w {
1299 if self.dstate.update_seg_map {
1300 self.decode_mb_features(&mut bc, mb_idx)?;
1301 }
1302 if self.dstate.segmentation {
1303 self.set_cur_segment(mb_idx);
1304 }
1305 let mb_coeff_skip = if mb_no_coeff_skip {
1306 bc.read_prob(prob_skip_false)
1307 } else { false };
1308 self.dstate.has_y2 = true;
1309 if self.dstate.is_intra {
1310 let ymode = bc.read_tree(KF_Y_MODE_TREE, KF_Y_MODE_TREE_PROBS);
1311 if ymode == PredMode::BPred {
1312 self.dstate.has_y2 = false;
1313 let mut iidx = mb_x * 4 + mb_y * 4 * self.ymode_stride;
1314 for y in 0..4 {
1315 for x in 0..4 {
1316 let top_mode = if (y > 0) || (mb_y > 0) {
1317 self.ymodes[iidx + x - self.ymode_stride]
1318 } else {
1319 PredMode::DCPred
1320 };
1321 let left_mode = if (x > 0) || (mb_x > 0) {
1322 self.ymodes[iidx + x - 1]
1323 } else {
1324 PredMode::DCPred
1325 };
1326 let top_idx = top_mode.to_b_index();
1327 let left_idx = left_mode.to_b_index();
1328 let bmode = bc.read_tree(B_MODE_TREE, &KF_B_MODE_TREE_PROBS[top_idx][left_idx]);
1329 self.ymodes[iidx + x] = bmode;
1330 }
1331 iidx += self.ymode_stride;
1332 }
1333 } else {
1334 self.fill_ymode(mb_x, mb_y, ymode);
1335 }
1336 let uvmode = bc.read_tree(UV_MODE_TREE, KF_UV_MODE_TREE_PROBS);
1337 self.mb_info[mb_idx].mb_type = VPMBType::Intra;
1338 self.mb_info[mb_idx].ymode = ymode;
1339 self.mb_info[mb_idx].uvmode = uvmode;
1340 self.mb_info[mb_idx].rframe = VP8Ref::Intra;
1341 } else if !bc.read_prob(self.dstate.prob_intra_pred) {
1342 let ymode = bc.read_tree(Y_MODE_TREE, &self.dstate.kf_ymode_prob);
1343 if ymode == PredMode::BPred {
1344 self.dstate.has_y2 = false;
1345 let mut iidx = mb_x * 4 + mb_y * 4 * self.ymode_stride;
1346 for _y in 0..4 {
1347 for x in 0..4 {
1348 let bmode = bc.read_tree(B_MODE_TREE, B_MODE_TREE_PROBS);
1349 self.ymodes[iidx + x] = bmode;
1350 }
1351 iidx += self.ymode_stride;
1352 }
1353 } else {
1354 self.fill_ymode(mb_x, mb_y, PredMode::Inter);
1355 }
1356 let uvmode = bc.read_tree(UV_MODE_TREE, &self.dstate.kf_uvmode_prob);
1357 self.mb_info[mb_idx].mb_type = VPMBType::Intra;
1358 self.mb_info[mb_idx].ymode = ymode;
1359 self.mb_info[mb_idx].uvmode = uvmode;
1360 self.mb_info[mb_idx].rframe = VP8Ref::Intra;
1361 self.fill_mv(mb_x, mb_y, ZERO_MV);
1362 } else {
1363 rframe = if !bc.read_prob(self.dstate.prob_last_pred) {
1364 VP8Ref::Last
1365 } else if !bc.read_prob(self.dstate.prob_gold_pred) {
1366 VP8Ref::Golden
1367 } else {
1368 VP8Ref::AltRef
1369 };
1370
1371 let frm_sign = self.get_frame_sign(rframe);
1372 let (mvprobs, nearest_mv, near_mv, pred_mv) = self.find_mv_pred(mb_x, mb_y, frm_sign);
1373 let mbtype = bc.read_tree(MV_REF_TREE, &mvprobs);
1374
1375 match mbtype {
1376 VPMBType::InterNearest => {
1377 self.fill_mv(mb_x, mb_y, nearest_mv);
1378 },
1379 VPMBType::InterNear => {
1380 self.fill_mv(mb_x, mb_y, near_mv);
1381 },
1382 VPMBType::InterNoMV => {
1383 self.fill_mv(mb_x, mb_y, ZERO_MV);
1384 },
1385 VPMBType::InterMV => {
1386 let dmy = decode_mv_component(&mut bc, &self.dstate.mv_probs[0]);
1387 let dmx = decode_mv_component(&mut bc, &self.dstate.mv_probs[1]);
1388 let new_mv = pred_mv + MV{ x: dmx, y: dmy };
1389 self.fill_mv(mb_x, mb_y, new_mv);
1390 },
1391 VPMBType::InterFourMV => {
1392 self.dstate.has_y2 = false;
1393 self.do_split_mv(&mut bc, mb_x, mb_y, pred_mv)?;
1394 },
1395 _ => unreachable!(),
1396 };
1397
1398 self.fill_ymode(mb_x, mb_y, PredMode::Inter);
1399 self.mb_info[mb_idx].mb_type = mbtype;
1400 self.mb_info[mb_idx].ymode = PredMode::Inter;
1401 self.mb_info[mb_idx].uvmode = PredMode::Inter;
1402 self.mb_info[mb_idx].rframe = rframe;
1403 }
1404 let has_coeffs = if !mb_coeff_skip {
1405 self.decode_residue(bc_main, mb_x)
1406 } else {
1407 let y2_left = self.pcache.y2_pred_left;
1408 let y2_top = self.pcache.y2_pred.data[self.pcache.y2_pred.xpos + mb_x - self.pcache.y2_pred.stride];
1409 self.pcache.reset_left();
1410 if !self.dstate.has_y2 {
1411 self.pcache.y2_pred_left = y2_left;
1412 self.pcache.y2_pred.data[self.pcache.y2_pred.xpos + mb_x] = y2_top;
1413 }
1414 false
1415 };
1416 match self.mb_info[mb_idx].mb_type {
1417 VPMBType::Intra => {
1418 self.recon_intra_mb(&mut dframe, mb_x, mb_y, mb_coeff_skip)?;
1419 },
1420 _ => {
1421 self.recon_inter_mb(&mut dframe, mb_x, mb_y, mb_coeff_skip, rframe);
1422 },
1423 }
1424 if loop_filter {
1425 if let Some(loop_str) = self.dstate.force_loop_str {
1426 self.mb_info[mb_idx].loop_str = loop_str;
1427 } else {
1428 self.mb_info[mb_idx].loop_str = self.dstate.loop_filter_level;
1429 }
1430 if self.dstate.lf_delta {
1431 let mut loop_str = self.mb_info[mb_idx].loop_str as i8;
1432 let idx = match self.mb_info[mb_idx].rframe {
1433 VP8Ref::Intra => 0,
1434 VP8Ref::Last => 1,
1435 VP8Ref::Golden => 2,
1436 VP8Ref::AltRef => 3,
1437 };
1438 loop_str += self.dstate.lf_frame_delta[idx];
1439 let idx = match self.mb_info[mb_idx].mb_type {
1440 VPMBType::Intra => 0,
1441 VPMBType::InterNoMV => 1,
1442 VPMBType::InterFourMV => 3,
1443 _ => 2,
1444 };
1445 if self.mb_info[mb_idx].mb_type != VPMBType::Intra || self.mb_info[mb_idx].ymode == PredMode::BPred {
1446 loop_str += self.dstate.lf_mode_delta[idx];
1447 }
1448 self.mb_info[mb_idx].loop_str = loop_str.max(0).min(63) as u8;
1449 }
1450 self.mb_info[mb_idx].inner_filt = has_coeffs || (self.mb_info[mb_idx].ymode == PredMode::BPred) || (self.mb_info[mb_idx].mb_type == VPMBType::InterFourMV);
1451 }
1452 mb_idx += 1;
1453 }
1454 self.pcache.update_row();
1455 self.pcache.reset_left();
1456 }
1457 if loop_filter {
1458 let mut mb_idx = 0;
1459 for mb_y in 0..self.mb_h {
1460 for mb_x in 0..self.mb_w {
1461 let loop_str = self.mb_info[mb_idx].loop_str;
1462 if loop_str > 0 {
1463 self.loop_filter_mb(&mut dframe, mb_x, mb_y, loop_str, self.mb_info[mb_idx].inner_filt);
1464 }
1465 mb_idx += 1;
1466 }
1467 }
1468 }
1469
1470 if !keep_probs {
1471 self.dstate.restore(&self.tmp_probs);
1472 }
1473
1474 match update_ar {
1475 1 => {
1476 let last = self.shuf.get_last().unwrap();
1477 self.shuf.add_altref_frame(last);
1478 },
1479 2 => {
1480 let golden = self.shuf.get_golden().unwrap();
1481 self.shuf.add_altref_frame(golden);
1482 },
1483 _ => {},
1484 };
1485 match update_gf {
1486 4 => self.shuf.add_golden_frame(buf.clone()),
1487 1 => {
1488 let last = self.shuf.get_last().unwrap();
1489 self.shuf.add_golden_frame(last);
1490 },
1491 2 => {
1492 let altref = self.shuf.get_altref().unwrap();
1493 self.shuf.add_golden_frame(altref);
1494 },
1495 _ => {},
1496 };
1497 if update_ar == 4 {
1498 self.shuf.add_altref_frame(buf.clone());
1499 }
1500 if update_last {
1501 self.shuf.add_frame(buf.clone());
1502 }
1503
1504 let mut frm = NAFrame::new_from_pkt(pkt, self.info.clone(), NABufferType::Video(buf));
1505 frm.set_keyframe(self.dstate.is_intra);
1506 frm.set_frame_type(if self.dstate.is_intra { FrameType::I } else { FrameType::P });
1507 Ok(frm.into_ref())
1508 }
1509 fn flush(&mut self) {
1510 self.shuf.clear();
1511 }
1512}
1513
// The decoder exposes no user-configurable options.
impl NAOptionHandler for VP8Decoder {
    fn get_supported_options(&self) -> &[NAOptionDefinition] { &[] }
    fn set_options(&mut self, _options: &[NAOption]) { }
    fn query_option_value(&self, _name: &str) -> Option<NAValue> { None }
}
1519
/// Creates a new VP8 decoder instance for registration with the codec framework.
pub fn get_decoder() -> Box<dyn NADecoder + Send> {
    Box::new(VP8Decoder::new())
}
1523
#[cfg(test)]
mod test {
    use nihav_core::codecs::RegisteredDecoders;
    use nihav_core::demuxers::RegisteredDemuxers;
    use nihav_codec_support::test::dec_video::*;
    use crate::duck_register_all_decoders;
    use crate::duck_register_all_demuxers;

    // all samples are from the official VP8 test bitstreams set

    // Decodes the given IVF sample and compares against the expected
    // MD5 hash of the decoded output.
    fn test_vp8_core(name: &str, hash: [u32; 4]) {
        let mut dmx_reg = RegisteredDemuxers::new();
        duck_register_all_demuxers(&mut dmx_reg);
        let mut dec_reg = RegisteredDecoders::new();
        duck_register_all_decoders(&mut dec_reg);

        test_decoding("dkivf", "vp8", name, None, &dmx_reg,
                      &dec_reg, ExpectedTestResult::MD5(hash));
    }

    #[test]
    fn test_vp8_01() {
        test_vp8_core("assets/Duck/VP8/vp80-00-comprehensive-001.ivf",
                      [0xfad12607, 0x4e1bd536, 0x3d43b9d1, 0xcadddb71]);
    }
    #[test]
    fn test_vp8_02() {
        test_vp8_core("assets/Duck/VP8/vp80-00-comprehensive-002.ivf",
                      [0x182f03dd, 0x264ebac0, 0x4e24c7c9, 0x499d7cdb]);
    }
    #[test]
    fn test_vp8_03() {
        test_vp8_core("assets/Duck/VP8/vp80-00-comprehensive-003.ivf",
                      [0xe5fe668b, 0x03390002, 0x2c3eb0ba, 0x76a44bd1]);
    }
    #[test]
    fn test_vp8_04() {
        test_vp8_core("assets/Duck/VP8/vp80-00-comprehensive-004.ivf",
                      [0x95097ce9, 0x808c1d47, 0xe03f99c4, 0x8ad111ec]);
    }
    #[test]
    fn test_vp8_05() {
        test_vp8_core("assets/Duck/VP8/vp80-00-comprehensive-005.ivf",
                      [0x0f469e4f, 0xd1dea533, 0xe5580688, 0xb2d242ff]);
    }
    // Sample 006 is currently disabled (no reference hash).
    /*#[test]
    fn test_vp8_06() {
        test_vp8_core("assets/Duck/VP8/vp80-00-comprehensive-006.ivf",
                      [0;4]);
    }*/
    #[test]
    fn test_vp8_07() {
        test_vp8_core("assets/Duck/VP8/vp80-00-comprehensive-007.ivf",
                      [0x92526913, 0xd89b6a9b, 0x00f2d602, 0xdef08bce]);
    }
    #[test]
    fn test_vp8_08() {
        test_vp8_core("assets/Duck/VP8/vp80-00-comprehensive-008.ivf",
                      [0x1676d1eb, 0x19bd175e, 0xc5bb10f5, 0xd49f24f1]);
    }
    #[test]
    fn test_vp8_09() {
        test_vp8_core("assets/Duck/VP8/vp80-00-comprehensive-009.ivf",
                      [0x19201a2d, 0x535bd82f, 0x41c1a565, 0x8def5379]);
    }
    #[test]
    fn test_vp8_10() {
        test_vp8_core("assets/Duck/VP8/vp80-00-comprehensive-010.ivf",
                      [0x61d05919, 0xa9883d9f, 0x215eb3f2, 0xdb63eb13]);
    }
    #[test]
    fn test_vp8_11() {
        test_vp8_core("assets/Duck/VP8/vp80-00-comprehensive-011.ivf",
                      [0x1a0afe5e, 0x70512a03, 0x323a8f11, 0x76bcf022]);
    }
    #[test]
    fn test_vp8_12() {
        test_vp8_core("assets/Duck/VP8/vp80-00-comprehensive-012.ivf",
                      [0x4ea997c8, 0x0dc2087e, 0x6deec81f, 0x1ecf6668]);
    }
    #[test]
    fn test_vp8_13() {
        test_vp8_core("assets/Duck/VP8/vp80-00-comprehensive-013.ivf",
                      [0x93169305, 0xd3054327, 0xbe3cc074, 0xf0773a75]);
    }
    // Sample 014 is currently disabled (no reference hash).
    /*#[test]
    fn test_vp8_14() {
        test_vp8_core("assets/Duck/VP8/vp80-00-comprehensive-014.ivf",
                      [0;4]);
    }*/
    #[test]
    fn test_vp8_15() {
        test_vp8_core("assets/Duck/VP8/vp80-00-comprehensive-015.ivf",
                      [0x23b9cc58, 0x2e344726, 0xe76cda09, 0x2b416bcf]);
    }
    #[test]
    fn test_vp8_16() {
        test_vp8_core("assets/Duck/VP8/vp80-00-comprehensive-016.ivf",
                      [0x55e889d2, 0x2f99718c, 0xf6936d55, 0xf8ade12b]);
    }
    #[test]
    fn test_vp8_17() {
        test_vp8_core("assets/Duck/VP8/vp80-00-comprehensive-017.ivf",
                      [0x95a68ffb, 0x228d1d8c, 0x6ee54f16, 0xa10fb9eb]);
    }
}
1629
/// DC quantizer lookup table indexed by quantizer index (0..=127),
/// as defined by the VP8 specification (RFC 6386).
const DC_QUANTS: [i16; 128] = [
      4,   5,   6,   7,   8,   9,  10,  10,
     11,  12,  13,  14,  15,  16,  17,  17,
     18,  19,  20,  20,  21,  21,  22,  22,
     23,  23,  24,  25,  25,  26,  27,  28,
     29,  30,  31,  32,  33,  34,  35,  36,
     37,  37,  38,  39,  40,  41,  42,  43,
     44,  45,  46,  46,  47,  48,  49,  50,
     51,  52,  53,  54,  55,  56,  57,  58,
     59,  60,  61,  62,  63,  64,  65,  66,
     67,  68,  69,  70,  71,  72,  73,  74,
     75,  76,  76,  77,  78,  79,  80,  81,
     82,  83,  84,  85,  86,  87,  88,  89,
     91,  93,  95,  96,  98, 100, 101, 102,
    104, 106, 108, 110, 112, 114, 116, 118,
    122, 124, 126, 128, 130, 132, 134, 136,
    138, 140, 143, 145, 148, 151, 154, 157
];
1648
/// AC quantizer lookup table indexed by quantizer index (0..=127),
/// as defined by the VP8 specification (RFC 6386).
const AC_QUANTS: [i16; 128] = [
      4,   5,   6,   7,   8,   9,  10,  11,
     12,  13,  14,  15,  16,  17,  18,  19,
     20,  21,  22,  23,  24,  25,  26,  27,
     28,  29,  30,  31,  32,  33,  34,  35,
     36,  37,  38,  39,  40,  41,  42,  43,
     44,  45,  46,  47,  48,  49,  50,  51,
     52,  53,  54,  55,  56,  57,  58,  60,
     62,  64,  66,  68,  70,  72,  74,  76,
     78,  80,  82,  84,  86,  88,  90,  92,
     94,  96,  98, 100, 102, 104, 106, 108,
    110, 112, 114, 116, 119, 122, 125, 128,
    131, 134, 137, 140, 143, 146, 149, 152,
    155, 158, 161, 164, 167, 170, 173, 177,
    181, 185, 189, 193, 197, 201, 205, 209,
    213, 217, 221, 225, 229, 234, 239, 245,
    249, 254, 259, 264, 269, 274, 279, 284
];