use nihav_core::codecs::*;
use nihav_core::io::byteio::*;
use nihav_codec_support::codecs::{MV, ZERO_MV};
use super::vpcommon::*;
use super::vp78::*;
use super::vp78data::*;
use super::vp78dsp::*;
use super::vp7data::*;
use super::vp7dsp::*;

const PITCH_MODE_NORMAL: u8 = 0;
const PITCH_MODE_FOUR:   u8 = 1;
const PITCH_MODE_X2:     u8 = 2;
const PITCH_MODE_X4:     u8 = 3;

#[derive(Clone,Copy,Default)]
struct MBFeature {
    present_prob: u8,
    tree_probs:   [u8; 3],
    def_val:      [u8; 4],
}

struct SBParams<'a> {
    coef_probs: &'a [[[[u8; 11]; 3]; 8]; 4],
    scan:       &'a [usize; 16],
    qmat:       &'a [i16; 16],
}

fn decode_subblock(bc: &mut BoolCoder, coeffs: &mut [i16; 16], ctype: usize, pctx: u8, sbparams: &SBParams) -> u8 {
    let mut has_nz = 0;
    let start = if ctype != 0 { 0 } else { 1 };
    *coeffs = [0; 16];
    let mut cval = pctx as usize;
    for idx in start..16 {
        let probs = &sbparams.coef_probs[ctype][COEF_BANDS[idx]][cval];
        let tok = bc.read_tree(COEF_TREE, probs);
        if tok == DCTToken::EOB { break; }
        let level = expand_token(bc, tok);
        coeffs[sbparams.scan[idx]] = level.wrapping_mul(sbparams.qmat[idx]);
        cval = level.abs().min(2) as usize;
        has_nz |= cval;
    }
    if has_nz > 0 { 1 } else { 0 }
}

#[derive(Clone,Copy,Default)]
struct MBInfo {
    mb_type:  VPMBType,
    ymode:    PredMode,
    uvmode:   PredMode,
    loop_str: u8,
    upd_gf:   bool,
}

#[derive(Default)]
struct DecoderState {
    features: [Option<MBFeature>; 4],

    fading:     bool,
    fade_alpha: u16,
    fade_beta:  u16,

    lf_simple:         bool,
    loop_filter_level: u8,
    loop_sharpness:    u8,

    is_intra: bool,
    version:  u8,

    kf_ymode_prob:  [u8; 4],
    kf_uvmode_prob: [u8; 3],

    prob_intra_pred: u8,
    prob_last_pred:  u8,

    coef_probs: [[[[u8; 11]; 3]; 8]; 4],
    mv_probs:   [[u8; 17]; 2],

    force_quant:     Option<u8>,
    force_loop_str:  Option<u8>,
    force_gf_update: bool,
    force_pitch:     Option<u8>,

    has_y2:         bool,
    pdc_pred_val:   [i16; 2],
    pdc_pred_count: [usize; 2],

    ipred_ctx_y: IPredContext,
    ipred_ctx_u: IPredContext,
    ipred_ctx_v: IPredContext,
}

impl DecoderState {
    fn reset(&mut self) {
        self.kf_ymode_prob.copy_from_slice(Y_MODE_TREE_PROBS);
        self.kf_uvmode_prob.copy_from_slice(UV_MODE_TREE_PROBS);
        self.coef_probs.copy_from_slice(&DEFAULT_DCT_PROBS);
        self.mv_probs.copy_from_slice(&DEFAULT_MV_PROBS);
    }
}

fn decode_mv_component(bc: &mut BoolCoder, probs: &[u8; 17]) -> i16 {
    let val = if !bc.read_prob(probs[0]) {
            bc.read_tree(SMALL_MV_TREE, &probs[2..9])
        } else {
            let raw_probs = &probs[9..];
            let mut raw = 0;
            for ord in LONG_VECTOR_ORDER.iter() {
                raw |= (bc.read_prob(raw_probs[*ord]) as i16) << *ord;
            }
            if (raw & 0xF0) != 0 {
                raw |= (bc.read_prob(raw_probs[3]) as i16) << 3;
            } else {
                raw |= 1 << 3;
            }
            raw
        };
    if (val == 0) || !bc.read_prob(probs[1]) {
        val
    } else {
        -val
    }
}

struct VP7Decoder {
    info: NACodecInfoRef,

    shuf:      VPShuffler,
    width:     usize,
    height:    usize,
    mb_w:      usize,
    mb_h:      usize,
    mb_info:   Vec<MBInfo>,
    mvs:       Vec<MV>,
    mv_stride: usize,

    ymodes:        Vec<PredMode>,
    ymode_stride:  usize,
    uvmodes:       Vec<PredMode>,
    uvmode_stride: usize,

    dstate: DecoderState,
    pcache: PredCache,

    coeffs: [[i16; 16]; 25],
    scan:   [usize; 16],
    qmat:   [[[i16; 16]; 3]; 5],

    mc_buf: NAVideoBufferRef<u8>,

    tmp_scan: [usize; 16],
}

impl VP7Decoder {
    fn new() -> Self {
        let vt = alloc_video_buffer(NAVideoInfo::new(128, 128, false, YUV420_FORMAT), 4).unwrap();
        let mut scan = [0; 16];
        scan.copy_from_slice(&DEFAULT_SCAN_ORDER);
        let mc_buf = vt.get_vbuf().unwrap();
        Self {
            info: NACodecInfoRef::default(),

            shuf:      VPShuffler::new(),
            width:     0,
            height:    0,
            mb_w:      0,
            mb_h:      0,
            mb_info:   Vec::new(),
            mvs:       Vec::new(),
            mv_stride: 0,

            ymodes:        Vec::new(),
            ymode_stride:  0,
            uvmodes:       Vec::new(),
            uvmode_stride: 0,

            dstate: DecoderState::default(),
            pcache: PredCache::new(),

            coeffs:   [[0; 16]; 25],
            scan,
            tmp_scan: [0; 16],
            qmat:     [[[0; 16]; 3]; 5],

            mc_buf,
        }
    }
    fn set_dimensions(&mut self, width: usize, height: usize) {
        if (width == self.width) && (height == self.height) {
            return;
        }
        self.width  = width;
        self.height = height;
        self.mb_w   = (self.width  + 15) >> 4;
        self.mb_h   = (self.height + 15) >> 4;
        self.mb_info.resize(self.mb_w * self.mb_h, MBInfo::default());
        self.mv_stride = self.mb_w * 4;
        self.mvs.resize(self.mv_stride * self.mb_h * 4, ZERO_MV);

        self.ymode_stride  = self.mb_w * 4;
        self.uvmode_stride = self.mb_w;
        self.ymodes.resize(self.ymode_stride * self.mb_h * 4, PredMode::default());
        self.uvmodes.resize(self.uvmode_stride * self.mb_h, PredMode::default());

        self.pcache.resize(self.mb_w);
    }
    fn read_features(&mut self, bc: &mut BoolCoder) -> DecoderResult<()> {
        for (i, feat) in self.dstate.features.iter_mut().enumerate() {
            if bc.read_bool() {
                let mut feature = MBFeature::default();
                feature.present_prob = bc.read_byte();
                for tp in feature.tree_probs.iter_mut() {
                    if bc.read_bool() {
                        *tp = bc.read_byte();
                    } else {
                        *tp = 255;
                    }
                }
                if i != 2 {
                    let fbits = match i {
                            0 => 7,
                            1 => 6,
                            _ => if self.dstate.version == 0 { 8 } else { 5 },
                        };
                    for dval in feature.def_val.iter_mut() {
                        if bc.read_bool() {
                            *dval = bc.read_bits(fbits) as u8;
                        } else {
                            *dval = 0;
                        }
                    }
                }
                *feat = Some(feature);
            } else {
                *feat = None;
            }
        }
        Ok(())
    }
    fn read_dct_coef_prob_upd(&mut self, bc: &mut BoolCoder) -> DecoderResult<()> {
        for i in 0..4 {
            for j in 0..8 {
                for k in 0..3 {
                    for l in 0..11 {
                        if bc.read_prob(DCT_UPDATE_PROBS[i][j][k][l]) {
                            self.dstate.coef_probs[i][j][k][l] = bc.read_byte();
                        }
                    }
                }
            }
        }
        Ok(())
    }
    fn read_mv_prob_upd(&mut self, bc: &mut BoolCoder) -> DecoderResult<()> {
        for comp in 0..2 {
            for i in 0..17 {
                if bc.read_prob(MV_UPDATE_PROBS[comp][i]) {
                    self.dstate.mv_probs[comp][i] = bc.read_probability();
                }
            }
        }
        Ok(())
    }
    fn decode_mb_features(&mut self, bc: &mut BoolCoder, _mb_x: usize, _mb_y: usize) -> DecoderResult<()> {
        self.dstate.force_quant     = None;
        self.dstate.force_loop_str  = None;
        self.dstate.force_gf_update = false;
        self.dstate.force_pitch     = None;
        for (i, feat) in self.dstate.features.iter().enumerate() {
            if let Some(feat) = feat {
                let present = bc.read_prob(feat.present_prob);
                if present {
                    let ftype_idx = bc.read_tree(FEATURE_TREE, &feat.tree_probs);
                    let val = feat.def_val[ftype_idx];
                    match i {
                        0 => self.dstate.force_quant     = Some(ftype_idx as u8),
                        1 => self.dstate.force_loop_str  = Some(val),
                        2 => self.dstate.force_gf_update = true,
                        _ => self.dstate.force_pitch     = Some(val),
                    };
                }
            }
        }
        Ok(())
    }
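    // Decodes the residue for one macroblock: the optional Y2 (luma DC) block first, then
    // the sixteen luma blocks, then four U and four V blocks, keeping the "block had
    // coefficients" flags in the prediction cache for later context selection.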
    fn decode_residue(&mut self, bc: &mut BoolCoder, mb_x: usize, mb_idx: usize, use_last: bool) {
        let qmat_idx = if let Some(idx) = self.dstate.force_quant { (idx as usize) + 1 } else { 0 };
        let mut sbparams = SBParams {
                scan:       &DEFAULT_SCAN_ORDER,
                qmat:       &self.qmat[qmat_idx][2],
                coef_probs: &self.dstate.coef_probs,
            };
        let mut has_ac = [false; 25];
        let ytype;
        if self.dstate.has_y2 {
            let pred = &self.pcache.y2_pred;
            let pidx = pred.xpos + mb_x;
            let pctx = self.pcache.y2_pred_left + pred.data[pidx - pred.stride];

            let has_nz = decode_subblock(bc, &mut self.coeffs[24], 1, pctx, &sbparams);
            self.pcache.y2_pred.data[pidx] = has_nz;
            self.pcache.y2_pred_left = has_nz;
            has_ac[24] = has_nz > 0;

            ytype = 0;
        } else {
            let pred = &mut self.pcache.y2_pred;
            let pidx = pred.xpos + mb_x;
            pred.data[pidx] = pred.data[pidx - pred.stride];

            ytype = 3;
        }
        sbparams.scan = &self.scan;
        sbparams.qmat = &self.qmat[qmat_idx][0];
        for i in 0..16 {
            let bx = i & 3;
            let by = i >> 2;
            let pred = &self.pcache.y_pred;
            let pidx = pred.xpos + mb_x * 4 + bx + by * pred.stride;
            let pctx = self.pcache.y_pred_left[by] + pred.data[pidx - pred.stride];

            let has_nz = decode_subblock(bc, &mut self.coeffs[i], ytype, pctx, &sbparams);
            self.pcache.y_pred.data[pidx] = has_nz;
            self.pcache.y_pred_left[by] = has_nz;
            has_ac[i] = has_nz > 0;
        }
        sbparams.qmat = &self.qmat[qmat_idx][1];
        for i in 16..20 {
            let bx = i & 1;
            let by = (i >> 1) & 1;
            let pred = &self.pcache.u_pred;
            let pidx = pred.xpos + mb_x * 2 + bx + by * pred.stride;
            let pctx = self.pcache.u_pred_left[by] + pred.data[pidx - pred.stride];

            let has_nz = decode_subblock(bc, &mut self.coeffs[i], 2, pctx, &sbparams);
            self.pcache.u_pred.data[pidx] = has_nz;
            self.pcache.u_pred_left[by] = has_nz;
            has_ac[i] = has_nz > 0;
        }
        for i in 20..24 {
            let bx = i & 1;
            let by = (i >> 1) & 1;
            let pred = &self.pcache.v_pred;
            let pidx = pred.xpos + mb_x * 2 + bx + by * pred.stride;
            let pctx = self.pcache.v_pred_left[by] + pred.data[pidx - pred.stride];

            let has_nz = decode_subblock(bc, &mut self.coeffs[i], 2, pctx, &sbparams);
            self.pcache.v_pred.data[pidx] = has_nz;
            self.pcache.v_pred_left[by] = has_nz;
            has_ac[i] = has_nz > 0;
        }

        if self.dstate.has_y2 {
            let y2block = &mut self.coeffs[24];
            if self.mb_info[mb_idx].mb_type != VPMBType::Intra {
                let mut dc = y2block[0];
                let pdc_idx = if use_last { 0 } else { 1 };
                let pval = self.dstate.pdc_pred_val[pdc_idx];

                if self.dstate.pdc_pred_count[pdc_idx] > 3 {
                    dc += pval;
                    y2block[0] = dc;
                }
                if (pval == 0) || (dc == 0) || ((pval ^ dc) < 0) {
                    self.dstate.pdc_pred_count[pdc_idx] = 0;
                } else if dc == pval {
                    self.dstate.pdc_pred_count[pdc_idx] += 1;
                }
                self.dstate.pdc_pred_val[pdc_idx] = dc;
            }
            if has_ac[24] {
                idct4x4(y2block);
            } else if y2block[0] != 0 {
                idct4x4_dc(y2block);
            }
            for i in 0..16 {
                self.coeffs[i][0] = self.coeffs[24][i];
            }
        }
        for i in 0..24 {
            if has_ac[i] {
                idct4x4(&mut self.coeffs[i]);
            } else if self.coeffs[i][0] != 0 {
                idct4x4_dc(&mut self.coeffs[i]);
            }
        }
    }

    fn set_qmat(&mut self, y_dc_q: usize, y_ac_q: usize, y2_dc_q: usize, y2_ac_q: usize, uv_dc_q: usize, uv_ac_q: usize) {
        self.qmat[0][0][0] = Y_DC_QUANTS[y_dc_q];
        for i in 1..16 {
            self.qmat[0][0][i] = Y_AC_QUANTS[y_ac_q];
        }
        self.qmat[0][1][0] = UV_DC_QUANTS[uv_dc_q];
        for i in 1..16 {
            self.qmat[0][1][i] = UV_AC_QUANTS[uv_ac_q];
        }
        self.qmat[0][2][0] = Y2_DC_QUANTS[y2_dc_q];
        for i in 1..16 {
            self.qmat[0][2][i] = Y2_AC_QUANTS[y2_ac_q];
        }
        if let Some(ref feat) = self.dstate.features[0] {
            for j in 0..4 {
                let q = feat.def_val[j] as usize;
                self.qmat[j + 1][0][0] = Y_DC_QUANTS[q];
                for i in 1..16 {
                    self.qmat[j + 1][0][i] = Y_AC_QUANTS[q];
                }
                self.qmat[j + 1][1][0] = UV_DC_QUANTS[q];
                for i in 1..16 {
                    self.qmat[j + 1][1][i] = UV_AC_QUANTS[q];
                }
                self.qmat[j + 1][2][0] = Y2_DC_QUANTS[q];
                for i in 1..16 {
                    self.qmat[j + 1][2][i] = Y2_AC_QUANTS[q];
                }
            }
        }
    }
    fn fill_ymode(&mut self, mb_x: usize, mb_y: usize, ymode: PredMode) {
        let mut iidx = mb_x * 4 + mb_y * 4 * self.ymode_stride;
        for _ in 0..4 {
            for x in 0..4 {
                self.ymodes[iidx + x] = ymode;
            }
            iidx += self.ymode_stride;
        }
    }
    fn fill_mv(&mut self, mb_x: usize, mb_y: usize, mv: MV) {
        let mut iidx = mb_x * 4 + mb_y * 4 * self.mv_stride;
        for _ in 0..4 {
            for x in 0..4 {
                self.mvs[iidx + x] = mv;
            }
            iidx += self.mb_w * 4;
        }
    }
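    // Scans the candidate block positions around the current macroblock, accumulating
    // weights for zero/unavailable, nearest, near and "other" vectors; the weights select
    // both the predicted MV and the probabilities used to code the inter macroblock mode.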
    fn find_mv_pred(&self, mb_x: usize, mb_y: usize) -> ([u8; 4], MV, MV, MV) {
        let mut nearest_mv = ZERO_MV;
        let mut near_mv = ZERO_MV;

        let mut ct: [u8; 4] = [0; 4];

        let start = if self.dstate.version == 0 { 1 } else { 0 };
        let mvwrap = (self.mb_w as isize) + 1;
        for (yoff, xoff, weight, blk_no) in CAND_POS.iter() {
            let cx = (mb_x as isize) + (*xoff as isize);
            let cy = (mb_y as isize) + (*yoff as isize);
            let mvpos = cx + cy * mvwrap;
            if (mvpos < start) || ((mvpos % mvwrap) == (mvwrap - 1)) {
                ct[0] += weight;
                continue;
            }
            let cx = (mvpos % mvwrap) as usize;
            let cy = (mvpos / mvwrap) as usize;
            let bx = (*blk_no as usize) & 3;
            let by = (*blk_no as usize) >> 2;
            let blk_pos = cx * 4 + bx + (cy * 4 + by) * self.mv_stride;
            let mv = self.mvs[blk_pos];
            if mv == ZERO_MV {
                ct[0] += weight;
                continue;
            }
            let idx;
            if (nearest_mv == ZERO_MV) || (nearest_mv == mv) {
                nearest_mv = mv;
                idx = 1;
            } else if near_mv == ZERO_MV {
                near_mv = mv;
                idx = 2;
            } else {
                idx = if mv == near_mv { 2 } else { 3 };
            }
            ct[idx] += weight;
        }
        let pred_mv = if ct[1] > ct[2] {
                if ct[1] >= ct[0] { nearest_mv } else { ZERO_MV }
            } else {
                if ct[2] >= ct[0] { near_mv } else { ZERO_MV }
            };

        let mvprobs = [INTER_MODE_PROBS[ct[0] as usize][0],
                       INTER_MODE_PROBS[ct[1] as usize][1],
                       INTER_MODE_PROBS[ct[2] as usize][2],
                       INTER_MODE_PROBS[ct[2] as usize][3]];

        (mvprobs, nearest_mv, near_mv, pred_mv)
    }
    fn get_split_mv(&self, bc: &mut BoolCoder, mb_x: usize, mb_y: usize, bx: usize, by: usize, pred_mv: MV) -> MV {
        let mode = bc.read_tree(SUB_MV_REF_TREE, &SUB_MV_REF_PROBS);
        let mvidx = mb_x * 4 + bx + (mb_y * 4 + by) * self.mv_stride;
        match mode {
            SubMVRef::Left => {
                if (mb_x > 0) || (bx > 0) {
                    self.mvs[mvidx - 1]
                } else {
                    ZERO_MV
                }
            },
            SubMVRef::Above => {
                if (mb_y > 0) || (by > 0) {
                    self.mvs[mvidx - self.mv_stride]
                } else {
                    ZERO_MV
                }
            },
            SubMVRef::Zero => ZERO_MV,
            SubMVRef::New => {
                let dmy = decode_mv_component(bc, &self.dstate.mv_probs[0]);
                let dmx = decode_mv_component(bc, &self.dstate.mv_probs[1]);
                pred_mv + MV{ x: dmx, y: dmy }
            },
        }
    }
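    // Four-MV macroblock: reads the split mode (top/bottom halves, left/right halves,
    // quarters or all sixteen blocks) and fills the per-block motion vectors, each one
    // coming from the left or top neighbour, zero, or a newly coded vector added to the prediction.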
    fn do_split_mv(&mut self, bc: &mut BoolCoder, mb_x: usize, mb_y: usize, pred_mv: MV) -> DecoderResult<()> {
        let split_mode = bc.read_tree(MV_SPLIT_MODE_TREE, &MV_SPLIT_MODE_PROBS);
        let mut mvidx = mb_x * 4 + mb_y * 4 * self.mv_stride;
        match split_mode {
            MVSplitMode::TopBottom => {
                let top_mv = self.get_split_mv(bc, mb_x, mb_y, 0, 0, pred_mv);
                for _ in 0..2 {
                    for x in 0..4 { self.mvs[mvidx + x] = top_mv; }
                    mvidx += self.mv_stride;
                }
                let bot_mv = self.get_split_mv(bc, mb_x, mb_y, 0, 2, pred_mv);
                for _ in 2..4 {
                    for x in 0..4 { self.mvs[mvidx + x] = bot_mv; }
                    mvidx += self.mv_stride;
                }
            },
            MVSplitMode::LeftRight => {
                let left_mv  = self.get_split_mv(bc, mb_x, mb_y, 0, 0, pred_mv);
                self.mvs[mvidx + 1] = left_mv;
                let right_mv = self.get_split_mv(bc, mb_x, mb_y, 2, 0, pred_mv);
                for _ in 0..4 {
                    self.mvs[mvidx + 0] = left_mv;
                    self.mvs[mvidx + 1] = left_mv;
                    self.mvs[mvidx + 2] = right_mv;
                    self.mvs[mvidx + 3] = right_mv;
                    mvidx += self.mv_stride;
                }
            },
            MVSplitMode::Quarters => {
                for y in (0..4).step_by(2) {
                    for x in (0..4).step_by(2) {
                        self.mvs[mvidx + x] = self.get_split_mv(bc, mb_x, mb_y, x, y, pred_mv);
                        self.mvs[mvidx + x + 1] = self.mvs[mvidx + x];
                    }
                    for x in 0..4 {
                        self.mvs[mvidx + x + self.mv_stride] = self.mvs[mvidx + x];
                    }
                    mvidx += self.mv_stride * 2;
                }
            },
            MVSplitMode::Sixteenths => {
                for y in 0..4 {
                    for x in 0..4 {
                        self.mvs[mvidx + x] = self.get_split_mv(bc, mb_x, mb_y, x, y, pred_mv);
                    }
                    mvidx += self.mv_stride;
                }
            },
        };
        Ok(())
    }

    fn add_residue(&self, dframe: &mut NASimpleVideoFrame<u8>, mb_x: usize, mb_y: usize, do_luma: bool, pitch_mode: u8) {
        if do_luma {
            let ydst = &mut dframe.data[dframe.offset[0]..];
            let ystride = dframe.stride[0];
            let mut yoff = mb_x * 16 + mb_y * 16 * ystride;
            match pitch_mode {
                PITCH_MODE_NORMAL => {
                    for y in 0..4 {
                        for x in 0..4 {
                            add_coeffs4x4(ydst, yoff + x * 4, ystride, &self.coeffs[x + y * 4]);
                        }
                        yoff += 4 * ystride;
                    }
                },
                PITCH_MODE_FOUR => {
                    for y in 0..16 {
                        add_coeffs16x1(ydst, yoff, &self.coeffs[y]);
                        yoff += ystride;
                    }
                },
                PITCH_MODE_X2 => {
                    for y in 0..2 {
                        for x in 0..4 {
                            add_coeffs4x4(ydst, yoff + x * 4, ystride * 2, &self.coeffs[x + y * 4]);
                        }
                        yoff += 8 * ystride;
                    }
                    yoff -= 15 * ystride;
                    for y in 2..4 {
                        for x in 0..4 {
                            add_coeffs4x4(ydst, yoff + x * 4, ystride * 2, &self.coeffs[x + y * 4]);
                        }
                        yoff += 8 * ystride;
                    }
                },
                PITCH_MODE_X4 => {
                    for y in 0..4 {
                        for x in 0..4 {
                            add_coeffs4x4(ydst, yoff + x * 4, ystride * 4, &self.coeffs[x + y * 4]);
                        }
                        yoff += ystride;
                    }
                },
                _ => unreachable!(),
            };
        }
        let dst = &mut dframe.data[0..];
        let mut uoff = dframe.offset[1] + mb_x * 8 + mb_y * 8 * dframe.stride[1];
        let ustride = dframe.stride[1];
        let mut voff = dframe.offset[2] + mb_x * 8 + mb_y * 8 * dframe.stride[2];
        let vstride = dframe.stride[2];
        if (pitch_mode == PITCH_MODE_NORMAL) || (pitch_mode == PITCH_MODE_FOUR) {
            for y in 0..2 {
                for x in 0..2 {
                    add_coeffs4x4(dst, uoff + x * 4, ustride, &self.coeffs[16 + x + y * 2]);
                    add_coeffs4x4(dst, voff + x * 4, vstride, &self.coeffs[20 + x + y * 2]);
                }
                uoff += ustride * 4;
                voff += vstride * 4;
            }
        } else {
            for y in 0..2 {
                for x in 0..2 {
                    add_coeffs4x4(dst, uoff + x * 4, ustride * 2, &self.coeffs[16 + x + y * 2]);
                    add_coeffs4x4(dst, voff + x * 4, vstride * 2, &self.coeffs[20 + x + y * 2]);
                }
                uoff += ustride;
                voff += vstride;
            }
        }
    }
    fn recon_intra_mb(&mut self, dframe: &mut NASimpleVideoFrame<u8>, mb_x: usize, mb_y: usize) -> DecoderResult<()> {
        let pitch = self.dstate.force_pitch.unwrap_or(0);
        let pitch_mode = (pitch >> 3) & 3;

        let mb_idx = mb_x + mb_y * self.mb_w;
        let has_top = mb_y > 0;
        let has_left = mb_x > 0;
        let ydst = &mut dframe.data[dframe.offset[0]..];
        let ystride = dframe.stride[0];
        let mut yoff = mb_x * 16 + mb_y * 16 * ystride;
        let ipred_ctx_y = &mut self.dstate.ipred_ctx_y;
        ipred_ctx_y.has_top = has_top;
        ipred_ctx_y.has_left = has_left;
        let is_normal = self.mb_info[mb_idx].ymode != PredMode::BPred;
        if is_normal {
            ipred_ctx_y.fill(ydst, yoff, ystride, 16, 16);
            match self.mb_info[mb_idx].ymode {
                PredMode::DCPred => IPred16x16::ipred_dc(ydst, yoff, ystride, ipred_ctx_y),
                PredMode::HPred  => IPred16x16::ipred_h (ydst, yoff, ystride, ipred_ctx_y),
                PredMode::VPred  => IPred16x16::ipred_v (ydst, yoff, ystride, ipred_ctx_y),
                PredMode::TMPred => IPred16x16::ipred_tm(ydst, yoff, ystride, ipred_ctx_y),
                _ => unreachable!(),
            };
        } else {
            validate!((pitch_mode == PITCH_MODE_NORMAL) || (pitch_mode == PITCH_MODE_X2));
            let mut iidx = mb_x * 4 + mb_y * 4 * self.ymode_stride;
            let mut tr_save = [0x80u8; 16];
            if pitch_mode == PITCH_MODE_X2 {
                // reorganise coefficient data for interlaced case
                for y in (0..4).step_by(2) {
                    for x in 0..4 {
                        let mut tmpblock = [0i16; 16 * 2];
                        let eidx = x + y * 4;
                        let oidx = x + y * 4 + 4;
                        for i in 0..4 {
                            for j in 0..4 {
                                tmpblock[i * 8 + 0 + j] = self.coeffs[eidx][i * 4 + j];
                                tmpblock[i * 8 + 4 + j] = self.coeffs[oidx][i * 4 + j];
                            }
                        }
                        self.coeffs[eidx].copy_from_slice(&tmpblock[0..16]);
                        self.coeffs[oidx].copy_from_slice(&tmpblock[16..32]);
                    }
                }
            }
            let tr_edge = if has_top { ydst[yoff - ystride + 15] } else { 0x80 };
            for y in 0..4 {
                for x in 0..4 {
                    ipred_ctx_y.has_left = has_left || x > 0;
                    let bmode = self.ymodes[iidx + x];
                    let cur_yoff = yoff + x * 4;
                    let has_tr = ipred_ctx_y.has_top && ((x < 3) || ((y == 0) && (mb_y < self.mb_w - 1)));
                    let has_dl = ipred_ctx_y.has_left && (x == 0) && (y < 3);
                    ipred_ctx_y.fill(ydst, cur_yoff, ystride,
                                     if has_tr { 8 } else { 4 },
                                     if has_dl { 8 } else { 4 });
                    if !has_tr {
                        for i in 0..4 {
                            ipred_ctx_y.top[i + 4] = tr_save[x * 4 + i];
                        }
                    } else {
                        for i in 0..4 {
                            tr_save[x * 4 + i] = ipred_ctx_y.top[i + 4];
                        }
                    }
                    if (mb_x == self.mb_w - 1) && has_top && (x == 3) {
                        for i in 0..4 {
                            ipred_ctx_y.top[i + 4] = tr_edge;
                        }
                    }
                    match bmode {
                        PredMode::DCPred => IPred4x4::ipred_dc(ydst, cur_yoff, ystride, ipred_ctx_y),
                        PredMode::TMPred => IPred4x4::ipred_tm(ydst, cur_yoff, ystride, ipred_ctx_y),
                        PredMode::HPred  => IPred4x4::ipred_he(ydst, cur_yoff, ystride, ipred_ctx_y),
                        PredMode::VPred  => IPred4x4::ipred_ve(ydst, cur_yoff, ystride, ipred_ctx_y),
                        PredMode::LDPred => IPred4x4::ipred_ld(ydst, cur_yoff, ystride, ipred_ctx_y),
                        PredMode::RDPred => IPred4x4::ipred_rd(ydst, cur_yoff, ystride, ipred_ctx_y),
                        PredMode::VRPred => IPred4x4::ipred_vr(ydst, cur_yoff, ystride, ipred_ctx_y),
                        PredMode::VLPred => IPred4x4::ipred_vl(ydst, cur_yoff, ystride, ipred_ctx_y),
                        PredMode::HDPred => IPred4x4::ipred_hd(ydst, cur_yoff, ystride, ipred_ctx_y),
                        PredMode::HUPred => IPred4x4::ipred_hu(ydst, cur_yoff, ystride, ipred_ctx_y),
                        _ => unreachable!(),
                    };
                    add_coeffs4x4(ydst, cur_yoff, ystride, &self.coeffs[x + y * 4]);
                }
                ipred_ctx_y.has_top = true;
                yoff += 4 * ystride;
                iidx += self.ymode_stride;
            }
        }
        let dst = &mut dframe.data[0..];
        let uoff = dframe.offset[1] + mb_x * 8 + mb_y * 8 * dframe.stride[1];
        let ustride = dframe.stride[1];
        let voff = dframe.offset[2] + mb_x * 8 + mb_y * 8 * dframe.stride[2];
        let vstride = dframe.stride[2];
        let ipred_ctx_u = &mut self.dstate.ipred_ctx_u;
        let ipred_ctx_v = &mut self.dstate.ipred_ctx_v;
        ipred_ctx_u.has_top = has_top;
        ipred_ctx_v.has_top = has_top;
        ipred_ctx_u.has_left = has_left;
        ipred_ctx_v.has_left = has_left;
        ipred_ctx_u.fill(dst, uoff, ustride, 8, 8);
        ipred_ctx_v.fill(dst, voff, vstride, 8, 8);
        match self.mb_info[mb_idx].uvmode {
            PredMode::DCPred => {
                IPred8x8::ipred_dc(dst, uoff, ustride, ipred_ctx_u);
                IPred8x8::ipred_dc(dst, voff, vstride, ipred_ctx_v);
            },
            PredMode::HPred => {
                IPred8x8::ipred_h(dst, uoff, ustride, ipred_ctx_u);
                IPred8x8::ipred_h(dst, voff, vstride, ipred_ctx_v);
            },
            PredMode::VPred => {
                IPred8x8::ipred_v(dst, uoff, ustride, ipred_ctx_u);
                IPred8x8::ipred_v(dst, voff, vstride, ipred_ctx_v);
            },
            PredMode::TMPred => {
                IPred8x8::ipred_tm(dst, uoff, ustride, ipred_ctx_u);
                IPred8x8::ipred_tm(dst, voff, vstride, ipred_ctx_v);
            },
            _ => unreachable!(),
        };
        self.add_residue(dframe, mb_x, mb_y, is_normal, pitch_mode);
        Ok(())
    }
    fn recon_inter_mb(&mut self, dframe: &mut NASimpleVideoFrame<u8>, mb_x: usize, mb_y: usize, use_last: bool) {
        let pitch = self.dstate.force_pitch.unwrap_or(0);
        let pitch_dmode = (pitch >> 3) & 3;
        let pitch_smode = pitch & 7;

        let refframe = (if use_last { self.shuf.get_last() } else { self.shuf.get_golden() }).unwrap();
        let single_mv = self.mb_info[mb_x + mb_y * self.mb_w].mb_type != VPMBType::InterFourMV;
        let mut iidx = mb_x * 4 + mb_y * 4 * self.mv_stride;
        let mut mc_buf = self.mc_buf.get_data_mut().unwrap();

        let dst = &mut dframe.data[0..];
        let ystride = dframe.stride[0];
        let mut yoff = dframe.offset[0] + mb_x * 16 + mb_y * 16 * ystride;
        if pitch_smode == 0 {
            if single_mv {
                mc_block16x16(dst, yoff, ystride, mb_x * 16, mb_y * 16,
                              self.mvs[iidx].x * 2, self.mvs[iidx].y * 2, refframe.clone(), 0, &mut mc_buf);
            } else {
                for y in 0..4 {
                    for x in 0..4 {
                        mc_block4x4(dst, yoff + x * 4, ystride, mb_x * 16 + x * 4, mb_y * 16 + y * 4,
                                    self.mvs[iidx + x].x * 2, self.mvs[iidx + x].y * 2, refframe.clone(), 0, &mut mc_buf);
                    }
                    yoff += 4 * ystride;
                    iidx += self.mv_stride;
                }
            }
        } else {
            if single_mv {
                mc_block_special(dst, yoff, ystride, mb_x * 16, mb_y * 16,
                                 self.mvs[iidx].x * 2, self.mvs[iidx].y * 2,
                                 refframe.clone(), 0, &mut mc_buf, 16, pitch_smode);
            } else {
                for y in 0..4 {
                    for x in 0..4 {
                        mc_block_special(dst, yoff + x * 4, ystride,
                                         mb_x * 16 + x * 4, mb_y * 16 + y * 4,
                                         self.mvs[iidx + x].x * 2, self.mvs[iidx + x].y * 2,
                                         refframe.clone(), 0, &mut mc_buf, 4, pitch_smode);
                    }
                    yoff += 4 * ystride;
                    iidx += self.mv_stride;
                }
            }
        }

        let mut iidx = mb_x * 4 + mb_y * 4 * self.mv_stride;
        let mut uoff = dframe.offset[1] + mb_x * 8 + mb_y * 8 * dframe.stride[1];
        let ustride = dframe.stride[1];
        let mut voff = dframe.offset[2] + mb_x * 8 + mb_y * 8 * dframe.stride[2];
        let vstride = dframe.stride[2];
        if single_mv {
            let chroma_mv = self.mvs[iidx];

            if pitch_smode == 0 {
                mc_block8x8(dst, uoff, ustride, mb_x * 8, mb_y * 8, chroma_mv.x, chroma_mv.y, refframe.clone(), 1, &mut mc_buf);
                mc_block8x8(dst, voff, vstride, mb_x * 8, mb_y * 8, chroma_mv.x, chroma_mv.y, refframe, 2, &mut mc_buf);
            } else {
                mc_block_special(dst, uoff, ustride, mb_x * 8, mb_y * 8, chroma_mv.x, chroma_mv.y,
                                 refframe.clone(), 1, &mut mc_buf, 8, pitch_smode);
                mc_block_special(dst, voff, vstride, mb_x * 8, mb_y * 8, chroma_mv.x, chroma_mv.y,
                                 refframe, 2, &mut mc_buf, 8, pitch_smode);
            }
        } else {
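            // Four-MV mode: each 4x4 chroma block uses the sum of the four luma MVs covering
            // it, rounded (sign-aware) and scaled down by four to chroma resolution.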
            for y in 0..2 {
                for x in 0..2 {
                    let mut chroma_mv = self.mvs[iidx + x * 2] + self.mvs[iidx + x * 2 + 1]
                            + self.mvs[iidx + x * 2 + self.mv_stride]
                            + self.mvs[iidx + x * 2 + self.mv_stride + 1];
                    if chroma_mv.x < 0 {
                        chroma_mv.x += 1;
                    } else {
                        chroma_mv.x += 2;
                    }
                    if chroma_mv.y < 0 {
                        chroma_mv.y += 1;
                    } else {
                        chroma_mv.y += 2;
                    }
                    chroma_mv.x >>= 2;
                    chroma_mv.y >>= 2;

                    if pitch_smode == 0 {
                        mc_block4x4(dst, uoff + x * 4, ustride, mb_x * 8 + x * 4, mb_y * 8 + y * 4,
                                    chroma_mv.x, chroma_mv.y, refframe.clone(), 1, &mut mc_buf);
                        mc_block4x4(dst, voff + x * 4, vstride, mb_x * 8 + x * 4, mb_y * 8 + y * 4,
                                    chroma_mv.x, chroma_mv.y, refframe.clone(), 2, &mut mc_buf);
                    } else {
                        mc_block_special(dst, uoff + x * 4, ustride, mb_x * 8 + x * 4, mb_y * 8 + y * 4,
                                         chroma_mv.x, chroma_mv.y, refframe.clone(), 1, &mut mc_buf,
                                         4, pitch_smode);
                        mc_block_special(dst, voff + x * 4, vstride, mb_x * 8 + x * 4, mb_y * 8 + y * 4,
                                         chroma_mv.x, chroma_mv.y, refframe.clone(), 2, &mut mc_buf,
                                         4, pitch_smode);
                    }
                }
                uoff += ustride * 4;
                voff += vstride * 4;
                iidx += 2 * self.mv_stride;
            }
        }
        self.add_residue(dframe, mb_x, mb_y, true, pitch_dmode);
    }
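    // Applies the in-loop deblocking filter to one macroblock: macroblock edges use the
    // stronger edge filter, inner 4-pixel boundaries the inner filter, with thresholds
    // derived from the per-MB loop strength and the frame-level sharpness.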
    fn loop_filter_mb(&mut self, dframe: &mut NASimpleVideoFrame<u8>, mb_x: usize, mb_y: usize, loop_str: u8) {
        let edge_thr   = i16::from(loop_str) + 2;
        let luma_thr   = i16::from(loop_str);
        let chroma_thr = i16::from(loop_str) * 2;
        let inner_thr  = if self.dstate.loop_sharpness == 0 {
                i16::from(loop_str)
            } else {
                let bound1 = i16::from(9 - self.dstate.loop_sharpness);
                let shift = (self.dstate.loop_sharpness + 3) >> 2;
                (i16::from(loop_str) >> shift).min(bound1)
            };
        let hev_thr = i16::from(HIGH_EDGE_VAR_THR[if self.dstate.is_intra { 1 } else { 0 }][loop_str as usize]);

        let ystride = dframe.stride[0];
        let ustride = dframe.stride[1];
        let vstride = dframe.stride[2];
        let ypos = dframe.offset[0] + mb_x * 16 + mb_y * 16 * ystride;
        let upos = dframe.offset[1] + mb_x * 8 + mb_y * 8 * ustride;
        let vpos = dframe.offset[2] + mb_x * 8 + mb_y * 8 * vstride;

        let (loop_edge, loop_inner) = if self.dstate.lf_simple {
                (simple_loop_filter as LoopFilterFunc, simple_loop_filter as LoopFilterFunc)
            } else {
                (normal_loop_filter_edge as LoopFilterFunc, normal_loop_filter_inner as LoopFilterFunc)
            };

        if mb_x > 0 {
            loop_edge(dframe.data, ypos, 1, ystride, 16, edge_thr, inner_thr, hev_thr);
            loop_edge(dframe.data, upos, 1, ustride, 8, edge_thr, inner_thr, hev_thr);
            loop_edge(dframe.data, vpos, 1, vstride, 8, edge_thr, inner_thr, hev_thr);
        }
        if mb_y > 0 {
            loop_edge(dframe.data, ypos, ystride, 1, 16, edge_thr, inner_thr, hev_thr);
            loop_edge(dframe.data, upos, ustride, 1, 8, edge_thr, inner_thr, hev_thr);
            loop_edge(dframe.data, vpos, vstride, 1, 8, edge_thr, inner_thr, hev_thr);
        }

        for y in 1..4 {
            loop_inner(dframe.data, ypos + y * 4 * ystride, ystride, 1, 16, luma_thr, inner_thr, hev_thr);
        }
        loop_inner(dframe.data, upos + 4 * ustride, ustride, 1, 8, chroma_thr, inner_thr, hev_thr);
        loop_inner(dframe.data, vpos + 4 * vstride, vstride, 1, 8, chroma_thr, inner_thr, hev_thr);

        for x in 1..4 {
            loop_inner(dframe.data, ypos + x * 4, 1, ystride, 16, luma_thr, inner_thr, hev_thr);
        }
        loop_inner(dframe.data, upos + 4, 1, ustride, 8, chroma_thr, inner_thr, hev_thr);
        loop_inner(dframe.data, vpos + 4, 1, vstride, 8, chroma_thr, inner_thr, hev_thr);
    }
}
916 | ||
917 | impl NADecoder for VP7Decoder { | |
918 | fn init(&mut self, supp: &mut NADecoderSupport, info: NACodecInfoRef) -> DecoderResult<()> { | |
919 | if let NACodecTypeInfo::Video(vinfo) = info.get_properties() { | |
920 | let fmt = YUV420_FORMAT; | |
921 | let myvinfo = NAVideoInfo::new(vinfo.get_width(), vinfo.get_height(), false, fmt); | |
922 | let myinfo = NACodecTypeInfo::Video(myvinfo); | |
923 | self.info = NACodecInfo::new_ref(info.get_name(), myinfo, info.get_extradata()).into_ref(); | |
924 | ||
925 | supp.pool_u8.set_dec_bufs(4); | |
926 | supp.pool_u8.prealloc_video(NAVideoInfo::new(myvinfo.get_width(), myvinfo.get_height(), false, vinfo.get_format()), 4)?; | |
927 | self.set_dimensions(myvinfo.get_width(), myvinfo.get_height()); | |
928 | Ok(()) | |
929 | } else { | |
930 | Err(DecoderError::InvalidData) | |
931 | } | |
932 | } | |
933 | #[allow(clippy::cognitive_complexity)] | |
934 | fn decode(&mut self, supp: &mut NADecoderSupport, pkt: &NAPacket) -> DecoderResult<NAFrameRef> { | |
935 | let src = pkt.get_buffer(); | |
936 | ||
937 | validate!(src.len() > 4); | |
938 | ||
        let frame_tag = read_u24le(src.as_slice())?;
        self.dstate.is_intra = (frame_tag & 1) == 0;
        self.dstate.version = ((frame_tag >> 1) & 7) as u8;
        let part2_off = (frame_tag >> 4) as usize;
        let part1_off = if self.dstate.version == 0 { 4 } else { 3 };

        validate!(src.len() > part1_off + part2_off);
        let mut bc = BoolCoder::new(&src[part1_off..][..part2_off])?;
        let mut bc_main = BoolCoder::new(&src[part1_off + part2_off..])?;
        if self.dstate.is_intra {
            let width = bc.read_bits(12) as usize;
            let height = bc.read_bits(12) as usize;
            let _scalev = bc.read_bits(2);
            let _scaleh = bc.read_bits(2);
            validate!((width > 0) && (height > 0));
            self.set_dimensions(width, height);

            self.dstate.reset();
            self.scan.copy_from_slice(&DEFAULT_SCAN_ORDER);
        } else {
            if !self.shuf.has_refs() {
                return Err(DecoderError::MissingReference);
            }
        }

        self.read_features(&mut bc)?;

        let y_ac_q = bc.read_bits(7) as usize;
        let y_dc_q  = if bc.read_bool() { bc.read_bits(7) as usize } else { y_ac_q };
        let y2_dc_q = if bc.read_bool() { bc.read_bits(7) as usize } else { y_ac_q };
        let y2_ac_q = if bc.read_bool() { bc.read_bits(7) as usize } else { y_ac_q };
        let uv_dc_q = if bc.read_bool() { bc.read_bits(7) as usize } else { y_ac_q };
        let uv_ac_q = if bc.read_bool() { bc.read_bits(7) as usize } else { y_ac_q };
        self.set_qmat(y_dc_q, y_ac_q, y2_dc_q, y2_ac_q, uv_dc_q, uv_ac_q);

        let update_gf = if self.dstate.is_intra { true } else { bc.read_bool() };

        let mut has_fading_feature = true;
        let mut keep_probs = true;
        if self.dstate.version != 0 {
            keep_probs = bc.read_bool();
            if self.dstate.is_intra {
                has_fading_feature = true;
            } else {
                has_fading_feature = bc.read_bool();
            }
        }

        if has_fading_feature {
            self.dstate.fading = bc.read_bool();
            if self.dstate.fading {
                self.dstate.fade_alpha = bc.read_sbits(8) as u16;
                self.dstate.fade_beta  = bc.read_sbits(8) as u16;
                if let Some(pframe) = self.shuf.get_last() {
                    let mut fframe = supp.pool_u8.get_free().unwrap();
                    let mut dframe = NASimpleVideoFrame::from_video_buf(&mut fframe).unwrap();
                    fade_frame(pframe, &mut dframe, self.dstate.fade_alpha, self.dstate.fade_beta);
                    self.shuf.add_frame(fframe);
                }
            }
        } else {
            self.dstate.fading = false;
        }

        if self.dstate.version == 0 {
            self.dstate.lf_simple = bc.read_bool();
        }

        if bc.read_bool() {
            for i in 1..16 {
                self.scan[i] = DEFAULT_SCAN_ORDER[bc.read_bits(4) as usize];
            }
        }

        if self.dstate.version != 0 {
            self.dstate.lf_simple = bc.read_bool();
        } else {
            self.dstate.lf_simple = false;
        }

        self.dstate.loop_filter_level = bc.read_bits(6) as u8;
        self.dstate.loop_sharpness    = bc.read_bits(3) as u8;

        self.read_dct_coef_prob_upd(&mut bc)?;

        if !self.dstate.is_intra {
            self.dstate.prob_intra_pred = bc.read_byte();
            self.dstate.prob_last_pred  = bc.read_byte();
            if bc.read_bool() {
                for i in 0..4 {
                    self.dstate.kf_ymode_prob[i] = bc.read_byte();
                }
            }
            if bc.read_bool() {
                for i in 0..3 {
                    self.dstate.kf_uvmode_prob[i] = bc.read_byte();
                }
            }
            self.read_mv_prob_upd(&mut bc)?;
        }
        if !keep_probs {
            self.tmp_scan.copy_from_slice(&self.scan);
        }

        let vinfo = NAVideoInfo::new(self.width, self.height, false, YUV420_FORMAT);
        let ret = supp.pool_u8.get_free();
        if ret.is_none() {
            return Err(DecoderError::AllocError);
        }
        let mut buf = ret.unwrap();
        if buf.get_info() != vinfo {
            self.shuf.clear();
            supp.pool_u8.reset();
            supp.pool_u8.prealloc_video(vinfo, 4)?;
            let ret = supp.pool_u8.get_free();
            if ret.is_none() {
                return Err(DecoderError::AllocError);
            }
            buf = ret.unwrap();
        }
        let mut dframe = NASimpleVideoFrame::from_video_buf(&mut buf).unwrap();

        let mut mb_idx = 0;
        self.pcache.reset();
        if self.dstate.is_intra || (self.dstate.version > 0) {
            self.dstate.pdc_pred_val   = [0; 2];
            self.dstate.pdc_pred_count = [0; 2];
        }
        let mut use_last = true;
        for mb_y in 0..self.mb_h {
            for mb_x in 0..self.mb_w {
                self.decode_mb_features(&mut bc, mb_x, mb_y)?;
                self.dstate.has_y2 = true;
                if self.dstate.is_intra {
                    let ymode = bc.read_tree(KF_Y_MODE_TREE, KF_Y_MODE_TREE_PROBS);
                    if ymode == PredMode::BPred {
                        self.dstate.has_y2 = false;
                        let mut iidx = mb_x * 4 + mb_y * 4 * self.ymode_stride;
                        for y in 0..4 {
                            for x in 0..4 {
                                let top_mode = if (y > 0) || (mb_y > 0) {
                                        self.ymodes[iidx + x - self.ymode_stride]
                                    } else {
                                        PredMode::DCPred
                                    };
                                let left_mode = if (x > 0) || (mb_x > 0) {
                                        self.ymodes[iidx + x - 1]
                                    } else {
                                        PredMode::DCPred
                                    };
                                let top_idx  = top_mode.to_b_index();
                                let left_idx = left_mode.to_b_index();
                                let bmode = bc.read_tree(B_MODE_TREE, &KF_B_MODE_TREE_PROBS[top_idx][left_idx]);
                                self.ymodes[iidx + x] = bmode;
                            }
                            iidx += self.ymode_stride;
                        }
                    } else {
                        self.fill_ymode(mb_x, mb_y, ymode.to_b_mode());
                    }
                    let uvmode = bc.read_tree(UV_MODE_TREE, KF_UV_MODE_TREE_PROBS);
                    self.mb_info[mb_idx].mb_type = VPMBType::Intra;
                    self.mb_info[mb_idx].ymode   = ymode;
                    self.mb_info[mb_idx].uvmode  = uvmode;
                } else if !bc.read_prob(self.dstate.prob_intra_pred) {
                    let ymode = bc.read_tree(Y_MODE_TREE, &self.dstate.kf_ymode_prob);
                    if ymode == PredMode::BPred {
                        self.dstate.has_y2 = false;
                        let mut iidx = mb_x * 4 + mb_y * 4 * self.ymode_stride;
                        for _y in 0..4 {
                            for x in 0..4 {
                                let bmode = bc.read_tree(B_MODE_TREE, B_MODE_TREE_PROBS);
                                self.ymodes[iidx + x] = bmode;
                            }
                            iidx += self.ymode_stride;
                        }
                    } else {
                        self.fill_ymode(mb_x, mb_y, PredMode::Inter);
                    }
                    let uvmode = bc.read_tree(UV_MODE_TREE, &self.dstate.kf_uvmode_prob);
                    self.mb_info[mb_idx].mb_type = VPMBType::Intra;
                    self.mb_info[mb_idx].ymode   = ymode;
                    self.mb_info[mb_idx].uvmode  = uvmode;
                    self.fill_mv(mb_x, mb_y, ZERO_MV);
                } else {
                    use_last = !bc.read_prob(self.dstate.prob_last_pred);

                    let (mvprobs, nearest_mv, near_mv, pred_mv) = self.find_mv_pred(mb_x, mb_y);
                    let mbtype = bc.read_tree(MV_REF_TREE, &mvprobs);

                    match mbtype {
                        VPMBType::InterNearest => {
                            self.fill_mv(mb_x, mb_y, nearest_mv);
                        },
                        VPMBType::InterNear => {
                            self.fill_mv(mb_x, mb_y, near_mv);
                        },
                        VPMBType::InterNoMV => {
                            self.fill_mv(mb_x, mb_y, ZERO_MV);
                        },
                        VPMBType::InterMV => {
                            let dmy = decode_mv_component(&mut bc, &self.dstate.mv_probs[0]);
                            let dmx = decode_mv_component(&mut bc, &self.dstate.mv_probs[1]);
                            let new_mv = pred_mv + MV{ x: dmx, y: dmy };
                            self.fill_mv(mb_x, mb_y, new_mv);
                        },
                        VPMBType::InterFourMV => {
                            self.do_split_mv(&mut bc, mb_x, mb_y, pred_mv)?;
                        },
                        _ => unreachable!(),
                    };

                    self.fill_ymode(mb_x, mb_y, PredMode::Inter);
                    self.mb_info[mb_idx].mb_type = mbtype;
                    self.mb_info[mb_idx].ymode   = PredMode::Inter;
                    self.mb_info[mb_idx].uvmode  = PredMode::Inter;
                }
                self.decode_residue(&mut bc_main, mb_x, mb_idx, use_last);
                match self.mb_info[mb_idx].mb_type {
                    VPMBType::Intra => {
                        self.recon_intra_mb(&mut dframe, mb_x, mb_y)?;
                    },
                    _ => {
                        self.recon_inter_mb(&mut dframe, mb_x, mb_y, use_last);
                    },
                }
                if let Some(loop_str) = self.dstate.force_loop_str {
                    self.mb_info[mb_idx].loop_str = loop_str;
                } else {
                    self.mb_info[mb_idx].loop_str = self.dstate.loop_filter_level;
                }
                self.mb_info[mb_idx].upd_gf = self.dstate.force_gf_update;
                mb_idx += 1;
            }
            self.pcache.update_row();
        }
        let mut mb_idx = 0;
        for mb_y in 0..self.mb_h {
            for mb_x in 0..self.mb_w {
                let loop_str = self.mb_info[mb_idx].loop_str;
                self.loop_filter_mb(&mut dframe, mb_x, mb_y, loop_str);
                mb_idx += 1;
            }
        }
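        // Partial golden-frame update (feature 2): macroblocks flagged during decoding are
        // copied from the newly reconstructed frame into a copy of the golden frame, which
        // then replaces it; a full update below simply stores the current frame as golden.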
        if !update_gf && self.dstate.features[2].is_some() {
            let gf = self.shuf.get_golden().unwrap();
            let mut new_gf = supp.pool_u8.get_copy(&gf).unwrap();
            let dframe = NASimpleVideoFrame::from_video_buf(&mut new_gf).unwrap();
            let mut mb_idx = 0;
            let mut mc_buf = self.mc_buf.get_data_mut().unwrap();
            for mb_y in 0..self.mb_h {
                for mb_x in 0..self.mb_w {
                    if self.mb_info[mb_idx].upd_gf {
                        mc_block16x16(dframe.data, dframe.offset[0] + mb_x * 16 + mb_y * 16 * dframe.stride[0], dframe.stride[0], mb_x * 16, mb_y * 16, 0, 0, buf.clone(), 0, &mut mc_buf);
                        mc_block8x8(dframe.data, dframe.offset[1] + mb_x * 8 + mb_y * 8 * dframe.stride[1], dframe.stride[1], mb_x * 8, mb_y * 8, 0, 0, buf.clone(), 1, &mut mc_buf);
                        mc_block8x8(dframe.data, dframe.offset[2] + mb_x * 8 + mb_y * 8 * dframe.stride[2], dframe.stride[2], mb_x * 8, mb_y * 8, 0, 0, buf.clone(), 2, &mut mc_buf);
                    }
                    mb_idx += 1;
                }
            }
            self.shuf.add_golden_frame(new_gf);
        }

        if !keep_probs {
            self.scan.copy_from_slice(&self.tmp_scan);
        }
        if update_gf {
            self.shuf.add_golden_frame(buf.clone());
        }
        self.shuf.add_frame(buf.clone());

        let mut frm = NAFrame::new_from_pkt(pkt, self.info.clone(), NABufferType::Video(buf));
        frm.set_keyframe(self.dstate.is_intra);
        frm.set_frame_type(if self.dstate.is_intra { FrameType::I } else { FrameType::P });
        Ok(frm.into_ref())
    }
    fn flush(&mut self) {
        self.shuf.clear();
    }
}
1219 | ||
1220 | impl NAOptionHandler for VP7Decoder { | |
1221 | fn get_supported_options(&self) -> &[NAOptionDefinition] { &[] } | |
1222 | fn set_options(&mut self, _options: &[NAOption]) { } | |
1223 | fn query_option_value(&self, _name: &str) -> Option<NAValue> { None } | |
1224 | } | |
1225 | ||
1226 | pub fn get_decoder() -> Box<dyn NADecoder + Send> { | |
1227 | Box::new(VP7Decoder::new()) | |
1228 | } | |
1229 | ||
1230 | #[cfg(test)] | |
1231 | mod test { | |
1232 | use nihav_core::codecs::RegisteredDecoders; | |
1233 | use nihav_core::demuxers::RegisteredDemuxers; | |
1234 | use nihav_codec_support::test::dec_video::*; | |
1235 | use crate::duck_register_all_decoders; | |
1236 | use nihav_commonfmt::generic_register_all_demuxers; | |
1237 | ||
1238 | #[test] | |
1239 | fn test_vp7() { | |
1240 | let mut dmx_reg = RegisteredDemuxers::new(); | |
1241 | generic_register_all_demuxers(&mut dmx_reg); | |
1242 | let mut dec_reg = RegisteredDecoders::new(); | |
1243 | duck_register_all_decoders(&mut dec_reg); | |
1244 | ||
1245 | // sample from https://trac.ffmpeg.org/ticket/5580 | |
1246 | test_decoding("avi", "vp7", "assets/Duck/interlaced_blit_pitch.avi", Some(12), &dmx_reg, | |
1247 | &dec_reg, ExpectedTestResult::MD5Frames(vec![ | |
1248 | [0xb79fb6f8, 0xed51ac9e, 0x9e423456, 0xc0918e7f], | |
1249 | [0xbf8d1274, 0x83515e15, 0x8c0887de, 0xfbfd05d3], | |
1250 | [0x8ad00466, 0x80b6cbfb, 0x54de408e, 0x9efbc05e], | |
1251 | [0x144122c5, 0x6897b553, 0x93474d29, 0x1a1274ec], | |
1252 | [0x06ff5d07, 0x55825d38, 0x072b0a78, 0xfcb5020f], | |
1253 | [0xfd01591b, 0xc42113e7, 0xc5a5550f, 0xb30f3b02], | |
1254 | [0x155e0d6e, 0x96d75e06, 0x9bd7ce87, 0xacf868e1], | |
1255 | [0xfd79103a, 0x695d21d3, 0xfeacb5b4, 0x1d869d08], | |
1256 | [0xf4bcfeac, 0x0d2c305c, 0x11416c96, 0x626a5ef6], | |
1257 | [0x3579b66c, 0x0a7d7dc0, 0xe80b0395, 0xf6a70661], | |
1258 | [0x5773768c, 0x813442e9, 0x4dd6f793, 0xb10fe55f], | |
1259 | [0xcaaf0ddb, 0x65c2410e, 0x95da5bba, 0x3b90128e], | |
1260 | [0x74773773, 0xe1dbadeb, 0x57aaf64b, 0x9c21e3c7]])); | |
1261 | } | |
1262 | } |