use nihav_core::frame::*;
use nihav_codec_support::codecs::{MV, ZERO_MV};
use super::super::vpcommon::*;
// DCT, motion-compensation and motion-search helpers used below
// (fdct/idct, MVEstimator, the MVSearch implementations, sub_blk,
// calc_mb_dist and the estimate_*_nits functions); module paths assumed
use super::dsp::*;
use super::mc::*;

#[cfg(debug_assertions)]
use std::fs::File;
#[cfg(debug_assertions)]
use std::io::Write;

#[cfg(debug_assertions)]
#[allow(dead_code)]
pub fn dump_pgm(vbuf: &NAVideoBuffer<u8>, name: &str) {
    let dst = vbuf.get_data();
    let (w, h) = vbuf.get_dimensions(0);
    let mut file = File::create(name).unwrap();
    // one grey PGM holding the luma plane plus both half-height chroma planes below it
    file.write_all(format!("P5\n{} {}\n255\n", w, h * 3 / 2).as_bytes()).unwrap();
    // rows are trimmed to the visible width so the PGM stays well-formed
    // even when the plane stride exceeds the frame width
    for row in dst[vbuf.get_offset(0)..].chunks(vbuf.get_stride(0)).take(h).rev() {
        file.write_all(&row[..w]).unwrap();
    }
    for (row1, row2) in dst[vbuf.get_offset(1)..].chunks(vbuf.get_stride(1)).take(h / 2).zip(dst[vbuf.get_offset(2)..].chunks(vbuf.get_stride(2))).rev() {
        file.write_all(&row1[..w / 2]).unwrap();
        file.write_all(&row2[..w / 2]).unwrap();
    }
}

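/// Coefficient/sample storage for one macroblock: blocks 0-3 are the four
/// 8x8 luma quadrants (left to right, top to bottom), block 4 is U and
/// block 5 is V; this is the layout `FrameEncoder::read_mbs()` fills in below.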
pub type Coeffs = [[i16; 64]; 6];

#[derive(Clone)]
pub struct ResidueMB {
    pub coeffs: Coeffs,
}

impl ResidueMB {
    fn new() -> Self {
        Self { coeffs: [[0; 64]; 6] }
    }
    fn fdct(&mut self) {
        for blk in self.coeffs.iter_mut() {
            fdct(blk); // forward transform from the DSP module (name assumed)
        }
    }
    fn idct(&mut self) {
        for blk in self.coeffs.iter_mut() {
            idct(blk); // inverse transform from the DSP module (name assumed)
        }
    }
    fn quant(&mut self, q: usize) {
        for blk in self.coeffs.iter_mut() {
            if blk[0] != 0 {
                blk[0] /= VP56_DC_QUANTS[q] * 4;
            }
            for coef in blk[1..].iter_mut() {
                if *coef != 0 {
                    *coef /= VP56_AC_QUANTS[q] * 4;
                }
            }
        }
    }
    fn dequant(&mut self, q: usize) {
        for blk in self.coeffs.iter_mut() {
            if blk[0] != 0 {
                blk[0] *= VP56_DC_QUANTS[q] * 4;
            }
            for coef in blk[1..].iter_mut() {
                if *coef != 0 {
                    *coef *= VP56_AC_QUANTS[q] * 4;
                }
            }
        }
    }
    fn dequant_from(&mut self, src: &Self, q: usize) {
        for (dblk, sblk) in self.coeffs.iter_mut().zip(src.coeffs.iter()) {
            dblk[0] = if sblk[0] != 0 { sblk[0] * VP56_DC_QUANTS[q] * 4 } else { 0 };
            for (dcoef, &scoef) in dblk[1..].iter_mut().zip(sblk[1..].iter()) {
                *dcoef = if scoef != 0 { scoef * VP56_AC_QUANTS[q] * 4 } else { 0 };
            }
        }
    }
    fn fill(&self, dst: &mut [[u8; 64]; 6]) {
        for (dblk, sblk) in dst.iter_mut().zip(self.coeffs.iter()) {
            for (dcoef, &scoef) in dblk.iter_mut().zip(sblk.iter()) {
                *dcoef = scoef as u8; // source MBs hold plain 0..255 samples
            }
        }
    }
}

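/// Candidate inter coding of one macroblock: the prediction residue, the
/// motion-compensated reference samples it was computed against (kept so the
/// reconstruction can be redone without repeating the MC), and the motion
/// vectors; four-MV mode uses one vector per luma block, otherwise `mv[3]`
/// holds the single MB-level vector.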
pub struct InterMB {
    pub residue:   ResidueMB,
    pub reference: Coeffs,
    pub mv:        [MV; 4],
}

impl InterMB {
    fn new() -> Self {
        Self {
            residue:   ResidueMB::new(),
            reference: [[0; 64]; 6],
            mv:        [ZERO_MV; 4],
        }
    }
}

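// VP5/6 quantizer tables indexed by the frame quantizer; as seen in
// `ResidueMB::quant()`/`dequant()` above, coefficient scaling uses four
// times these values.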
const VP56_DC_QUANTS: [i16; 64] = [
    47, 47, 47, 47, 45, 43, 43, 43,
    43, 43, 42, 41, 41, 40, 40, 40,
    40, 35, 35, 35, 35, 33, 33, 33,
    33, 32, 32, 32, 27, 27, 26, 26,
    25, 25, 24, 24, 23, 23, 19, 19,
    19, 19, 18, 18, 17, 16, 16, 16,
    16, 16, 15, 11, 11, 11, 10, 10,
     9,  8,  7,  5,  3,  3,  2,  2
];

const VP56_AC_QUANTS: [i16; 64] = [
    94, 92, 90, 88, 86, 82, 78, 74,
    70, 66, 62, 58, 54, 53, 52, 51,
    50, 49, 48, 47, 46, 45, 44, 43,
    42, 40, 39, 37, 36, 35, 34, 33,
    32, 31, 30, 29, 28, 27, 26, 25,
    24, 23, 22, 21, 20, 19, 18, 17,
    16, 15, 14, 13, 12, 11, 10,  9,
     8,  7,  6,  5,  4,  3,  2,  1
];

const VP56_FILTER_LIMITS: [u8; 64] = [
    14, 14, 13, 13, 12, 12, 10, 10,
    10, 10,  8,  8,  8,  8,  8,  8,
     8,  8,  8,  8,  8,  8,  8,  8,
     8,  8,  8,  8,  8,  8,  8,  8,
     8,  8,  8,  8,  7,  7,  7,  7,
     7,  7,  6,  6,  6,  6,  6,  6,
     5,  5,  5,  5,  4,  4,  4,  4,
     4,  4,  4,  3,  3,  3,  3,  2
];

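/// Frame-level encoder state: the source macroblocks plus one candidate
/// encoding per prediction mode (intra, single-MV inter, four-MV inter and
/// golden-frame inter). Mode decision later picks the cheapest candidate per
/// macroblock and records the outcome in `mb_types`, `coded_mv` and `fmv_sub`.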
#[derive(Default)]
pub struct FrameEncoder {
    pub quant:      usize,
    pub src_mbs:    Vec<ResidueMB>,
    pub intra_mbs:  Vec<ResidueMB>,
    pub inter_mbs:  Vec<InterMB>,
    pub fourmv_mbs: Vec<InterMB>,
    pub golden_mbs: Vec<InterMB>,

    pub mb_types:   Vec<VPMBType>,
    pub num_mv:     Vec<u8>,
    pub coded_mv:   Vec<[MV; 4]>,
    pub fmv_sub:    Vec<[VPMBType; 4]>,

    pub mb_w:       usize,
    pub mb_h:       usize,

    pub me_mode:    MVSearchMode,
    pub me_range:   i16,
}

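// 8x8 block transfer helpers: `read_block!` widens u8 samples from a strided
// plane into an i16 coefficient block, `write_block!` stores an 8x8 block of
// reconstructed samples back into a strided plane.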
macro_rules! read_block {
    ($dst: expr, $src: expr, $stride: expr) => {
        for (drow, srow) in $dst.chunks_mut(8).zip($src.chunks($stride).take(8)) {
            for (dst, &src) in drow.iter_mut().zip(srow.iter()) {
                *dst = i16::from(src);
            }
        }
    }
}

macro_rules! write_block {
    ($dst: expr, $src: expr, $stride: expr) => {
        for (drow, srow) in $dst.chunks_mut($stride).take(8).zip($src.chunks(8)) {
            drow[..8].copy_from_slice(srow);
        }
    }
}

impl FrameEncoder {
    pub fn new() -> Self { Self::default() }

    pub fn resize(&mut self, mb_w: usize, mb_h: usize) {
        self.mb_w = mb_w;
        self.mb_h = mb_h;

        let num_mbs = self.mb_w * self.mb_h;
        self.src_mbs.clear();
        self.src_mbs.reserve(num_mbs);
        self.intra_mbs.clear();
        self.intra_mbs.reserve(num_mbs);
        self.inter_mbs.clear();
        self.inter_mbs.reserve(num_mbs);
        self.fourmv_mbs.clear();
        self.fourmv_mbs.reserve(num_mbs);
        self.golden_mbs.clear();
        self.golden_mbs.reserve(num_mbs);

        self.mb_types.clear();
        self.mb_types.reserve(num_mbs);
        self.num_mv.clear();
        self.num_mv.reserve(num_mbs);
        self.coded_mv.clear();
        self.coded_mv.reserve(num_mbs);
        self.fmv_sub.clear();
        self.fmv_sub.reserve(num_mbs);
    }

    pub fn set_quant(&mut self, quant: usize) { self.quant = quant; }

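    /// Splits the YUV420 input frame into 16x16 macroblocks, widening every
    /// sample to i16 so the blocks can go straight into the forward DCT.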
    pub fn read_mbs(&mut self, vbuf: &NAVideoBuffer<u8>) {
        let src = vbuf.get_data();
        let y = &src[vbuf.get_offset(0)..];
        let ystride = vbuf.get_stride(0);
        let u = &src[vbuf.get_offset(1)..];
        let ustride = vbuf.get_stride(1);
        let v = &src[vbuf.get_offset(2)..];
        let vstride = vbuf.get_stride(2);
        let (w, _) = vbuf.get_dimensions(0);

        self.src_mbs.clear();
        for (ys, (us, vs)) in y.chunks(ystride * 16).zip(u.chunks(ustride * 8).zip(v.chunks(vstride * 8))) {
            for x in (0..w).step_by(16) {
                let mut mb = ResidueMB::new();
                for (i, blk) in mb.coeffs[..4].iter_mut().enumerate() {
                    read_block!(blk, ys[x + (i & 1) * 8 + (i >> 1) * 8 * ystride..], ystride);
                }
                read_block!(mb.coeffs[4], us[x / 2..], ustride);
                read_block!(mb.coeffs[5], vs[x / 2..], vstride);
                self.src_mbs.push(mb);
            }
        }
    }

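    /// Rebuilds the frame exactly as a decoder would see it: per macroblock,
    /// undo DC prediction, dequantize and inverse-transform the coefficients,
    /// then add either the +128 sample bias (intra) or the stored
    /// motion-compensated reference (inter), clamp to 0..=255 and write the
    /// planes back.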
    pub fn reconstruct_frame(&mut self, dc_pred: &mut VP56DCPred, mut vbuf: NAVideoBufferRef<u8>) {
        let mut blocks = [[0u8; 64]; 6];

        let mut yoff = vbuf.get_offset(0);
        let mut uoff = vbuf.get_offset(1);
        let mut voff = vbuf.get_offset(2);
        let ystride = vbuf.get_stride(0);
        let ustride = vbuf.get_stride(1);
        let vstride = vbuf.get_stride(2);
        let dst = vbuf.get_data_mut().unwrap();

        let mut mb_pos = 0;
        let quant = self.quant;

        for _mb_y in 0..self.mb_h {
            for mb_x in 0..self.mb_w {
                let mb_type = self.mb_types[mb_pos];
                let mb = self.get_mb_mut(mb_pos);
                for (i, blk) in mb.coeffs.iter_mut().enumerate() {
                    dc_pred.predict_dc(mb_type, i, blk, false);
                }
                mb.dequant(quant);
                mb.idct();
                let mb = self.get_mb(mb_pos);
                if mb_type.is_intra() {
                    for (dblk, sblk) in blocks.iter_mut().zip(mb.coeffs.iter()) {
                        for (dcoef, &scoef) in dblk.iter_mut().zip(sblk.iter()) {
                            *dcoef = (scoef + 128).max(0).min(255) as u8;
                        }
                    }
                } else {
                    let res_mb = match mb_type.get_ref_id() {
                            0 => unreachable!(), // intra is handled above
                            1 => if mb_type != VPMBType::InterFourMV {
                                    &self.inter_mbs[mb_pos].reference
                                } else {
                                    &self.fourmv_mbs[mb_pos].reference
                                },
                            _ => &self.golden_mbs[mb_pos].reference,
                        };

                    for (dblk, (sblk1, sblk2)) in blocks.iter_mut().zip(mb.coeffs.iter().zip(res_mb.iter())) {
                        for (dcoef, (&scoef1, &scoef2)) in dblk.iter_mut().zip(sblk1.iter().zip(sblk2.iter())) {
                            *dcoef = (scoef1 + scoef2).max(0).min(255) as u8;
                        }
                    }
                }

                for (i, blk) in blocks[..4].iter().enumerate() {
                    write_block!(&mut dst[yoff + mb_x * 16 + (i & 1) * 8 + (i >> 1) * 8 * ystride..],
                                 blk, ystride);
                }
                write_block!(&mut dst[uoff + mb_x * 8..], blocks[4], ustride);
                write_block!(&mut dst[voff + mb_x * 8..], blocks[5], vstride);

                mb_pos += 1;
            }
            yoff += ystride * 16;
            uoff += ustride * 8;
            voff += vstride * 8;
            dc_pred.update_row();
        }
        /*#[cfg(debug_assertions)]
        dump_pgm(&vbuf, "/home/kst/devel/NihAV-rust/assets/test_out/debug.pgm");*/
    }

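    /// Returns the residue belonging to the coding mode currently selected in
    /// `mb_types` for the given macroblock (mutable variant below).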
    pub fn get_mb(&self, mb_pos: usize) -> &ResidueMB {
        let mb_type = self.mb_types[mb_pos];
        match mb_type.get_ref_id() {
            0 => &self.intra_mbs[mb_pos],
            1 => if mb_type != VPMBType::InterFourMV {
                    &self.inter_mbs[mb_pos].residue
                } else {
                    &self.fourmv_mbs[mb_pos].residue
                },
            _ => &self.golden_mbs[mb_pos].residue,
        }
    }

    fn get_mb_mut(&mut self, mb_pos: usize) -> &mut ResidueMB {
        let mb_type = self.mb_types[mb_pos];
        match mb_type.get_ref_id() {
            0 => &mut self.intra_mbs[mb_pos],
            1 => if mb_type != VPMBType::InterFourMV {
                    &mut self.inter_mbs[mb_pos].residue
                } else {
                    &mut self.fourmv_mbs[mb_pos].residue
                },
            _ => &mut self.golden_mbs[mb_pos].residue,
        }
    }

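    /// Builds the intra candidates: forward DCT, DC bias removal and
    /// quantization for every source macroblock; `mb_types` is reset to
    /// all-intra as the baseline for the later mode decision.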
    pub fn prepare_intra_blocks(&mut self) {
        self.intra_mbs.clear();
        self.mb_types.clear();
        for smb in self.src_mbs.iter() {
            let mut dmb = smb.clone();
            dmb.fdct();
            for blk in dmb.coeffs.iter_mut() {
                // remove the +128 sample bias in the DC domain; the offset of
                // 128 * 32 = 4096 assumes the FDCT scales DC by 32
                // (reconstruction adds 128 back per sample after the IDCT)
                blk[0] -= 4096;
            }
            dmb.quant(self.quant);
            self.mb_types.push(VPMBType::Intra);
            self.intra_mbs.push(dmb);
        }
    }

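    /// Transforms and quantizes the motion-compensated residues previously
    /// gathered by `estimate_mvs()`, for either the last or the golden
    /// reference frame.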
    pub fn prepare_inter_blocks(&mut self, golden: bool) {
        let inter_mbs = if !golden { &mut self.inter_mbs } else { &mut self.golden_mbs };
        for (mb_idx, mb) in inter_mbs.iter_mut().enumerate() {
            mb.residue.fdct();
            mb.residue.quant(self.quant);
            self.mb_types[mb_idx] = VPMBType::InterMV;
        }
    }

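    /// Runs the configured motion search once per macroblock against the
    /// given reference frame and stores, for each MB, the winning vector,
    /// the prediction residue and the reference samples it was computed from.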
    pub fn estimate_mvs(&mut self, ref_frame: NAVideoBufferRef<u8>, mc_buf: NAVideoBufferRef<u8>, golden: bool) {
        let loop_thr = i16::from(VP56_FILTER_LIMITS[self.quant]);

        let inter_mbs = if !golden { &mut self.inter_mbs } else { &mut self.golden_mbs };

        if inter_mbs.is_empty() {
            for _ in 0..self.mb_w * self.mb_h {
                inter_mbs.push(InterMB::new());
            }
        }

        let mut cur_blk = [[0u8; 64]; 6];

        let mut mv_est = MVEstimator::new(ref_frame, mc_buf, loop_thr, self.me_range);

        let mut mv_search: Box<dyn MVSearch> = match self.me_mode {
            MVSearchMode::Full    => Box::new(FullMVSearch::new()),
            MVSearchMode::Diamond => Box::new(DiaSearch::new()),
            MVSearchMode::Hexagon => Box::new(HexSearch::new()),
        };

        let mut mb_pos = 0;
        for (mb_y, row) in inter_mbs.chunks_mut(self.mb_w).enumerate() {
            for (mb_x, mb) in row.iter_mut().enumerate() {
                self.src_mbs[mb_pos].fill(&mut cur_blk);

                let (best_mv, _best_dist) = mv_search.search_mb(&mut mv_est, &cur_blk, mb_x, mb_y);
                mb.mv = [best_mv; 4]; // one vector covers the whole MB here

                for i in 0..4 {
                    mv_est.mc_block(i, 0, mb_x * 16 + (i & 1) * 8, mb_y * 16 + (i >> 1) * 8, best_mv);
                    sub_blk(&mut mb.residue.coeffs[i], &cur_blk[i], &mv_est.ref_blk[i]);
                }
                for plane in 1..3 {
                    mv_est.mc_block(plane + 3, plane, mb_x * 8, mb_y * 8, best_mv);
                    sub_blk(&mut mb.residue.coeffs[plane + 3], &cur_blk[plane + 3], &mv_est.ref_blk[plane + 3]);
                }

                for (dblk, sblk) in mb.reference.iter_mut().zip(mv_est.ref_blk.iter()) {
                    for (dst, &src) in dblk.iter_mut().zip(sblk.iter()) {
                        *dst = i16::from(src);
                    }
                }
                mb_pos += 1;
            }
        }
    }

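    /// Motion search with one vector per 8x8 luma block; chroma is predicted
    /// with the average of the four luma vectors. Returns true when the four
    /// vectors actually differ, i.e. when four-MV coding can pay off.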
    fn estimate_fourmv(&mut self, ref_frame: NAVideoBufferRef<u8>, mc_buf: NAVideoBufferRef<u8>, mb_x: usize, mb_y: usize) -> bool {
        let loop_thr = i16::from(VP56_FILTER_LIMITS[self.quant]);

        if self.fourmv_mbs.is_empty() {
            for _ in 0..self.mb_w * self.mb_h {
                self.fourmv_mbs.push(InterMB::new());
            }
        }
        if self.fmv_sub.is_empty() {
            self.fmv_sub.resize(self.mb_w * self.mb_h, [VPMBType::Intra; 4]);
        }

        let mb_pos = mb_x + mb_y * self.mb_w;
        let mb = &mut self.fourmv_mbs[mb_pos];

        let mut cur_blk = [[0u8; 64]; 6];
        self.src_mbs[mb_pos].fill(&mut cur_blk);

        let mut mv_est = MVEstimator::new(ref_frame, mc_buf, loop_thr, self.me_range);

        let mut mv_search: Box<dyn MVSearch> = match self.me_mode {
            MVSearchMode::Full    => Box::new(FullMVSearch::new()),
            MVSearchMode::Diamond => Box::new(DiaSearch::new()),
            MVSearchMode::Hexagon => Box::new(HexSearch::new()),
        };

        for i in 0..4 {
            let xpos = mb_x * 16 + (i & 1) * 8;
            let ypos = mb_y * 16 + (i >> 1) * 8;
            let (best_mv, _best_dist) = mv_search.search_blk(&mut mv_est, &cur_blk[i], xpos, ypos);
            mb.mv[i] = best_mv;
        }

        // chroma uses the average of the four luma vectors
        let mvsum = mb.mv[0] + mb.mv[1] + mb.mv[2] + mb.mv[3];
        let chroma_mv = MV{ x: mvsum.x / 4, y: mvsum.y / 4 };

        for (i, blk) in mb.residue.coeffs[..4].iter_mut().enumerate() {
            let xpos = mb_x * 16 + (i & 1) * 8;
            let ypos = mb_y * 16 + (i >> 1) * 8;
            mv_est.mc_block(i, 0, xpos, ypos, mb.mv[i]);
            sub_blk(blk, &cur_blk[i], &mv_est.ref_blk[i]);
        }
        for plane in 1..3 {
            mv_est.mc_block(plane + 3, plane, mb_x * 8, mb_y * 8, chroma_mv);
            sub_blk(&mut mb.residue.coeffs[plane + 3], &cur_blk[plane + 3], &mv_est.ref_blk[plane + 3]);
        }

        for (dblk, sblk) in mb.reference.iter_mut().zip(mv_est.ref_blk.iter()) {
            for (dst, &src) in dblk.iter_mut().zip(sblk.iter()) {
                *dst = i16::from(src);
            }
        }

        (mb.mv[0] != mb.mv[1]) || (mb.mv[0] != mb.mv[2]) || (mb.mv[0] != mb.mv[3])
    }

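    /// Rate-distortion mode decision: every macroblock's candidates are
    /// compared by `distortion + lambda * estimated_bits` and `mb_types` is
    /// updated to the winner. Four-MV coding is only evaluated when the
    /// single-MV inter distortion stays above 512, since it needs another
    /// motion search plus transform.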
    pub fn select_inter_blocks(&mut self, ref_frame: NAVideoBufferRef<u8>, mc_buf: NAVideoBufferRef<u8>, has_golden_frame: bool, lambda: f32) {
        let mut tmp_mb = ResidueMB::new();
        for mb_idx in 0..self.mb_w * self.mb_h {
            tmp_mb.dequant_from(&self.intra_mbs[mb_idx], self.quant);
            tmp_mb.idct();
            for blk in tmp_mb.coeffs.iter_mut() {
                for coef in blk.iter_mut() {
                    *coef = (*coef + 128).max(0).min(255);
                }
            }
            let intra_dist = calc_mb_dist(&self.src_mbs[mb_idx], &tmp_mb);
            let intra_nits = estimate_intra_mb_nits(&self.intra_mbs[mb_idx].coeffs, self.quant);
            let intra_cost = (intra_dist as f32) + lambda * (intra_nits as f32);

            tmp_mb.dequant_from(&self.inter_mbs[mb_idx].residue, self.quant);
            tmp_mb.idct();
            for (blk, res) in tmp_mb.coeffs.iter_mut().zip(self.inter_mbs[mb_idx].reference.iter()) {
                for (coef, add) in blk.iter_mut().zip(res.iter()) {
                    *coef = (*coef + add).max(0).min(255);
                }
            }
            let inter_dist = calc_mb_dist(&self.src_mbs[mb_idx], &tmp_mb);
            let mut inter_nits = estimate_inter_mb_nits(&self.inter_mbs[mb_idx], self.quant, false);
            if self.inter_mbs[mb_idx].mv[3] != ZERO_MV {
                inter_nits += estimate_mv_nits(self.inter_mbs[mb_idx].mv[3]);
            }
            let mut inter_cost = (inter_dist as f32) + lambda * (inter_nits as f32);

            if inter_cost < intra_cost {
                self.mb_types[mb_idx] = VPMBType::InterMV;

                if inter_dist > 512 {
                    self.estimate_fourmv(ref_frame.clone(), mc_buf.clone(), mb_idx % self.mb_w, mb_idx / self.mb_w);
                    self.fourmv_mbs[mb_idx].residue.fdct();
                    self.fourmv_mbs[mb_idx].residue.quant(self.quant);

                    tmp_mb.dequant_from(&self.fourmv_mbs[mb_idx].residue, self.quant);
                    tmp_mb.idct();
                    for (blk, res) in tmp_mb.coeffs.iter_mut().zip(self.fourmv_mbs[mb_idx].reference.iter()) {
                        for (coef, add) in blk.iter_mut().zip(res.iter()) {
                            *coef = (*coef + add).max(0).min(255);
                        }
                    }
                    let fourmv_dist = calc_mb_dist(&self.src_mbs[mb_idx], &tmp_mb);
                    let fourmv_nits = estimate_inter_mb_nits(&self.fourmv_mbs[mb_idx], self.quant, true);
                    let fourmv_cost = (fourmv_dist as f32) + lambda * (fourmv_nits as f32);
                    if fourmv_cost < inter_cost {
                        self.mb_types[mb_idx] = VPMBType::InterFourMV;
                        inter_cost = fourmv_cost;
                    }
                }
            }

            if has_golden_frame {
                tmp_mb.dequant_from(&self.golden_mbs[mb_idx].residue, self.quant);
                tmp_mb.idct();
                for (blk, res) in tmp_mb.coeffs.iter_mut().zip(self.golden_mbs[mb_idx].reference.iter()) {
                    for (coef, add) in blk.iter_mut().zip(res.iter()) {
                        *coef = (*coef + add).max(0).min(255);
                    }
                }
                let golden_dist = calc_mb_dist(&self.src_mbs[mb_idx], &tmp_mb);
                let golden_nits = estimate_inter_mb_nits(&self.golden_mbs[mb_idx], self.quant, false);
                let golden_cost = (golden_dist as f32) + lambda * (golden_nits as f32);

                if (self.mb_types[mb_idx].is_intra() && golden_cost < intra_cost) ||
                   (!self.mb_types[mb_idx].is_intra() && golden_cost < inter_cost) {
                    self.mb_types[mb_idx] = VPMBType::GoldenMV;
                }
            }
        }
    }

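    /// Summarizes the mode decision for the frame-type choice: returns a
    /// strong and a weak "mostly intra" signal, i.e. whether intra MBs
    /// outnumber the rest three to one and whether they hold a simple
    /// majority.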
    pub fn decide_frame_type(&self) -> (bool, bool) {
        let mut intra_count = 0usize;
        let mut non_intra = 0usize;
        for mb_type in self.mb_types.iter() {
            if mb_type.is_intra() {
                intra_count += 1;
            } else {
                non_intra += 1;
            }
        }
        (intra_count > non_intra * 3, intra_count > non_intra)
    }

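    /// VP56-style motion-vector prediction: scans the candidate neighbours
    /// for vectors using the same reference, returning how many were found,
    /// the first one (`nearest`), the first differing one (`near`) and the
    /// delta-coding predictor, which is taken from the two closest positions
    /// only.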
    fn find_mv_pred(&self, mb_x: usize, mb_y: usize, ref_id: u8) -> (usize, MV, MV, MV) {
        // (row, column) offsets in the neighbour scan order used by the
        // VP5/6 decoders
        const CAND_POS: [(i8, i8); 12] = [
            (-1,  0), ( 0, -1),
            (-1, -1), (-1,  1),
            (-2,  0), ( 0, -2),
            (-1, -2), (-2, -1),
            (-2,  1), (-1,  2),
            (-2, -2), (-2,  2)
        ];

        let mut nearest_mv = ZERO_MV;
        let mut near_mv = ZERO_MV;
        let mut pred_mv = ZERO_MV;
        let mut num_mv: usize = 0;

        for (i, (yoff, xoff)) in CAND_POS.iter().enumerate() {
            let cx = (mb_x as isize) + (*xoff as isize);
            let cy = (mb_y as isize) + (*yoff as isize);
            if (cx < 0) || (cy < 0) {
                continue;
            }
            let cx = cx as usize;
            let cy = cy as usize;
            if (cx >= self.mb_w) || (cy >= self.mb_h) {
                continue;
            }
            let mb_pos = cx + cy * self.mb_w;
            let mv = match self.mb_types[mb_pos].get_ref_id() {
                    0 => ZERO_MV, // intra neighbours carry no vector
                    1 => if self.mb_types[mb_pos] != VPMBType::InterFourMV {
                            self.inter_mbs[mb_pos].mv[3]
                        } else {
                            self.fourmv_mbs[mb_pos].mv[3]
                        },
                    _ => self.golden_mbs[mb_pos].mv[3],
                };
            if (self.mb_types[mb_pos].get_ref_id() != ref_id) || (mv == ZERO_MV) {
                continue;
            }
            if num_mv == 0 {
                nearest_mv = mv;
                num_mv += 1;
                // VP6 takes the delta predictor only from the two closest
                // candidate positions
                if i < 2 {
                    pred_mv = mv;
                }
            } else if mv != nearest_mv {
                near_mv = mv;
                num_mv += 1;
                break;
            }
        }

        (num_mv, nearest_mv, near_mv, pred_mv)
    }

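    /// Converts the chosen modes and raw vectors into their coded form: each
    /// MB (and each sub-block in four-MV mode) becomes NoMV, Nearest, Near or
    /// an explicit MV, with `coded_mv` holding the delta against the
    /// predictor from `find_mv_pred()`.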
    pub fn predict_mvs(&mut self) {
        let mut mb_idx = 0;
        self.num_mv.clear();
        if self.coded_mv.is_empty() {
            self.coded_mv.resize(self.mb_w * self.mb_h, [ZERO_MV; 4]);
        }
        for mb_y in 0..self.mb_h {
            for mb_x in 0..self.mb_w {
                let (num_mv, nearest_mv, near_mv, pred_mv) = self.find_mv_pred(mb_x, mb_y, VP_REF_INTER);
                let mb_type = self.mb_types[mb_idx];
                self.num_mv.push(num_mv as u8);
                let golden = mb_type.get_ref_id() == VP_REF_GOLDEN;
                let mv = if !golden { self.inter_mbs[mb_idx].mv[3] } else { self.golden_mbs[mb_idx].mv[3] };

                let mb_type = if mb_type == VPMBType::Intra {
                        VPMBType::Intra
                    } else if mb_type == VPMBType::InterFourMV {
                        for i in 0..4 {
                            let mv = self.fourmv_mbs[mb_idx].mv[i];
                            self.coded_mv[mb_idx][i] = ZERO_MV;
                            if mv == ZERO_MV {
                                self.fmv_sub[mb_idx][i] = VPMBType::InterNoMV;
                            } else {
                                self.fmv_sub[mb_idx][i] = match num_mv {
                                        0 => {
                                            self.coded_mv[mb_idx][i] = mv - pred_mv;
                                            VPMBType::InterMV
                                        },
                                        1 => {
                                            if nearest_mv == mv {
                                                VPMBType::InterNearest
                                            } else {
                                                self.coded_mv[mb_idx][i] = mv - pred_mv;
                                                VPMBType::InterMV
                                            }
                                        },
                                        _ => {
                                            if nearest_mv == mv {
                                                VPMBType::InterNearest
                                            } else if near_mv == mv {
                                                VPMBType::InterNear
                                            } else {
                                                self.coded_mv[mb_idx][i] = mv - pred_mv;
                                                VPMBType::InterMV
                                            }
                                        },
                                    };
                            }
                        }
                        VPMBType::InterFourMV
                    } else if mv == ZERO_MV {
                        if !golden {
                            VPMBType::InterNoMV
                        } else {
                            VPMBType::GoldenNoMV
                        }
                    } else if mb_type.get_ref_id() == VP_REF_INTER {
                        self.coded_mv[mb_idx][3] = mv;
                        match num_mv {
                            0 => VPMBType::InterMV,
                            1 => {
                                if nearest_mv == mv {
                                    VPMBType::InterNearest
                                } else {
                                    self.coded_mv[mb_idx][3] = mv - pred_mv;
                                    VPMBType::InterMV
                                }
                            },
                            _ => {
                                if nearest_mv == mv {
                                    VPMBType::InterNearest
                                } else if near_mv == mv {
                                    VPMBType::InterNear
                                } else {
                                    self.coded_mv[mb_idx][3] = mv - pred_mv;
                                    VPMBType::InterMV
                                }
                            },
                        }
                    } else {
                        let (num_mv, nearest_mv, near_mv, pred_mv) = self.find_mv_pred(mb_x, mb_y, VP_REF_GOLDEN);
                        self.coded_mv[mb_idx][3] = ZERO_MV;
                        match num_mv {
                            0 => {
                                self.coded_mv[mb_idx][3] = mv - pred_mv;
                                VPMBType::GoldenMV
                            },
                            1 => {
                                if nearest_mv == mv {
                                    VPMBType::GoldenNearest
                                } else {
                                    self.coded_mv[mb_idx][3] = mv - pred_mv;
                                    VPMBType::GoldenMV
                                }
                            },
                            _ => {
                                if nearest_mv == mv {
                                    VPMBType::GoldenNearest
                                } else if near_mv == mv {
                                    VPMBType::GoldenNear
                                } else {
                                    self.coded_mv[mb_idx][3] = mv - pred_mv;
                                    VPMBType::GoldenMV
                                }
                            },
                        }
                    };
                self.mb_types[mb_idx] = mb_type;
                mb_idx += 1;
            }
        }
    }

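    /// Applies DC prediction to the quantized blocks before entropy coding,
    /// with the final flag set (reconstruct_frame() passes `false` when
    /// undoing the prediction on the decoder path).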
    pub fn apply_dc_prediction(&mut self, dc_pred: &mut VP56DCPred) {
        let mut mb_idx = 0;

        for _mb_y in 0..self.mb_h {
            for _mb_x in 0..self.mb_w {
                let mb_type = self.mb_types[mb_idx];
                let mb = self.get_mb_mut(mb_idx);
                for (i, blk) in mb.coeffs.iter_mut().enumerate() {
                    dc_pred.predict_dc(mb_type, i, blk, true);
                }
                mb_idx += 1;
            }
            dc_pred.update_row();
        }
    }
}