use nihav_codec_support::codecs::{MV, ZERO_MV};
use super::super::vp78::{PredMode, MVSplitMode, SubMVRef};
use super::super::vp78dsp::*;
use super::blocks::*;
use super::coder::*;
use super::models::*;
use super::motion_est::*;
use super::rdo::*;

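/// Everything the intra mode decision needs: the rate-distortion metric,
/// the current probability models, the quantiser index, the four
/// top-right neighbour pixels and the prediction/non-zero contexts.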
pub struct IntraModePredCtx<'a> {
    pub metric:  &'a RateDistMetric,
    pub models:  &'a VP7Models,
    pub q:       usize,
    pub tr:      [u8; 4],
    pub ipred_y: IPredContext,
    pub ipred_u: IPredContext,
    pub ipred_v: IPredContext,
    pub pctx:    BlockPCtx,
}

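/// Fixed-capacity list that keeps up to four unique motion vector
/// candidates in insertion order; duplicates and overflow are ignored.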
struct UniqueList<A> {
    list: [A; 4],
    fill: usize,
}

impl<A: Copy + Default + PartialEq> UniqueList<A> {
    fn new() -> Self {
        Self { list: [A::default(); 4], fill: 0 }
    }
    fn add(&mut self, cand: A) {
        if self.fill == self.list.len() { return; }
        let mut unique = true;
        for el in self.list.iter().take(self.fill) {
            if *el == cand {
                unique = false;
                break;
            }
        }
        if unique {
            self.list[self.fill] = cand;
            self.fill += 1;
        }
    }
    fn get_list(&self) -> &[A] { &self.list[..self.fill] }
}

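/// Searches for the best 4x4 luma prediction mode for each sub-block.
///
/// Sub-blocks are processed in raster order; each one is predicted,
/// transformed, quantised and reconstructed so that the following
/// sub-blocks predict from the same pixels the decoder will see.
/// Returns the accumulated rate-distortion cost, or `MAX_DIST` as soon
/// as the running total exceeds `ref_best_dist`.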
pub fn try_i4x4_pred(mut src: LumaIterator, modes: &mut [PredMode; 16], res: &mut Residue, new: &mut [u8; 256], pctx: &IntraModePredCtx, ref_best_dist: u32) -> u32 {
    const PRED4X4: [PredMode; 10] = [
        PredMode::DCPred, PredMode::HPred, PredMode::VPred, PredMode::TMPred,
        PredMode::LDPred, PredMode::RDPred, PredMode::VRPred, PredMode::VLPred,
        PredMode::HUPred, PredMode::HDPred
    ];

    let mut ipred4 = IPredContext::default();
    let mut top  = [0x80; 21];
    let mut diff = [0i16; 16];
    let mut yblk = [0u8; 16];
    top[0] = pctx.ipred_y.tl;
    top[1..][..16].copy_from_slice(&pctx.ipred_y.top);
    top[17..].copy_from_slice(&pctx.tr);

    let mut tot_dist = 0;
    let mut nz_top  = pctx.pctx.nz_y_top;
    let mut nz_left = pctx.pctx.nz_y_left;
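    // walk the sixteen sub-blocks in raster order, refreshing the local
    // prediction context from previously reconstructed pixels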
    for y in 0..4 {
        let (l1, l2) = ipred4.left.split_at_mut(16 - y * 4);
        l1.copy_from_slice(&pctx.ipred_y.left[y * 4..]);
        for el in l2.iter_mut() { *el = 0x80; }

        ipred4.tl = if y == 0 { top[0] } else { pctx.ipred_y.left[y * 4 - 1] };
        for x in 0..4 {
            let tsrc = &top[x * 4 + 1..];
            let (t1, t2) = ipred4.top.split_at_mut(tsrc.len().min(16));
            for (dst, &src) in t1.iter_mut().zip(tsrc.iter()) { *dst = src; }
            for el in t2.iter_mut() { *el = 0x80; }

            let mut best_mode = PredMode::DCPred;
            let mut best_dist = MAX_DIST;
            let mut best_has_nz = false;

            let srcblk = src.next().unwrap();
            for &mode in PRED4X4.iter() {
                yblk.ipred4(4, mode, &ipred4);
                let mode_nits = b_mode_nits(mode);
                let blkctx = (nz_top[x] as u8) + (nz_left[y] as u8);
                let (dist1, has_nz) = pctx.metric.block_dist(&srcblk, &yblk, pctx.q, 3, blkctx, &pctx.models.coef_probs[3]);
                let dist = dist1 + pctx.metric.calc_metric(0, mode_nits);
                if dist < best_dist {
                    best_mode = mode;
                    best_dist = dist;
                    best_has_nz = has_nz;
                    if dist <= SMALL_DIST {
                        break;
                    }
                }
            }
            nz_top[x]  = best_has_nz;
            nz_left[y] = best_has_nz;
            modes[x + y * 4] = best_mode;
            tot_dist += best_dist;
            if tot_dist >= ref_best_dist {
                return MAX_DIST;
            }

            yblk.ipred4(4, modes[x + y * 4], &ipred4);
            get_block_difference(&mut diff, &srcblk, &yblk);
            res.luma[x + y * 4] = diff;
            diff.fdct();
            diff.requant_y(pctx.q);
            diff.idct();

            let nblk = &mut new[x * 4 + y * 4 * 16..];
            for (dst, (src, res)) in nblk.chunks_mut(16).zip(yblk.chunks(4).zip(diff.chunks(4))) {
                for (del, (&sel, &rel)) in dst.iter_mut().zip(src.iter().zip(res.iter())) {
                    *del = (i16::from(sel) + rel).max(0).min(255) as u8;
                }
            }

            ipred4.tl = top[x * 4 + 4];
            top[x * 4 + 1..][..4].copy_from_slice(&nblk[16 * 3..][..4]);
            for (dst, src) in ipred4.left[..4].iter_mut().zip(nblk.chunks(16)) {
                *dst = src[3];
            }
        }
    }
    tot_dist
}

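/// Estimates the rate-distortion cost of predicting the whole luma plane
/// with a single 16x16 mode; the luma DC coefficients are costed through
/// the separate Y2 block.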
fn try_intra16_pred(sblk: &SrcBlock, newblk: &mut SrcBlock, res: &mut Residue, imctx: &IntraModePredCtx, ymode: PredMode) -> u32 {
    newblk.fill_ipred_luma(ymode, &imctx.ipred_y);

    for (dst, (src1, src2)) in res.luma.iter_mut().zip(sblk.luma_blocks().zip(newblk.luma_blocks())) {
        get_block_difference(dst, &src1, &src2);
    }

    let mut nits = 0;

    res.fdct_luma();
    res.fdct_dc_block();
    res.quant_luma(imctx.q);
    nits += estimate_subblock_nits(&res.dcs, 1, imctx.pctx.nz_y2, &imctx.models.coef_probs[1]);
    let mut nz_top  = imctx.pctx.nz_y_top;
    let mut nz_left = imctx.pctx.nz_y_left;
    for (y, row) in res.luma.chunks(4).enumerate() {
        for (x, blk) in row.iter().enumerate() {
            let has_nz = blk.has_nz();
            let pctx = (nz_top[x] as u8) + (nz_left[y] as u8);
            nits += estimate_subblock_nits(blk, 0, pctx, &imctx.models.coef_probs[0]);
            nz_top[x]  = has_nz;
            nz_left[y] = has_nz;
        }
    }
    res.dequant_luma();
    res.idct_luma();

    let mut dist = 0;
    for (diff, (src1, src2)) in res.luma.iter().zip(sblk.luma_blocks().zip(newblk.luma_blocks())) {
        dist += get_difference_dist(&src1, &src2, diff);
    }

    imctx.metric.calc_metric(dist, nits)
}

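/// Picks the best intra coding for the macroblock: a full 16x16 luma
/// mode, replaced by per-block 4x4 modes when those cost less, plus an
/// independently chosen chroma mode. Returns `mb_type` unchanged when
/// nothing beats `ref_best_dist`.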
pub fn select_intra_mode(sblk: &SrcBlock, newblk: &mut SrcBlock, res: &mut Residue, imctx: &IntraModePredCtx, ref_best_dist: u32, mb_type: MBType) -> MBType {
    const PRED16X16: [PredMode; 4] = [PredMode::DCPred, PredMode::HPred, PredMode::VPred, PredMode::TMPred];

    let mut best_ymode = PredMode::DCPred;
    let mut y_best_dist = MAX_DIST;
    let mut use_i4 = false;
    let mut i4_modes = [PredMode::DCPred; 16];
    if !sblk.is_flat() {
        for &ymode in PRED16X16.iter() {
            let dist = try_intra16_pred(sblk, newblk, res, imctx, ymode);

            if dist < y_best_dist {
                best_ymode = ymode;
                y_best_dist = dist;
                if dist <= SMALL_DIST {
                    break;
                }
            }
        }

        if y_best_dist >= ref_best_dist {
            return mb_type;
        }

        if y_best_dist > SMALL_DIST {
            res.reset();
            let dist4 = try_i4x4_pred(sblk.luma_blocks(), &mut i4_modes, res, &mut newblk.luma, imctx, y_best_dist);
            use_i4 = dist4 < y_best_dist;
            y_best_dist = y_best_dist.min(dist4);
        }
    } else if ref_best_dist != MAX_DIST { // we can skip this for the intra-only case
        y_best_dist = try_intra16_pred(sblk, newblk, res, imctx, PredMode::DCPred);
        if y_best_dist >= ref_best_dist {
            return mb_type;
        }
    }

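    // the chroma mode is chosen independently of luma, by RD search over
    // the four full-block prediction modes with early exit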
    let mut best_cmode = PredMode::DCPred;
    let mut c_best_dist = MAX_DIST;
    for &cmode in PRED16X16.iter() {
        newblk.fill_ipred_chroma(cmode, &imctx.ipred_u, &imctx.ipred_v);
        let mut dist = 0;
        'csearch: for chroma in 0..2 {
            let mut nz_top  = imctx.pctx.nz_c_top[chroma];
            let mut nz_left = imctx.pctx.nz_c_left[chroma];
            for (idx, (sblk, nblk)) in sblk.chroma_blocks(chroma).zip(newblk.chroma_blocks(chroma)).enumerate() {
                let pctx = (nz_top[idx & 1] as u8) + (nz_left[idx >> 1] as u8);
                let (dist1, has_nz) = imctx.metric.block_dist(&sblk, &nblk, imctx.q, 2, pctx, &imctx.models.coef_probs[2]);
                dist += dist1;
                nz_top[idx & 1]   = has_nz;
                nz_left[idx >> 1] = has_nz;
                if dist >= c_best_dist {
                    break 'csearch;
                }
            }
        }
        if dist < c_best_dist {
            best_cmode = cmode;
            c_best_dist = dist;
        }
    }
    let tot_dist = y_best_dist.saturating_add(c_best_dist);
    if (ref_best_dist == MAX_DIST) || (tot_dist < ref_best_dist) {
        if !use_i4 {
            MBType::Intra(best_ymode, best_cmode)
        } else {
            MBType::Intra4x4(i4_modes, [0; 16], best_cmode)
        }
    } else {
        mb_type
    }
}

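/// Computes the rate-distortion cost of inter-coding the macroblock with
/// the prediction already stored in `newblk`; `pdc` is the predicted Y2
/// DC value, subtracted from the requantised DC before costing.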
pub fn calc_inter_mb_dist(sblk: &SrcBlock, newblk: &SrcBlock, res: &mut Residue, imctx: &IntraModePredCtx, pdc: i16) -> u32 {
    res.set_luma_from_diff(&sblk.luma, &newblk.luma);
    res.set_chroma_from_diff(&sblk.chroma, &newblk.chroma);
    res.fdct();
    res.fdct_dc_block();
    requant_y2_dc(&mut res.dcs[0], imctx.q);
    res.dcs[0] -= pdc;
    res.quant(imctx.q);
    let mut nits = estimate_subblock_nits(&res.dcs, 1, imctx.pctx.nz_y2, &imctx.models.coef_probs[1]);
    let mut nz_top  = imctx.pctx.nz_y_top;
    let mut nz_left = imctx.pctx.nz_y_left;
    for (y, row) in res.luma.chunks(4).enumerate() {
        for (x, blk) in row.iter().enumerate() {
            let has_nz = blk.has_nz();
            let pctx = (nz_top[x] as u8) + (nz_left[y] as u8);
            nits += estimate_subblock_nits(blk, 0, pctx, &imctx.models.coef_probs[0]);
            nz_top[x]  = has_nz;
            nz_left[y] = has_nz;
        }
    }
    for (c_idx, chroma) in res.chroma.iter().enumerate() {
        let mut nz_top  = imctx.pctx.nz_c_top[c_idx];
        let mut nz_left = imctx.pctx.nz_c_left[c_idx];
        for (idx, blk) in chroma.iter().enumerate() {
            let pctx = (nz_top[idx & 1] as u8) + (nz_left[idx >> 1] as u8);
            let has_nz = blk.has_nz();
            nits += estimate_subblock_nits(blk, 2, pctx, &imctx.models.coef_probs[2]);
            nz_top[idx & 1]   = has_nz;
            nz_left[idx >> 1] = has_nz;
        }
    }
    res.dequant();
    res.idct();
    let mut dist = 0;
    for (diff, (src, new)) in res.luma.iter().zip(sblk.luma_blocks().zip(newblk.luma_blocks())) {
        dist += get_difference_dist(&src, &new, diff);
    }
    for chroma in 0..2 {
        for (diff, (src, new)) in res.chroma[chroma].iter().zip(sblk.chroma_blocks(chroma).zip(newblk.chroma_blocks(chroma))) {
            dist += get_difference_dist(&src, &new, diff);
        }
    }

    res.reset();
    imctx.metric.calc_metric(dist, nits)
}

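/// Tries to code the macroblock with split motion vectors: first one MV
/// per 8x8 quarter, then an optional refinement to one MV per 4x4 block.
/// Returns the best split macroblock type and its cost, or `None` when a
/// single MV per macroblock (or the existing `inter_dist`) is at least
/// as good.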
#[allow(clippy::too_many_arguments)]
pub fn try_inter_split(sblk: &SrcBlock, newblk: &mut SrcBlock, res: &mut Residue, mvprobs: [u8; 4], nearest_mv: MV, near_mv: MV, pred_mv: MV, last: bool, mb_x: usize, mb_y: usize, mv_search: &mut Box<dyn MVSearch + Send>, mv_est: &mut MVEstimator, pctx: &mut PredContext, imctx: &IntraModePredCtx, inter_dist: u32) -> Option<(MBType, u32)> {
    let mv_stride = pctx.mv_stride;
    let mut blk8 = [0; 64];
    let mut mvs8 = [ZERO_MV; 4];
    let mut split_cand = [false; 4];
    let mut mv_dist = [0; 4];

    let mb_mv = pctx.mvs[mb_x * 4 + mb_y * 4 * mv_stride];
    for (quarter, dst_mv) in mvs8.iter_mut().enumerate() {
        let xoff = mb_x * 16 + (quarter & 1) * 8;
        let yoff = mb_y * 16 + (quarter & 2) * 4;

        let off = (quarter & 1) * 8 + (quarter >> 1) * 8 * 16;
        for (src, dst) in sblk.luma[off..].chunks(16).zip(blk8.chunks_mut(8)) {
            dst.copy_from_slice(&src[..8]);
        }

        let mut mvs = UniqueList::new();
        mvs.add(ZERO_MV);
        mvs.add(mb_mv);
        let mv_idx = xoff / 4 + (yoff / 4) * mv_stride;
        if xoff > 0 {
            mvs.add(pctx.mvs[mv_idx - 1]);
        }
        if mv_idx >= mv_stride {
            mvs.add(pctx.mvs[mv_idx - mv_stride]);
        }
        mvs.add(near_mv);
        mvs.add(nearest_mv);
        let (mv, dist) = mv_search.search_blk8(mv_est, &blk8, xoff, yoff, mvs.get_list());
        *dst_mv = mv;
        split_cand[quarter] = dist > LARGE_BLK8_DIST;
        mv_dist[quarter] = dist;
    }
    if mvs8[0] == mvs8[1] && mvs8[0] == mvs8[2] && mvs8[0] == mvs8[3] {
        // single MV per MB
        return None;
    }
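    // replicate each 8x8 MV over its 2x2 group of 4x4 cells in the MV grid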
    let mv_idx = mb_x * 4 + mb_y * 4 * mv_stride;
    for (dst, src) in pctx.mvs[mv_idx..].chunks_mut(2 * mv_stride).zip(mvs8.chunks(2)) {
        dst[0] = src[0];
        dst[1] = src[0];
        dst[2] = src[1];
        dst[3] = src[1];
        dst[mv_stride]     = src[0];
        dst[mv_stride + 1] = src[0];
        dst[mv_stride + 2] = src[1];
        dst[mv_stride + 3] = src[1];
    }
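    // reconstruct the quarter-MV prediction and get its baseline RD cost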
    recon_split_mb(newblk, mb_x, mb_y, &pctx.mvs, mv_stride, mv_est);

    let mut tot_dist = calc_inter_mb_dist(sblk, newblk, res, imctx, pctx.get_y2_dc_pred(last));

    let mut split_mode = MVSplitMode::Quarters;
    let mut sub_refs = [SubMVRef::Zero; 16];
    let mut sub_mvs = [ZERO_MV; 16];

    let mut mv_nits = 0;
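    // if the quarters pair up into equal halves, signal the cheaper
    // top/bottom or left/right split instead of four separate MVs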
    if mvs8[0] == mvs8[1] && mvs8[2] == mvs8[3] {
        split_mode = MVSplitMode::TopBottom;
        sub_mvs[0] = mvs8[0] - pred_mv;
        sub_mvs[1] = mvs8[2] - pred_mv;

        let mv_idx = mb_x * 4 + mb_y * 4 * mv_stride;
        let left_mv = if mb_x > 0 { pctx.mvs[mv_idx - 1] } else { ZERO_MV };
        let top_mv  = if mb_y > 0 { pctx.mvs[mv_idx - mv_stride] } else { ZERO_MV };
        let (ref0, nits0) = sub_mv_nits(mvs8[0], left_mv, top_mv, pred_mv, imctx.models);
        let left_mv = if mb_x > 0 { pctx.mvs[mv_idx + 2 * mv_stride - 1] } else { ZERO_MV };
        let (ref1, nits1) = sub_mv_nits(mvs8[2], left_mv, mvs8[0], pred_mv, imctx.models);
        sub_refs[0] = ref0;
        sub_refs[1] = ref1;
        mv_nits += nits0 + nits1;
    } else if mvs8[0] == mvs8[2] && mvs8[1] == mvs8[3] {
        split_mode = MVSplitMode::LeftRight;
        sub_mvs[0] = mvs8[0] - pred_mv;
        sub_mvs[1] = mvs8[1] - pred_mv;

        let mv_idx = mb_x * 4 + mb_y * 4 * mv_stride;
        let left_mv = if mb_x > 0 { pctx.mvs[mv_idx - 1] } else { ZERO_MV };
        let top_mv  = if mb_y > 0 { pctx.mvs[mv_idx - mv_stride] } else { ZERO_MV };
        let (ref0, nits0) = sub_mv_nits(mvs8[0], left_mv, top_mv, pred_mv, imctx.models);
        let top_mv = if mb_y > 0 { pctx.mvs[mv_idx - mv_stride + 2] } else { ZERO_MV };
        let (ref1, nits1) = sub_mv_nits(mvs8[1], mvs8[0], top_mv, pred_mv, imctx.models);
        sub_refs[0] = ref0;
        sub_refs[1] = ref1;
        mv_nits += nits0 + nits1;
    } else {
        for (quarter, &mv) in mvs8.iter().enumerate() {
            let xoff = mb_x * 16 + (quarter & 1) * 8;
            let yoff = mb_y * 16 + (quarter & 2) * 4;
            let mv_idx = xoff / 4 + (yoff / 4) * mv_stride;
            let left_mv = if xoff > 0 { pctx.mvs[mv_idx - 1] } else { ZERO_MV };
            let top_mv  = if yoff > 0 { pctx.mvs[mv_idx - mv_stride] } else { ZERO_MV };
            let (cur_sub_ref, nits) = sub_mv_nits(mv, left_mv, top_mv, pred_mv, imctx.models);
            sub_refs[quarter] = cur_sub_ref;
            sub_mvs[quarter] = mv - pred_mv;
            mv_nits += nits;

            pctx.mvs[mv_idx] = mv;
            pctx.mvs[mv_idx + 1] = mv;
            pctx.mvs[mv_idx + mv_stride] = mv;
            pctx.mvs[mv_idx + mv_stride + 1] = mv;
        }
    }
    mv_nits += sub_mv_mode_nits(split_mode);
    tot_dist += imctx.metric.calc_metric(0, mv_nits);
    if tot_dist < inter_dist {
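        // any quarter with a poor 8x8 match is re-searched as four 4x4
        // blocks; the finer MVs are kept only when their summed search
        // cost drops below half of the quarter's cost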
        if tot_dist > SMALL_DIST && (split_cand[0] || split_cand[1] || split_cand[2] || split_cand[3]) {
            let mut blk4 = [0; 16];
            let mut has_splits = false;
            for (quarter, &mv_dist) in mv_dist.iter().enumerate() {
                if !split_cand[quarter] {
                    continue;
                }
                let xoff = mb_x * 16 + (quarter & 1) * 8;
                let yoff = mb_y * 16 + (quarter & 2) * 4;
                let mut dist_sum = 0;
                let mut smv = [ZERO_MV; 4];
                for (subq, smv) in smv.iter_mut().enumerate() {
                    let off = (quarter & 1) * 8 + (subq & 1) * 4 + ((quarter >> 1) * 8 + (subq >> 1) * 4) * 16;
                    for (dst, src) in blk4.chunks_mut(4).zip(sblk.luma[off..].chunks(16)) {
                        dst.copy_from_slice(&src[..4]);
                    }

                    let mut mvs = UniqueList::new();
                    mvs.add(ZERO_MV);
                    mvs.add(mvs8[quarter]);
                    mvs.add(mb_mv);
                    let mv_idx = xoff / 4 + (subq & 1) + ((yoff / 4) + (subq >> 1)) * mv_stride;
                    if xoff > 0 || (subq & 1) != 0 {
                        mvs.add(pctx.mvs[mv_idx - 1]);
                    }
                    if mv_idx >= mv_stride {
                        mvs.add(pctx.mvs[mv_idx - mv_stride]);
                    }
                    let (mv, dist) = mv_search.search_blk4(mv_est, &blk4, xoff, yoff, mvs.get_list());
                    *smv = mv;
                    dist_sum += dist;
                }
                if dist_sum < mv_dist / 2 {
                    for (subq, &smv) in smv.iter().enumerate() {
                        let mv_idx = xoff / 4 + (subq & 1) + ((yoff / 4) + (subq >> 1)) * mv_stride;
                        pctx.mvs[mv_idx] = smv;
                    }
                    has_splits = true;
                }
            }
            if has_splits {
                recon_split_mb(newblk, mb_x, mb_y, &pctx.mvs, mv_stride, mv_est);
                let mut split16_dist = calc_inter_mb_dist(sblk, newblk, res, imctx, pctx.get_y2_dc_pred(last));
                if split16_dist < tot_dist {
                    let mut mv_nits = sub_mv_mode_nits(MVSplitMode::Sixteenths);
                    let mut mv_idx = mb_x * 4 + mb_y * 4 * mv_stride;
                    let mut sub_refs2 = [SubMVRef::Zero; 16];
                    let mut sub_mvs2 = [ZERO_MV; 16];
                    for y in 0..4 {
                        for x in 0..4 {
                            let left_mv = if x > 0 || mb_x > 0 { pctx.mvs[mv_idx + x - 1] } else { ZERO_MV };
                            let top_mv  = if mv_idx + x >= mv_stride { pctx.mvs[mv_idx + x - mv_stride] } else { ZERO_MV };
                            let cur_mv = pctx.mvs[mv_idx + x];
                            sub_mvs2[x + y * 4] = cur_mv - pred_mv;
                            let (cur_sub_ref, nits) = sub_mv_nits(cur_mv, left_mv, top_mv, pred_mv, imctx.models);
                            sub_refs2[x + y * 4] = cur_sub_ref;
                            mv_nits += nits;
                        }
                        mv_idx += mv_stride;
                    }
                    split16_dist += imctx.metric.calc_metric(0, mv_nits);
                    if split16_dist < tot_dist {
                        let mb_t = MBType::InterSplitMV(last, mvprobs, MVSplitMode::Sixteenths, sub_refs2, sub_mvs2);
                        return Some((mb_t, split16_dist));
                    }
                }
            }
        }
        // restore the per-quarter MVs in the grid (the 4x4 refinement may
        // have overwritten them) before returning the quarter split
        let mv_idx = mb_x * 4 + mb_y * 4 * mv_stride;
        for (dst, src) in pctx.mvs[mv_idx..].chunks_mut(2 * mv_stride).zip(mvs8.chunks(2)) {
            dst[0] = src[0];
            dst[1] = src[0];
            dst[2] = src[1];
            dst[3] = src[1];
            dst[mv_stride]     = src[0];
            dst[mv_stride + 1] = src[0];
            dst[mv_stride + 2] = src[1];
            dst[mv_stride + 3] = src[1];
        }
        Some((MBType::InterSplitMV(last, mvprobs, split_mode, sub_refs, sub_mvs), tot_dist))
    } else {
        None
    }
}

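/// Derives a chroma MV as the rounded average of the four luma MVs that
/// were summed into `mv` (add the rounding bias, then divide by four);
/// e.g. a sum of (6, 6) yields the chroma MV (2, 2).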
fn get_chroma_mv(mut mv: MV) -> MV {
    if mv.x < 0 {
        mv.x += 1;
    } else {
        mv.x += 2;
    }
    if mv.y < 0 {
        mv.y += 1;
    } else {
        mv.y += 2;
    }
    mv.x >>= 2;
    mv.y >>= 2;
    mv
}

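/// Reconstructs a macroblock from per-4x4 motion vectors: luma is copied
/// block by block, while each chroma strip is interpolated with the
/// averaged MV of the corresponding 2x2 group of luma blocks.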
pub fn recon_split_mb(newblk: &mut SrcBlock, mb_x: usize, mb_y: usize, mvs: &[MV], mv_stride: usize, mv_est: &mut MVEstimator) {
    let mut mv_idx = mb_x * 4 + mb_y * 4 * mv_stride;
    let mut sum_mv = [ZERO_MV; 2];
    let mut blk4 = [0; 16];
    for (y, strip) in newblk.luma.chunks_mut(16 * 4).enumerate() {
        if (y & 1) == 0 {
            sum_mv = [ZERO_MV; 2];
        }
        for x in 0..4 {
            let mv = mvs[mv_idx + x];
            sum_mv[x / 2] += mv;
            mv_est.get_blk4(&mut blk4, 0, mb_x * 16 + x * 4, mb_y * 16 + y * 4, mv);
            for (dst, src) in strip[x * 4..].chunks_mut(16).zip(blk4.chunks(4)) {
                dst[..4].copy_from_slice(src);
            }
        }
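        // after every second block row, average the accumulated luma MVs
        // and reconstruct the matching four-line chroma strips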
        if (y & 1) == 1 {
            let cmv = [get_chroma_mv(sum_mv[0]), get_chroma_mv(sum_mv[1])];
            for chroma in 0..2 {
                for (x, &mv) in cmv.iter().enumerate() {
                    mv_est.get_blk4(&mut blk4, chroma + 1, mb_x * 8 + x * 4, mb_y * 8 + (y & 2) * 2, mv);
                    for (dst, src) in newblk.chroma[chroma][x * 4 + (y & 2) * 2 * 8..].chunks_mut(8).zip(blk4.chunks(4)) {
                        dst[..4].copy_from_slice(src);
                    }
                }
            }
        }
        mv_idx += mv_stride;
    }
}