+
+ // 5x5 caches of per-4x4-block data for the current macroblock plus its
+ // neighbours: row 0 holds the blocks of the MB above, column 0 the blocks
+ // of the MB to the left, and the 4x4 centre (rows/cols 1..=4) the current
+ // MB itself.  Slot 0 (the top-left corner) is never written below and
+ // keeps its initial value.  Each mv/ref entry holds two values —
+ // presumably one per prediction direction (L0/L1) — TODO confirm against
+ // the definitions of ZERO_MV / INVALID_REF.
+ let mut coded_cache = [false; 25];
+ let mut mv_cache = [[ZERO_MV; 2]; 25];
+ let mut ref_cache = [[INVALID_REF; 2]; 25];
+
+ // Populate cache row 0 (indices 1..=4) from the 4x4 blocks of the
+ // macroblock above, when a top neighbour is available.
+ if self.mb_y != 0 || self.has_top {
+ for (x, (cc, mv)) in coded_cache[1..5].iter_mut().zip(mv_cache[1..5].iter_mut()).enumerate() {
+ let blk4 = self.get_top_blk4(x);
+ *cc = blk4.ncoded != 0;
+ *mv = blk4.mv;
+ // Reference indices are stored per 8x8 block, so fetch one only for
+ // the even (left) 4x4 of each pair; the odd (right) 4x4 reuses the
+ // value just written for its partner.
+ if (x & 1) == 0 {
+ let blk8 = self.get_top_blk8(x / 2);
+ ref_cache[x + 1] = blk8.ref_idx;
+ } else {
+ ref_cache[x + 1] = ref_cache[x];
+ }
+ }
+ }
+ // Populate the remaining cache rows (1..=4): column 0 from the left
+ // neighbour MB, the rest from the current MB's own 4x4 blocks.
+ for (y, (ccs, mvs)) in coded_cache[5..].chunks_exact_mut(5).zip(
+ mv_cache[5..].chunks_exact_mut(5)).enumerate() {
+ if self.has_left || self.mb_x != 0 {
+ // y * 4 is the raster index of the first 4x4 block in block row y.
+ let blk4 = self.get_left_blk4(y * 4);
+ ccs[0] = blk4.ncoded != 0;
+ mvs[0] = blk4.mv;
+ // ref_idx exists per 8x8: fetch it on even rows; odd rows copy the
+ // entry written for the row above (same 8x8 block).
+ if (y & 1) == 0 {
+ let blk8 = self.get_left_blk8(y);
+ ref_cache[y * 5 + 5] = blk8.ref_idx;
+ } else {
+ ref_cache[y * 5 + 5] = ref_cache[y * 5];
+ }
+ }
+ for (x, (cc, mv)) in ccs[1..].iter_mut().zip(mvs[1..].iter_mut()).enumerate() {
+ let blk4 = self.get_cur_blk4(x + y * 4);
+ *cc = blk4.ncoded != 0;
+ *mv = blk4.mv;
+ // Fetch ref_idx once per 8x8 block — at its top-left 4x4, i.e. x and
+ // y both even; the other three 4x4s copy from that cached cell
+ // ((x & !1, y & !1) masks down to the top-left corner).  Note that for
+ // even y, x / 2 + y == (y / 2) * 2 + x / 2, the raster index of the
+ // 8x8 block inside the MB's 2x2 grid.
+ ref_cache[x + 1 + (y + 1) * 5] = if ((x & 1) == 0) && ((y & 1) == 0) {
+ self.get_cur_blk8(x / 2 + y).ref_idx
+ } else {
+ ref_cache[(x & !1) + 1 + ((y & !1) + 1) * 5]
+ };
+ }
+ }
+
+ // Walk the four block rows of the current MB, pairing each row (cache
+ // rows 1..=4, the "cur" side) with the row directly above it (cache rows
+ // 0..=3, the "top" side) — presumably to make per-edge filtering
+ // decisions from the coded flags / MVs / refs on both sides of each
+ // horizontal edge; the loop body continues past this hunk, so confirm
+ // there.
+ // NOTE(review): the coded/mv zips yield (top, cur) while the ref zip
+ // yields (cur, top).  The destructuring pattern matches that order
+ // correctly, but the asymmetry is easy to misread when editing.
+ for (y, (((top_ccs, cur_ccs), (top_mvs, cur_mvs)), (cur_refs, top_refs))) in
+ coded_cache.chunks_exact(5).take(4).zip(coded_cache[5..].chunks_exact(5)).zip(
+ mv_cache.chunks_exact(5).zip(mv_cache[5..].chunks_exact(5))).zip(
+ ref_cache[5..].chunks_exact(5).zip(ref_cache.chunks_exact(5))).enumerate() {