use nihav_codec_support::codecs::{MV, ZERO_MV};
use nihav_codec_support::data::GenericCache;

use super::FrameRefs;
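
// Data types shared by the H.264 decoder: macroblock and sub-macroblock
// partition types, intra prediction modes, compact reference indices and
// the per-slice caches used for MV prediction and deblocking.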

#[derive(Clone,Copy,Debug,PartialEq)]
pub enum BMode {
    L0,
    L1,
    Bi,
}

#[derive(Clone,Copy,Debug,PartialEq)]
pub enum MBType {
    Intra4x4,
    Intra8x8,
    Intra16x16(u8, u8, u8),
    PCM,
    P16x16,
    P16x8,
    P8x16,
    P8x8,
    P8x8Ref0,
    PSkip,
    Direct,
    B16x16(BMode),
    B16x8(BMode, BMode),
    B8x16(BMode, BMode),
    B8x8,
    BSkip,
}

impl MBType {
    pub fn is_intra(self) -> bool {
        match self {
            MBType::Intra4x4 | MBType::Intra8x8 | MBType::Intra16x16(_, _, _) | MBType::PCM => true,
            _ => false,
        }
    }
    pub fn is_intra16x16(self) -> bool {
        if let MBType::Intra16x16(_, _, _) = self {
            true
        } else {
            false
        }
    }
    pub fn is_skip(self) -> bool {
        match self {
            MBType::PSkip | MBType::BSkip => true,
            _ => false,
        }
    }
    pub fn is_4x4(self) -> bool { self.num_parts() == 4 }
    pub fn is_l0(self, part: usize) -> bool {
        match self {
            MBType::B16x16(mode) => mode == BMode::L0,
            MBType::B16x8(mode0, mode1) | MBType::B8x16(mode0, mode1) => {
                if part == 0 {
                    mode0 == BMode::L0
                } else {
                    mode1 == BMode::L0
                }
            },
            MBType::Direct | MBType::BSkip => false,
            _ => true,
        }
    }
    pub fn is_l1(self, part: usize) -> bool {
        match self {
            MBType::B16x16(mode) => mode == BMode::L1,
            MBType::B16x8(mode0, mode1) | MBType::B8x16(mode0, mode1) => {
                if part == 0 {
                    mode0 == BMode::L1
                } else {
                    mode1 == BMode::L1
                }
            },
            _ => false,
        }
    }
    pub fn num_parts(self) -> usize {
        match self {
            MBType::Intra4x4 | MBType::Intra8x8 | MBType::Intra16x16(_, _, _) | MBType::PCM |
            MBType::PSkip |
            MBType::Direct | MBType::BSkip |
            MBType::P16x16 |
            MBType::B16x16(_)
                => 1,
            MBType::P16x8 | MBType::P8x16 |
            MBType::B16x8(_, _) | MBType::B8x16(_, _)
                => 2,
            _ => 4,
        }
    }
    pub fn size(self) -> (usize, usize) {
        match self {
            MBType::Intra4x4 |
            MBType::Intra8x8 |
            MBType::Intra16x16(_, _, _) |
            MBType::PCM |
            MBType::P16x16 |
            MBType::PSkip |
            MBType::Direct |
            MBType::B16x16(_) |
            MBType::BSkip
                => (16, 16),
            MBType::P16x8 | MBType::B16x8(_, _) => (16, 8),
            MBType::P8x16 | MBType::B8x16(_, _) => (8, 16),
            _ => (8, 8),
        }
    }
}

impl Default for MBType {
    fn default() -> Self { MBType::Intra4x4 }
}
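
// A quick illustration of the helpers above: MBType::P16x8 has two 16x8
// partitions, so num_parts() == 2 and size() == (16, 8), and since P
// macroblocks only predict from list 0, is_l0() is true for either part.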

#[derive(Clone,Copy,Debug,PartialEq)]
pub enum SubMBType {
    P8x8,
    P8x4,
    P4x8,
    P4x4,
    Direct8x8,
    B8x8(BMode),
    B8x4(BMode),
    B4x8(BMode),
    B4x4(BMode),
}

impl SubMBType {
    pub fn num_parts(self) -> usize {
        match self {
            SubMBType::P8x8 | SubMBType::Direct8x8 | SubMBType::B8x8(_) => 1,
            SubMBType::P4x4 | SubMBType::B4x4(_) => 4,
            _ => 2,
        }
    }
    pub fn size(self) -> (usize, usize) {
        match self {
            SubMBType::P8x8 | SubMBType::Direct8x8 | SubMBType::B8x8(_) => (8, 8),
            SubMBType::P8x4 | SubMBType::B8x4(_) => (8, 4),
            SubMBType::P4x8 | SubMBType::B4x8(_) => (4, 8),
            SubMBType::P4x4 | SubMBType::B4x4(_) => (4, 4),
        }
    }
    pub fn is_l0(self) -> bool {
        match self {
            SubMBType::B8x8(mode) | SubMBType::B8x4(mode) |
            SubMBType::B4x8(mode) | SubMBType::B4x4(mode) => {
                mode == BMode::L0
            },
            _ => true,
        }
    }
    pub fn is_l1(self) -> bool {
        match self {
            SubMBType::B8x8(mode) | SubMBType::B8x4(mode) |
            SubMBType::B4x8(mode) | SubMBType::B4x4(mode) => {
                mode == BMode::L1
            },
            _ => false,
        }
    }
}

impl Default for SubMBType {
    fn default() -> Self { SubMBType::Direct8x8 }
}

#[derive(Clone,Copy,Debug,PartialEq)]
pub enum CompactMBType {
    Intra4x4,
    Intra8x8,
    Intra16x16,
    PCM,
    P16x16,
    P16x8,
    P8x16,
    P8x8,
    P8x8Ref0,
    PSkip,
    Direct,
    B16x16,
    B16x8,
    B8x16,
    B8x8,
    BSkip,

    None,
}

impl CompactMBType {
    pub fn is_intra(self) -> bool {
        match self {
            CompactMBType::Intra4x4 | CompactMBType::Intra8x8 | CompactMBType::Intra16x16 => true,
            _ => false,
        }
    }
    pub fn is_intra16orpcm(self) -> bool {
        match self {
            CompactMBType::Intra16x16 | CompactMBType::PCM => true,
            _ => false,
        }
    }
    pub fn is_skip(self) -> bool {
        match self {
            CompactMBType::PSkip | CompactMBType::BSkip => true,
            _ => false,
        }
    }
    pub fn is_direct(self) -> bool {
        match self {
            CompactMBType::BSkip | CompactMBType::Direct | CompactMBType::None => true,
            _ => false,
        }
    }
    pub fn is_inter(self) -> bool {
        !self.is_intra() && !self.is_skip() && self != CompactMBType::PCM
    }
    pub fn is_16x16(self) -> bool {
        match self {
            CompactMBType::P16x8 | CompactMBType::P8x16 |
            CompactMBType::P8x8 | CompactMBType::P8x8Ref0 |
            CompactMBType::B16x8 | CompactMBType::B8x16 |
            CompactMBType::B8x8 => false,
            _ => true,
        }
    }
}

impl Default for CompactMBType {
    fn default() -> Self { CompactMBType::None }
}

impl From<MBType> for CompactMBType {
    fn from(mbtype: MBType) -> Self {
        match mbtype {
            MBType::Intra4x4 => CompactMBType::Intra4x4,
            MBType::Intra8x8 => CompactMBType::Intra8x8,
            MBType::Intra16x16(_, _, _) => CompactMBType::Intra16x16,
            MBType::PCM => CompactMBType::PCM,
            MBType::P16x16 => CompactMBType::P16x16,
            MBType::P16x8 => CompactMBType::P16x8,
            MBType::P8x16 => CompactMBType::P8x16,
            MBType::P8x8 => CompactMBType::P8x8,
            MBType::P8x8Ref0 => CompactMBType::P8x8Ref0,
            MBType::PSkip => CompactMBType::PSkip,
            MBType::Direct => CompactMBType::Direct,
            MBType::B16x16(_) => CompactMBType::B16x16,
            MBType::B16x8(_, _) => CompactMBType::B16x8,
            MBType::B8x16(_, _) => CompactMBType::B8x16,
            MBType::B8x8 => CompactMBType::B8x8,
            MBType::BSkip => CompactMBType::BSkip,
        }
    }
}
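
// The conversion is intentionally lossy: CompactMBType only has to fit the
// per-macroblock cache, so the per-partition B prediction modes and the
// Intra16x16 prediction parameters are dropped.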

#[derive(Clone,Copy,Debug,PartialEq)]
pub enum IntraPredMode {
    Vertical,
    Horizontal,
    DC,
    DiagDownLeft,
    DiagDownRight,
    VerRight,
    HorDown,
    VerLeft,
    HorUp,
    None,
}

impl IntraPredMode {
    pub fn is_none(self) -> bool { self == IntraPredMode::None }
    pub fn into_pred_idx(self) -> i8 {
        if !self.is_none() {
            let idx: u8 = self.into();
            idx as i8
        } else {
            -1
        }
    }
}

impl Default for IntraPredMode {
    fn default() -> Self { IntraPredMode::None }
}

impl From<u8> for IntraPredMode {
    fn from(val: u8) -> Self {
        match val {
            0 => IntraPredMode::Vertical,
            1 => IntraPredMode::Horizontal,
            2 => IntraPredMode::DC,
            3 => IntraPredMode::DiagDownLeft,
            4 => IntraPredMode::DiagDownRight,
            5 => IntraPredMode::VerRight,
            6 => IntraPredMode::HorDown,
            7 => IntraPredMode::VerLeft,
            8 => IntraPredMode::HorUp,
            _ => IntraPredMode::None,
        }
    }
}

impl Into<u8> for IntraPredMode {
    fn into(self) -> u8 {
        match self {
            IntraPredMode::Vertical => 0,
            IntraPredMode::Horizontal => 1,
            IntraPredMode::DC => 2,
            IntraPredMode::DiagDownLeft => 3,
            IntraPredMode::DiagDownRight => 4,
            IntraPredMode::VerRight => 5,
            IntraPredMode::HorDown => 6,
            IntraPredMode::VerLeft => 7,
            IntraPredMode::HorUp => 8,
            _ => 9,
        }
    }
}
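
// From<u8> and Into<u8> form a round trip over the nine spec prediction
// modes (0..=8); any other input maps to IntraPredMode::None, which in turn
// converts back to the out-of-range value 9.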

pub const MISSING_POC: u16 = 0xFFFF;

#[derive(Clone,Copy,Debug)]
pub struct PicRef {
    ref_idx: u8,
}

pub const MISSING_REF: PicRef = PicRef { ref_idx: 0xFF };
pub const INVALID_REF: PicRef = PicRef { ref_idx: 0xFE };
pub const ZERO_REF: PicRef = PicRef { ref_idx: 0 };
const DIRECT_FLAG: u8 = 0x40;

impl PicRef {
    pub fn new(ref_idx: u8) -> Self {
        Self { ref_idx }
    }
    pub fn not_avail(self) -> bool {
        self == MISSING_REF || self == INVALID_REF
    }
    pub fn index(self) -> usize { (self.ref_idx & !DIRECT_FLAG) as usize }
    pub fn is_direct(self) -> bool { (self.ref_idx & DIRECT_FLAG) != 0 }
    pub fn set_direct(&mut self) { self.ref_idx |= DIRECT_FLAG; }
    fn min_pos(self, other: Self) -> Self {
        match (self.not_avail(), other.not_avail()) {
            (true, true) | (false, true) => self,
            (true, false) => other,
            (false, false) => PicRef::new((self.ref_idx & !DIRECT_FLAG).min(other.ref_idx & !DIRECT_FLAG)),
        }
    }
}

impl Default for PicRef {
    fn default() -> Self { MISSING_REF }
}

impl PartialEq for PicRef {
    fn eq(&self, other: &Self) -> bool {
        (self.ref_idx | DIRECT_FLAG) == (other.ref_idx | DIRECT_FLAG)
    }
}

impl std::fmt::Display for PicRef {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        if *self == MISSING_REF {
            write!(f, "-1")
        } else if *self == INVALID_REF {
            write!(f, "-2")
        } else {
            write!(f, "{}", self.ref_idx & !DIRECT_FLAG)
        }
    }
}
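
// PicRef packs a reference index and a "predicted by direct mode" marker
// into one byte: bit 6 (DIRECT_FLAG) is the marker, the low bits hold the
// list index, and 0xFF/0xFE are reserved for missing/invalid references.
// PartialEq sets the flag on both sides first, so comparisons ignore it.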

#[derive(Clone,Copy,Default)]
pub struct MBData {
    pub mb_type:        CompactMBType,
    pub coded_flags:    u32,
    pub transform_8x8:  bool,
}

pub fn blk4_to_blk8(blk4: usize) -> usize {
    const MAP: [usize; 16] = [ 0, 0, 1, 1, 0, 0, 1, 1, 2, 2, 3, 3, 2, 2, 3, 3 ];
    MAP[blk4 & 15]
}
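
// MAP folds the raster order of 4x4 blocks onto the 2x2 grid of 8x8 blocks,
// e.g. blk4 5 (row 1, column 1) lies in 8x8 block 0 while blk4 10 (row 2,
// column 2) lies in 8x8 block 3.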

#[derive(Clone,Copy)]
pub struct Blk8Data {
    pub ref_idx:    [PicRef; 2],
    pub ncoded_c:   [u8; 2],
}

impl Default for Blk8Data {
    fn default() -> Self {
        Self {
            ref_idx:  [MISSING_REF; 2],
            ncoded_c: [0; 2],
        }
    }
}

#[derive(Clone,Copy,Default)]
pub struct Blk4Data {
    pub ncoded: u8,
    pub ipred:  IntraPredMode,
    pub mv:     [MV; 2],
    pub mvd:    [MV; 2],
}

pub struct SliceState {
    pub mb_x:       usize,
    pub mb_y:       usize,
    pub mb_w:       usize,
    pub mb_h:       usize,
    pub mb_start:   usize,

    pub mb:         GenericCache<MBData>,
    pub blk8:       GenericCache<Blk8Data>,
    pub blk4:       GenericCache<Blk4Data>,

    pub deblock:    GenericCache<u8>,

    pub has_top:    bool,
    pub has_left:   bool,
}

impl SliceState {
    pub fn new() -> Self {
        Self {
            mb_x: 0, mb_y: 0, mb_w: 0, mb_h: 0, mb_start: 0,
            mb:      GenericCache::new(0, 0, MBData::default()),
            blk8:    GenericCache::new(0, 0, Blk8Data::default()),
            blk4:    GenericCache::new(0, 0, Blk4Data::default()),
            deblock: GenericCache::new(0, 0, 0),
            has_top:  false,
            has_left: false,
        }
    }
    pub fn reset(&mut self, mb_w: usize, mb_h: usize, mb_pos: usize) {
        self.mb_w     = mb_w;
        self.mb_h     = mb_h;
        self.mb_start = mb_pos;
        if mb_w > 0 {
            self.mb_x = mb_pos % mb_w;
            self.mb_y = mb_pos / mb_w;
        } else {
            self.mb_x = 0;
            self.mb_y = 0;
        }
        self.mb   = GenericCache::new(1, mb_w + 2, MBData::default());
        self.blk8 = GenericCache::new(2, mb_w * 2 + 2, Blk8Data::default());
        self.blk4 = GenericCache::new(4, mb_w * 4 + 2, Blk4Data::default());

        self.deblock = GenericCache::new(4, mb_w * 4 + 1, 0);

        self.has_top  = false;
        self.has_left = false;
    }
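
    // fill_deblock() builds a per-4x4-block strength map for the loop filter:
    // the high nibble describes the horizontal (top) edge, the low nibble the
    // vertical (left) edge. 0x40/4 mark intra macroblock edges, 0x30/3 inner
    // intra edges, 0x20/2 edges with coded residual, and 0x10/1 edges with an
    // MV or reference mismatch.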
    pub fn fill_deblock(&mut self, deblock_mode: u8, is_s: bool) {
        if deblock_mode == 1 {
            return;
        }

        let tx8x8 = self.get_cur_mb().transform_8x8;

        let mut idx = self.deblock.xpos + self.mb_x * 4;
        let cur_mbt = self.get_cur_mb().mb_type;
        let left_mbt = self.get_left_mb().mb_type;
        let mut top_mbt = self.get_top_mb().mb_type;
        for y in 0..4 {
            // with the 8x8 transform only even rows have horizontal edges
            let skip_top = tx8x8 && (y & 1) != 0;
            let can_do_top = y != 0 || (self.mb_y != 0 && (self.has_top || deblock_mode != 2));
            if can_do_top && !skip_top {
                if is_s || cur_mbt.is_intra() || top_mbt.is_intra() {
                    let val = if y == 0 { 0x40 } else { 0x30 };
                    for el in self.deblock.data[idx..][..4].iter_mut() { *el |= val; }
                } else {
                    for x in 0..4 {
                        let blk4 = x + y * 4;
                        let blk8 = x / 2 + (y / 2) * 2;
                        if self.get_cur_blk4(blk4).ncoded != 0 || self.get_top_blk4(blk4).ncoded != 0 {
                            self.deblock.data[idx + x] |= 0x20;
                        } else {
                            let cur_mv = self.get_cur_blk4(blk4).mv;
                            let top_mv = self.get_top_blk4(blk4).mv;
                            let cur_ref = self.get_cur_blk8(blk8).ref_idx;
                            let top_ref = self.get_top_blk8(blk8).ref_idx;
                            if mvdiff4(cur_mv[0], top_mv[0]) || mvdiff4(cur_mv[1], top_mv[1]) || cur_ref != top_ref {
                                self.deblock.data[idx + x] |= 0x10;
                            }
                        }
                    }
                }
            }
            let mut lleft_mbt = left_mbt;
            for x in 0..4 {
                // with the 8x8 transform only even columns have vertical edges
                if tx8x8 && (x & 1) != 0 {
                    lleft_mbt = cur_mbt;
                    continue;
                }
                let can_do_left = x > 0 || self.has_left || (self.mb_x != 0 && deblock_mode != 2);
                if !can_do_left {
                    lleft_mbt = cur_mbt;
                    continue;
                }
                let blk4 = x + y * 4;
                let blk8 = x / 2 + (y / 2) * 2;
                if is_s || cur_mbt.is_intra() || lleft_mbt.is_intra() {
                    self.deblock.data[idx + x] |= if x == 0 { 4 } else { 3 };
                } else if self.get_cur_blk4(blk4).ncoded != 0 || self.get_left_blk4(blk4).ncoded != 0 {
                    self.deblock.data[idx + x] |= 2;
                } else {
                    let cur_mv = self.get_cur_blk4(blk4).mv;
                    let left_mv = self.get_left_blk4(blk4).mv;
                    let cur_ref = self.get_cur_blk8(blk8).ref_idx;
                    let left_ref = self.get_left_blk8(blk8).ref_idx;
                    if mvdiff4(cur_mv[0], left_mv[0]) || mvdiff4(cur_mv[1], left_mv[1]) || cur_ref != left_ref {
                        self.deblock.data[idx + x] |= 1;
                    }
                }
                lleft_mbt = cur_mbt;
            }
            top_mbt = cur_mbt;
            idx += self.deblock.stride;
        }
    }
    pub fn next_mb(&mut self) {
        self.mb_x += 1;
        self.has_left = true;
        if self.mb_x == self.mb_w {
            self.mb_x = 0;
            self.mb_y += 1;
            self.mb.update_row();
            self.blk8.update_row();
            self.blk4.update_row();

            self.deblock.update_row();

            self.has_left = false;
        }
        // the macroblock above is usable only if it belongs to this slice,
        // i.e. the current raster index is at least one full row past mb_start
        self.has_top = self.mb_x + self.mb_y * self.mb_w >= self.mb_start + self.mb_w;
    }
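
    // All caches keep one row of history above the current row plus padding
    // columns around it, so the left neighbour of a block is one entry back,
    // the top neighbour exactly one stride back, and out-of-slice entries
    // keep their default "unavailable" values.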
    pub fn get_cur_mb_idx(&self) -> usize { self.mb.xpos + self.mb_x }
    pub fn get_cur_blk8_idx(&self, blk_no: usize) -> usize {
        self.blk8.xpos + self.mb_x * 2 + (blk_no & 1) + (blk_no >> 1) * self.blk8.stride
    }
    pub fn get_cur_blk4_idx(&self, blk_no: usize) -> usize {
        self.blk4.xpos + self.mb_x * 4 + (blk_no & 3) + (blk_no >> 2) * self.blk4.stride
    }
    pub fn get_cur_mb(&mut self) -> &mut MBData {
        let idx = self.get_cur_mb_idx();
        &mut self.mb.data[idx]
    }
    pub fn get_left_mb(&self) -> &MBData {
        &self.mb.data[self.get_cur_mb_idx() - 1]
    }
    pub fn get_top_mb(&self) -> &MBData {
        &self.mb.data[self.get_cur_mb_idx() - self.mb.stride]
    }
    pub fn get_cur_blk8(&mut self, blk_no: usize) -> &mut Blk8Data {
        let idx = self.get_cur_blk8_idx(blk_no);
        &mut self.blk8.data[idx]
    }
    pub fn get_left_blk8(&self, blk_no: usize) -> &Blk8Data {
        &self.blk8.data[self.get_cur_blk8_idx(blk_no) - 1]
    }
    pub fn get_top_blk8(&self, blk_no: usize) -> &Blk8Data {
        &self.blk8.data[self.get_cur_blk8_idx(blk_no) - self.blk8.stride]
    }
    pub fn get_cur_blk4(&mut self, blk_no: usize) -> &mut Blk4Data {
        let idx = self.get_cur_blk4_idx(blk_no);
        &mut self.blk4.data[idx]
    }
    pub fn get_left_blk4(&self, blk_no: usize) -> &Blk4Data {
        &self.blk4.data[self.get_cur_blk4_idx(blk_no) - 1]
    }
    pub fn get_top_blk4(&self, blk_no: usize) -> &Blk4Data {
        &self.blk4.data[self.get_cur_blk4_idx(blk_no) - self.blk4.stride]
    }
    pub fn apply_to_blk8<F: (Fn(&mut Blk8Data))>(&mut self, f: F) {
        let start = self.get_cur_blk8_idx(0);
        for row in self.blk8.data[start..].chunks_mut(self.blk8.stride).take(2) {
            for el in row[..2].iter_mut() {
                f(el);
            }
        }
    }
    pub fn apply_to_blk4<F: (Fn(&mut Blk4Data))>(&mut self, f: F) {
        let start = self.get_cur_blk4_idx(0);
        for row in self.blk4.data[start..].chunks_mut(self.blk4.stride).take(4) {
            for el in row[..4].iter_mut() {
                f(el);
            }
        }
    }
    pub fn fill_ipred(&mut self, imode: IntraPredMode) {
        self.apply_to_blk4(|blk| blk.ipred = imode);
    }
    pub fn fill_ncoded(&mut self, nc: u8) {
        self.apply_to_blk4(|blk| blk.ncoded = nc);
        self.apply_to_blk8(|blk| blk.ncoded_c = [nc; 2]);
    }
    pub fn reset_mb_mv(&mut self) {
        self.apply_to_blk8(|blk| blk.ref_idx = [INVALID_REF; 2]);
    }
    pub fn get_mv_ctx(&self, xoff: usize, yoff: usize, ref_l: usize) -> (usize, usize) {
        // xoff/yoff are 4-aligned pixel offsets, so yoff already equals
        // (yoff / 4) * 4, the raster offset of the 4x4 block row
        let blk_no = xoff / 4 + yoff;
        let mv_a = self.get_left_blk4(blk_no).mvd[ref_l];
        let mv_b = self.get_top_blk4(blk_no).mvd[ref_l];
        let mv = mv_a + mv_b;
        let ctx0 = if mv.x < 3 { 0 } else if mv.x <= 32 { 1 } else { 2 };
        let ctx1 = if mv.y < 3 { 0 } else if mv.y <= 32 { 1 } else { 2 };
        (ctx0, ctx1)
    }
    pub fn get_mv_ref_ctx(&self, xoff: usize, yoff: usize, ref_l: usize) -> usize {
        let blk_no = xoff / 8 + (yoff / 8) * 2;
        let mut ctx = 0;
        let left_ref = self.get_left_blk8(blk_no).ref_idx[ref_l];
        let top_ref = self.get_top_blk8(blk_no).ref_idx[ref_l];
        if !left_ref.not_avail() && !left_ref.is_direct() && left_ref.index() > 0 {
            ctx += 1;
        }
        if !top_ref.not_avail() && !top_ref.is_direct() && top_ref.index() > 0 {
            ctx += 2;
        }
        ctx
    }
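
    // predict() below implements the usual H.264 median MV prediction:
    // the MV comes from the median of neighbours A (left), B (top) and
    // C (top-right, falling back to top-left), unless exactly one neighbour
    // uses the same reference (its MV is taken directly) or the partition is
    // 16x8/8x16, which have dedicated directional shortcuts.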
    pub fn predict(&mut self, xpos: usize, ypos: usize, bw: usize, bh: usize, ref_l: usize, diff_mv: MV, ref_idx: PicRef) {
        let midx = self.get_cur_blk4_idx(0) + xpos / 4 + ypos / 4 * self.blk4.stride;
        let ridx = self.get_cur_blk8_idx(0) + xpos / 8 + ypos / 8 * self.blk8.stride;
        let ridx_c = self.get_cur_blk8_idx(0) + (xpos + bw) / 8 + ypos / 8 * self.blk8.stride - if (ypos & 4) == 0 { self.blk8.stride } else { 0 };

        let mv_a = self.blk4.data[midx - 1].mv[ref_l];
        let mv_b = self.blk4.data[midx - self.blk4.stride].mv[ref_l];
        let mut mv_c = self.blk4.data[midx - self.blk4.stride + bw / 4].mv[ref_l];

        let rx = if (xpos & 4) != 0 { 0 } else { 1 };
        let ry = if (ypos & 4) != 0 { 0 } else { self.blk8.stride };
        let ref_a = self.blk8.data[ridx - rx].ref_idx[ref_l];
        let ref_b = self.blk8.data[ridx - ry].ref_idx[ref_l];
        let mut ref_c = self.blk8.data[ridx_c].ref_idx[ref_l];

        if ref_c == MISSING_REF || (((xpos + bw) & 4) == 0 && (ypos & 4) != 0) {
            mv_c = self.blk4.data[midx - self.blk4.stride - 1].mv[ref_l];
            ref_c = self.blk8.data[ridx - rx - ry].ref_idx[ref_l];
        }

        let pred_mv = if bw == 16 && bh == 8 && ypos == 0 && ref_b == ref_idx {
                mv_b
            } else if bw == 16 && bh == 8 && ypos != 0 && ref_a == ref_idx {
                mv_a
            } else if bw == 8 && bh == 16 && xpos == 0 && ref_a == ref_idx {
                mv_a
            } else if bw == 8 && bh == 16 && xpos != 0 && ref_c == ref_idx {
                mv_c
            } else if ref_b == MISSING_REF && ref_c == MISSING_REF {
                mv_a
            } else {
                let count = ((ref_a == ref_idx) as u8) + ((ref_b == ref_idx) as u8) + ((ref_c == ref_idx) as u8);
                if count == 1 {
                    if ref_a == ref_idx {
                        mv_a
                    } else if ref_b == ref_idx {
                        mv_b
                    } else {
                        mv_c
                    }
                } else {
                    MV::pred(mv_a, mv_b, mv_c)
                }
            };

        let mv = pred_mv + diff_mv;
        self.fill_mv (xpos, ypos, bw, bh, ref_l, mv);
        self.fill_ref(xpos, ypos, bw, bh, ref_l, ref_idx);
    }
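
    // P-skip prediction: the same median predictor restricted to list 0 and
    // reference 0, except that the MV collapses to zero when the left or top
    // neighbour is missing or one of them is a zero MV with reference 0.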
    pub fn predict_pskip(&mut self) {
        let midx = self.get_cur_blk4_idx(0);
        let ridx = self.get_cur_blk8_idx(0);

        let mv_a = self.blk4.data[midx - 1].mv[0];
        let mv_b = self.blk4.data[midx - self.blk4.stride].mv[0];
        let mut mv_c = self.blk4.data[midx - self.blk4.stride + 4].mv[0];

        let ref_a = self.blk8.data[ridx - 1].ref_idx[0];
        let ref_b = self.blk8.data[ridx - self.blk8.stride].ref_idx[0];
        let mut ref_c = self.blk8.data[ridx - self.blk8.stride + 2].ref_idx[0];

        if ref_c == MISSING_REF {
            mv_c = self.blk4.data[midx - self.blk4.stride - 1].mv[0];
            ref_c = self.blk8.data[ridx - self.blk8.stride - 1].ref_idx[0];
        }

        let ref_idx = ZERO_REF;
        let mv = if ref_a == MISSING_REF || ref_b == MISSING_REF || (ref_a == ZERO_REF && mv_a == ZERO_MV) || (ref_b == ZERO_REF && mv_b == ZERO_MV) {
                ZERO_MV
            } else {
                let count = ((ref_a == ref_idx) as u8) + ((ref_b == ref_idx) as u8) + ((ref_c == ref_idx) as u8);
                if count == 1 {
                    if ref_a == ref_idx {
                        mv_a
                    } else if ref_b == ref_idx {
                        mv_b
                    } else {
                        mv_c
                    }
                } else {
                    MV::pred(mv_a, mv_b, mv_c)
                }
            };

        self.fill_mv (0, 0, 16, 16, 0, mv);
        self.fill_ref(0, 0, 16, 16, 0, ref_idx);
    }
    pub fn predict_direct_mb(&mut self, frame_refs: &FrameRefs, temporal_mv: bool, cur_id: u16) {
        let (col_mb, _, _) = frame_refs.get_colocated_info(self.mb_x, self.mb_y);
        if col_mb.mb_type.is_16x16() || !temporal_mv {
            let (mv0, ref0, mv1, ref1) = self.get_direct_mv(frame_refs, temporal_mv, cur_id, 0);
            self.apply_to_blk4(|blk4| blk4.mv = [mv0, mv1]);
            self.apply_to_blk8(|blk8| blk8.ref_idx = [ref0, ref1]);
        } else {
            for blk4 in 0..16 {
                let (mv0, ref0, mv1, ref1) = self.get_direct_mv(frame_refs, temporal_mv, cur_id, blk4);
                self.get_cur_blk4(blk4).mv = [mv0, mv1];
                self.get_cur_blk8(blk4_to_blk8(blk4)).ref_idx = [ref0, ref1];
            }
        }
    }
    pub fn predict_direct_sub(&mut self, frame_refs: &FrameRefs, temporal_mv: bool, cur_id: u16, blk4: usize) {
        let (mv0, ref0, mv1, ref1) = self.get_direct_mv(frame_refs, temporal_mv, cur_id, blk4);
        self.get_cur_blk4(blk4).mv = [mv0, mv1];
        self.get_cur_blk8(blk4_to_blk8(blk4)).ref_idx = [ref0, ref1];
    }
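
    // get_direct_mv() below covers both B direct modes. For the temporal one,
    // with td = r1_poc - r0_poc and tb = cur_id - r0_poc (clipped to
    // [-128, 127]) it derives
    //     tx    = (16384 + |td / 2|) / td
    //     scale = clip((tb * tx + 32) >> 6, -1024, 1023)
    //     mv0   = (col_mv * scale + 128) >> 8,    mv1 = mv0 - col_mv
    // e.g. a frame halfway between its references (td = 4, tb = 2) yields
    // tx = 4096 and scale = 128, i.e. mv0 is roughly half the colocated MV.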
    pub fn get_direct_mv(&self, frame_refs: &FrameRefs, temporal_mv: bool, cur_id: u16, blk4: usize) -> (MV, PicRef, MV, PicRef) {
        let (mbi, r1_poc, r1_long) = frame_refs.get_colocated_info(self.mb_x, self.mb_y);
        let blk8 = blk4_to_blk8(blk4);
        let (col_mv, r0_poc, col_idx) = if mbi.ref_poc[blk8] == [MISSING_POC; 2] {
                (ZERO_MV, MISSING_POC, MISSING_REF)
            } else if mbi.ref_poc[blk8][0] != MISSING_POC {
                (mbi.mv[blk4][0], mbi.ref_poc[blk8][0], mbi.ref_idx[blk8][0])
            } else {
                (mbi.mv[blk4][1], mbi.ref_poc[blk8][1], mbi.ref_idx[blk8][1])
            };
        let (col_ref, r0_long) = frame_refs.map_ref0(r0_poc);
        if temporal_mv {
            let td = (i32::from(r1_poc) - i32::from(r0_poc)).max(-128).min(127);
            if r0_long || td == 0 {
                (col_mv, col_ref, ZERO_MV, ZERO_REF)
            } else {
                let tx = (16384 + (td / 2).abs()) / td;
                let tb = (i32::from(cur_id) - i32::from(r0_poc)).max(-128).min(127);
                let scale = ((tb * tx + 32) >> 6).max(-1024).min(1023);
                let mv0 = MV {
                        x: ((i32::from(col_mv.x) * scale + 128) >> 8) as i16,
                        y: ((i32::from(col_mv.y) * scale + 128) >> 8) as i16,
                    };
                let mv1 = mv0 - col_mv;
                (mv0, col_ref, mv1, ZERO_REF)
            }
        } else {
            let blk4 = 0; // we generate the same MV prediction for the whole MB
            let blk8 = blk4_to_blk8(blk4);
            let midx = self.get_cur_blk4_idx(blk4);
            let ridx = self.get_cur_blk8_idx(blk8);
            let ridx_c = self.get_cur_blk8_idx(blk8) + 16 / 8 - self.blk8.stride;

            let mv_a = self.blk4.data[midx - 1].mv;
            let mv_b = self.blk4.data[midx - self.blk4.stride].mv;
            let mut mv_c = self.blk4.data[midx - self.blk4.stride + 16 / 4].mv;

            let ref_a = self.blk8.data[ridx - 1].ref_idx;
            let ref_b = self.blk8.data[ridx - self.blk8.stride].ref_idx;
            let mut ref_c = self.blk8.data[ridx_c].ref_idx;

            if ref_c == [MISSING_REF; 2] {
                mv_c = self.blk4.data[midx - self.blk4.stride - 1].mv;
                ref_c = self.blk8.data[ridx - self.blk8.stride - 1].ref_idx;
            }

            let mut refs = [INVALID_REF; 2];
            for cur_ref in [ref_a, ref_b, ref_c].iter() {
                refs[0] = refs[0].min_pos(cur_ref[0]);
                refs[1] = refs[1].min_pos(cur_ref[1]);
            }
            if refs == [INVALID_REF; 2] {
                return (ZERO_MV, ZERO_REF, ZERO_MV, ZERO_REF);
            }

            let mut col_zero = true;
            if r1_long || col_idx != ZERO_REF {
                col_zero = false;
            }
            if col_mv.x.abs() > 1 || col_mv.y.abs() > 1 {
                col_zero = false;
            }
            let mut mvs = [ZERO_MV; 2];
            for ref_l in 0..2 {
                if mbi.mb_type.is_intra() || (!refs[ref_l].not_avail() && !(refs[ref_l] == ZERO_REF && col_zero)) {
                    let ref_idx = refs[ref_l];
                    mvs[ref_l] = if ref_b[ref_l] == MISSING_REF && ref_c[ref_l] == MISSING_REF {
                            mv_a[ref_l]
                        } else {
                            let count = ((ref_a[ref_l] == ref_idx) as u8) + ((ref_b[ref_l] == ref_idx) as u8) + ((ref_c[ref_l] == ref_idx) as u8);
                            if count == 1 {
                                if ref_a[ref_l] == ref_idx {
                                    mv_a[ref_l]
                                } else if ref_b[ref_l] == ref_idx {
                                    mv_b[ref_l]
                                } else {
                                    mv_c[ref_l]
                                }
                            } else {
                                MV::pred(mv_a[ref_l], mv_b[ref_l], mv_c[ref_l])
                            }
                        };
                }
            }
            (mvs[0], refs[0], mvs[1], refs[1])
        }
    }
    pub fn fill_mv(&mut self, xpos: usize, ypos: usize, bw: usize, bh: usize, ref_l: usize, mv: MV) {
        let start = self.get_cur_blk4_idx(0) + xpos / 4 + ypos / 4 * self.blk4.stride;
        for row in self.blk4.data[start..].chunks_mut(self.blk4.stride).take(bh / 4) {
            for blk in row[..bw / 4].iter_mut() {
                blk.mv[ref_l] = mv;
            }
        }
    }
    pub fn fill_mvd(&mut self, xpos: usize, ypos: usize, bw: usize, bh: usize, ref_l: usize, mv: MV) {
        let mvd = MV{ x: mv.x.abs().min(128), y: mv.y.abs().min(128) };
        let start = self.get_cur_blk4_idx(0) + xpos / 4 + ypos / 4 * self.blk4.stride;
        for row in self.blk4.data[start..].chunks_mut(self.blk4.stride).take(bh / 4) {
            for blk in row[..bw / 4].iter_mut() {
                blk.mvd[ref_l] = mvd;
            }
        }
    }
    pub fn fill_ref(&mut self, xpos: usize, ypos: usize, bw: usize, bh: usize, ref_l: usize, ref_idx: PicRef) {
        let start = self.get_cur_blk8_idx(0) + xpos / 8 + ypos / 8 * self.blk8.stride;
        if bw < 8 || bh < 8 {
            self.blk8.data[start].ref_idx[ref_l] = ref_idx;
        } else {
            for row in self.blk8.data[start..].chunks_mut(self.blk8.stride).take(bh / 8) {
                for blk in row[..bw / 8].iter_mut() {
                    blk.ref_idx[ref_l] = ref_idx;
                }
            }
        }
    }
}

// reports whether two MVs differ by at least one full pel (4 quarter-pel units)
fn mvdiff4(mv1: MV, mv2: MV) -> bool {
    let mv = mv1 - mv2;
    (mv.x.abs() >= 4) || (mv.y.abs() >= 4)
}
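
// A minimal sanity-check sketch of the helpers above (an illustrative
// addition, not part of the original decoder).
#[cfg(test)]
mod tests {
    use super::*;

    #[test]
    fn blk4_to_blk8_maps_raster_to_quadrants() {
        assert_eq!(blk4_to_blk8(0), 0); // top-left corner
        assert_eq!(blk4_to_blk8(3), 1); // top-right corner
        assert_eq!(blk4_to_blk8(12), 2); // bottom-left corner
        assert_eq!(blk4_to_blk8(15), 3); // bottom-right corner
    }

    #[test]
    fn picref_comparison_ignores_direct_flag() {
        let mut a = PicRef::new(1);
        let b = PicRef::new(1);
        a.set_direct();
        assert!(a.is_direct());
        assert_eq!(a.index(), 1);
        assert!(a == b); // DIRECT_FLAG takes no part in comparison
    }

    #[test]
    fn mvdiff4_uses_full_pel_threshold() {
        let zero = MV { x: 0, y: 0 };
        assert!(!mvdiff4(zero, MV { x: 3, y: 3 })); // below one full pel
        assert!( mvdiff4(zero, MV { x: 4, y: 0 })); // one full pel apart
    }
}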