use nihav_codec_support::codecs::{MV, ZERO_MV};
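
// Extra operations on motion vectors used by the RV3/4 encoder: temporal
// scaling for B-frame prediction and the difference test used when building
// deblocking masks.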
pub trait RV34MVOps {
    fn scale(&self, trd: u32, trb: u32) -> (MV, MV);
    fn diff_gt_3(self, other: Self) -> bool;
}

impl RV34MVOps for MV {
    fn scale(&self, trd: u32, trb: u32) -> (MV, MV) {
        const TR_SHIFT: u8 = 14;
        const TR_BIAS: i32 = 1 << (TR_SHIFT - 1);
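
        // `ratio` is trb/trd in Q14 fixed point: mv_f rescales this vector by
        // the ratio of the temporal distances (with rounding) and mv_b is the
        // complementary backward part (mv_f - *self).
        // Illustrative example: mv = (6, 0) with trd = 4, trb = 1 gives
        // ratio = 4096, mv_f = (2, 0) (1.5 rounded up) and mv_b = (-4, 0).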
        let ratio = ((trb as i32) << TR_SHIFT) / (trd as i32);
        let mv_f = MV {
                x: (((self.x as i32) * ratio + TR_BIAS) >> TR_SHIFT) as i16,
                y: (((self.y as i32) * ratio + TR_BIAS) >> TR_SHIFT) as i16
            };
        let mv_b = mv_f - *self;
        (mv_f, mv_b)
    }
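    // True when the two vectors differ by more than 3 (in MV units) in either
    // component; fill_deblock() uses this to decide whether the edge between
    // two blocks needs deblocking.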
    fn diff_gt_3(self, other: Self) -> bool {
        let diff = self - other;
        diff.x.abs() > 3 || diff.y.abs() > 3
    }
}

#[derive(Debug,Clone,Copy)]
pub enum PredType4x4 {
    Ver,
    Hor,
    DC,
    DiagDownLeft,
    DiagDownRight,
    VerRight,
    HorDown,
    VerLeft,
    HorUp,
    // DC-only fallback modes; they all map to prediction index 0 below
    LeftDC,
    TopDC,
    DC128,
    DiagDownLeftNoDown,
    HorUpNoDown,
    VerLeftNoDown
}

#[derive(Debug,Clone,Copy)]
pub enum PredType8x8 {
    DC,
    Hor,
    Ver,
    Plane,
    // DC-only fallback modes; they all map to prediction index 0 below
    LeftDC,
    TopDC,
    DC128
}
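
// Conversion of a prediction mode into the compact index stored in the
// per-block `ipred` array and used for intra mode prediction contexts.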
pub trait ToIndex {
    fn to_index(self) -> i8;
}

impl ToIndex for PredType8x8 {
    fn to_index(self) -> i8 {
        match self {
            PredType8x8::Ver => 1,
            PredType8x8::Hor => 2,
            PredType8x8::Plane => 3,
            _ => 0, // DC predictions
        }
    }
}

impl ToIndex for PredType4x4 {
    fn to_index(self) -> i8 {
        match self {
            PredType4x4::Ver => 1,
            PredType4x4::Hor => 2,
            PredType4x4::DiagDownRight => 3,
            PredType4x4::DiagDownLeft | PredType4x4::DiagDownLeftNoDown => 4,
            PredType4x4::VerRight => 5,
            PredType4x4::VerLeft | PredType4x4::VerLeftNoDown => 6,
            PredType4x4::HorUp | PredType4x4::HorUpNoDown => 7,
            PredType4x4::HorDown => 8,
            _ => 0, // DC predictions
        }
    }
}
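
// One 4x4 block of transform coefficients.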
#[derive(Clone,Copy,Default)]
pub struct Block {
    pub coeffs: [i16; 16],
}

impl Block {
    pub fn new() -> Self { Self::default() }
    pub fn is_empty(&self) -> bool {
        for &el in self.coeffs.iter() {
            if el != 0 {
                return false;
            }
        }
        true
    }
    pub fn count_nz(&self) -> usize {
        self.coeffs.iter().filter(|&&x| x != 0).count()
    }
}

impl std::fmt::Display for Block {
    fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result {
        let mut out = String::new();
        for row in self.coeffs.chunks(4) {
            out += format!(" {:3} {:3} {:3} {:3}\n", row[0], row[1], row[2], row[3]).as_str();
        }
        write!(f, "{}", out)
    }
}
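
// Per-macroblock deblocking state; cbp_y and deblock_y are bitmasks with one
// bit per 4x4 luma sub-block.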
#[derive(Clone,Copy,Default)]
pub struct DeblockInfo {
    pub cbp_y: u16,
    pub deblock_y: u16,
}
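
// Macroblock type as stored in the encoder state; Invalid marks positions
// that have not been coded yet (or lie outside the frame).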
#[derive(Debug,Clone,Copy,PartialEq)]
pub enum MBType {
    Intra,
    Intra16,
    Skip,
    P16x16,
    P16x16Mix,
    P16x8,
    P8x16,
    P8x8,
    Direct,
    Bidir,
    Forward,
    Backward,
    Invalid,
}

impl MBType {
    pub fn is_intra(self) -> bool { matches!(self, MBType::Intra | MBType::Intra16) }
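    // Weight used when predicting the most probable macroblock type from the
    // neighbours in get_pred_mbtype(); the value doubles as an index into
    // MBTYPE_FROM_WEIGHT there.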
    fn get_weight(self) -> u8 {
        match self {
            MBType::Intra => 0,
            MBType::Intra16 => 1,
            MBType::Skip => unreachable!(),
            MBType::P16x16 => 2,
            MBType::P16x16Mix => 10,
            MBType::P16x8 => 7,
            MBType::P8x16 => 8,
            MBType::P8x8 => 3,
            MBType::Direct => 6,
            MBType::Bidir => 9,
            MBType::Forward => 4,
            MBType::Backward => 5,
            MBType::Invalid => unreachable!(),
        }
    }
    pub fn to_code(self) -> usize {
        match self {
            MBType::Intra => 0,
            MBType::Intra16 => 1,
            MBType::P16x16 | MBType::Forward => 2,
            MBType::P8x8 | MBType::Backward => 3,
            MBType::P16x8 | MBType::Bidir => 4,
            MBType::P8x16 | MBType::Direct => 5,
            MBType::P16x16Mix => 6,
            _ => unreachable!(),
        }
    }
    pub fn has_dir_mv(self, fwd: bool) -> bool {
        match self {
            MBType::Bidir => true,
            MBType::Forward if fwd => true,
            MBType::Backward if !fwd => true,
            _ => false,
        }
    }
}
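
// Position of the macroblock currently being coded and the availability of
// its already coded neighbours within the slice.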
#[derive(Clone,Copy,Default)]
pub struct SliceState {
    pub mb_x: usize,
    pub mb_y: usize,
    pub has_t: bool,
    pub has_l: bool,
    pub has_tl: bool,
    pub has_tr: bool,
}

impl SliceState {
    pub fn new() -> Self { Self::default() }
}
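
// Frame-wide coded-macroblock state (called MBState below): macroblock types,
// 4x4 intra prediction mode indices and per-8x8-block forward/backward motion
// vectors, each array padded with a one-entry guard border so that neighbour
// lookups never go out of bounds.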
#[derive(Default)]
pub struct MBState {
    pub mb_type: Vec<MBType>,
    pub ipred: Vec<i8>,
    pub fwd_mv: Vec<MV>,
    pub bwd_mv: Vec<MV>,
    pub ref_mv: Vec<MV>,
    pub mb_stride: usize,
    pub blk8_stride: usize,
    pub blk4_stride: usize,
}

impl MBState {
    pub fn new() -> Self { Self::default() }
    pub fn resize(&mut self, mb_w: usize, mb_h: usize) {
        self.mb_stride = mb_w + 2;
        self.blk8_stride = mb_w * 2 + 2;
        self.blk4_stride = mb_w * 4 + 2;
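
        // Strides include two guard columns (one on each side) and the arrays
        // one guard row on top, so that left/top/top-right neighbour lookups
        // stay inside the allocation.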
        self.mb_type.resize(self.mb_stride * (mb_h + 1), MBType::Invalid);
        self.ipred.resize(self.blk4_stride * (mb_h * 4 + 1), -1);
        self.fwd_mv.resize(self.blk8_stride * (mb_h * 2 + 1), ZERO_MV);
        self.bwd_mv.resize(self.blk8_stride * (mb_h * 2 + 1), ZERO_MV);
        self.ref_mv.resize(self.blk8_stride * (mb_h * 2 + 1), ZERO_MV);
    }
    pub fn reset(&mut self) {
        for el in self.mb_type.iter_mut() {
            *el = MBType::Invalid;
        }
        for el in self.ipred.iter_mut() {
            *el = -1;
        }
    }
    // Assigns the same MV to all four 8x8 blocks of a macroblock in one direction.
    fn set_mv(&mut self, blk8_idx: usize, fwd: bool, mv: MV) {
        if fwd {
            self.fwd_mv[blk8_idx] = mv;
            self.fwd_mv[blk8_idx + 1] = mv;
            self.fwd_mv[blk8_idx + self.blk8_stride] = mv;
            self.fwd_mv[blk8_idx + self.blk8_stride + 1] = mv;
        } else {
            self.bwd_mv[blk8_idx] = mv;
            self.bwd_mv[blk8_idx + 1] = mv;
            self.bwd_mv[blk8_idx + self.blk8_stride] = mv;
            self.bwd_mv[blk8_idx + self.blk8_stride + 1] = mv;
        }
    }
    pub fn get_mb_idx(&self, mb_x: usize, mb_y: usize) -> usize {
        mb_x + 1 + (mb_y + 1) * self.mb_stride
    }
    pub fn get_blk8_idx(&self, mb_x: usize, mb_y: usize) -> usize {
        mb_x * 2 + 1 + (mb_y * 2 + 1) * self.blk8_stride
    }
    pub fn get_blk4_idx(&self, mb_x: usize, mb_y: usize) -> usize {
        mb_x * 4 + 1 + (mb_y * 4 + 1) * self.blk4_stride
    }
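    // Records a just-coded macroblock (its type, intra prediction modes and
    // motion vectors) so that the following macroblocks can predict from it.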
    pub fn update(&mut self, mb_type: &MacroblockType, mb_x: usize, mb_y: usize) {
        let mb_idx = self.get_mb_idx(mb_x, mb_y);
        let blk8_idx = self.get_blk8_idx(mb_x, mb_y);
        let blk4_idx = self.get_blk4_idx(mb_x, mb_y);

        for row in self.ipred[blk4_idx..].chunks_mut(self.blk4_stride).take(4) {
            for el in row[..4].iter_mut() {
                *el = -1;
            }
        }

        match *mb_type {
            MacroblockType::Intra16x16(ptype) => {
                self.mb_type[mb_idx] = MBType::Intra16;
                let pred_id = ptype.to_index();
                for row in self.ipred[blk4_idx..].chunks_mut(self.blk4_stride).take(4) {
                    for el in row[..4].iter_mut() {
                        *el = pred_id;
                    }
                }
                self.set_mv(blk8_idx, true, ZERO_MV);
                self.set_mv(blk8_idx, false, ZERO_MV);
            },
            MacroblockType::Intra4x4(ptypes) => {
                self.mb_type[mb_idx] = MBType::Intra;
                for (dst, src) in self.ipred[blk4_idx..].chunks_mut(self.blk4_stride).zip(ptypes.chunks(4)) {
                    for (dst, &ptype) in dst.iter_mut().zip(src.iter()) {
                        *dst = ptype.to_index();
                    }
                }
                self.set_mv(blk8_idx, true, ZERO_MV);
                self.set_mv(blk8_idx, false, ZERO_MV);
            },
            MacroblockType::PSkip => {
                self.mb_type[mb_idx] = MBType::Skip;
                self.set_mv(blk8_idx, true, ZERO_MV);
                self.set_mv(blk8_idx, false, ZERO_MV);
            },
            MacroblockType::Inter16x16(mv) => {
                self.mb_type[mb_idx] = MBType::P16x16;
                self.set_mv(blk8_idx, true, mv);
                self.set_mv(blk8_idx, false, ZERO_MV);
            },
            MacroblockType::InterMix(mv) => {
                self.mb_type[mb_idx] = MBType::P16x16Mix;
                self.set_mv(blk8_idx, true, mv);
                self.set_mv(blk8_idx, false, ZERO_MV);
            },
            MacroblockType::Inter16x8(mvs) => {
                self.mb_type[mb_idx] = MBType::P16x8;
                self.fwd_mv[blk8_idx] = mvs[0];
                self.fwd_mv[blk8_idx + 1] = mvs[0];
                self.fwd_mv[blk8_idx + self.blk8_stride] = mvs[1];
                self.fwd_mv[blk8_idx + self.blk8_stride + 1] = mvs[1];
                self.set_mv(blk8_idx, false, ZERO_MV);
            },
            MacroblockType::Inter8x16(mvs) => {
                self.mb_type[mb_idx] = MBType::P8x16;
                self.fwd_mv[blk8_idx] = mvs[0];
                self.fwd_mv[blk8_idx + 1] = mvs[1];
                self.fwd_mv[blk8_idx + self.blk8_stride] = mvs[0];
                self.fwd_mv[blk8_idx + self.blk8_stride + 1] = mvs[1];
                self.set_mv(blk8_idx, false, ZERO_MV);
            },
            MacroblockType::Inter8x8(mvs) => {
                self.mb_type[mb_idx] = MBType::P8x8;
                self.fwd_mv[blk8_idx] = mvs[0];
                self.fwd_mv[blk8_idx + 1] = mvs[1];
                self.fwd_mv[blk8_idx + self.blk8_stride] = mvs[2];
                self.fwd_mv[blk8_idx + self.blk8_stride + 1] = mvs[3];
                self.set_mv(blk8_idx, false, ZERO_MV);
            },
            MacroblockType::BSkip(fmvs, bmvs) => {
                self.mb_type[mb_idx] = MBType::Skip;
                self.fwd_mv[blk8_idx] = fmvs[0];
                self.fwd_mv[blk8_idx + 1] = fmvs[1];
                self.fwd_mv[blk8_idx + self.blk8_stride] = fmvs[2];
                self.fwd_mv[blk8_idx + self.blk8_stride + 1] = fmvs[3];
                self.bwd_mv[blk8_idx] = bmvs[0];
                self.bwd_mv[blk8_idx + 1] = bmvs[1];
                self.bwd_mv[blk8_idx + self.blk8_stride] = bmvs[2];
                self.bwd_mv[blk8_idx + self.blk8_stride + 1] = bmvs[3];
            },
            /*MacroblockType::Direct(fmv, bmv) => {
                self.mb_type[mb_idx] = MBType::Direct;
                self.set_mv(blk8_idx, true, fmv);
                self.set_mv(blk8_idx, false, bmv);
            },
            MacroblockType::Bidir(fmv, bmv) => {
                self.mb_type[mb_idx] = MBType::Bidir;
                self.set_mv(blk8_idx, true, fmv);
                self.set_mv(blk8_idx, false, bmv);
            },
            MacroblockType::Forward(mv) => {
                self.mb_type[mb_idx] = MBType::Forward;
                self.set_mv(blk8_idx, true, mv);
                self.set_mv(blk8_idx, false, ZERO_MV);
            },
            MacroblockType::Backward(mv) => {
                self.mb_type[mb_idx] = MBType::Backward;
                self.set_mv(blk8_idx, true, ZERO_MV);
                self.set_mv(blk8_idx, false, mv);
            },*/
        }
    }
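    // Picks the most probable type for the current macroblock from its already
    // coded neighbours (top, top-right, left, top-left). Skip neighbours count
    // as P16x16 in P-frames and as Direct in B-frames; with three or more
    // candidates the most frequent type wins, ties going to the lowest weight.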
    pub fn get_pred_mbtype(&self, sstate: &SliceState, is_b: bool) -> MBType {
        let mut cand = [MBType::Invalid; 4];
        let mut ccount = 0;

        let mb_idx = self.get_mb_idx(sstate.mb_x, sstate.mb_y);
        if sstate.has_t {
            cand[ccount] = self.mb_type[mb_idx - self.mb_stride];
            ccount += 1;
            if sstate.has_tr {
                cand[ccount] = self.mb_type[mb_idx - self.mb_stride + 1];
                ccount += 1;
            }
        }
        if sstate.has_l {
            cand[ccount] = self.mb_type[mb_idx - 1];
            ccount += 1;
        }
        if sstate.has_tl {
            cand[ccount] = self.mb_type[mb_idx - self.mb_stride - 1];
            ccount += 1;
        }
        if !is_b {
            for el in cand[..ccount].iter_mut() {
                if *el == MBType::Skip {
                    *el = MBType::P16x16;
                }
            }
        } else {
            for el in cand[..ccount].iter_mut() {
                if *el == MBType::Skip {
                    *el = MBType::Direct;
                }
            }
        }
        match ccount {
            0 | 1 => cand[0],
            2 => if cand[0].get_weight() <= cand[1].get_weight() { cand[0] } else { cand[1] },
            _ => {
                const MBTYPE_FROM_WEIGHT: [MBType; 11] = [
                        MBType::Intra, MBType::Intra16, MBType::P16x16, MBType::P8x8,
                        MBType::Forward, MBType::Backward, MBType::Direct, MBType::P16x8,
                        MBType::P8x16, MBType::Bidir, MBType::P16x16Mix
                    ];
                let mut counts = [0; 12];
                for el in cand[..ccount].iter() {
                    counts[usize::from(el.get_weight())] += 1;
                }
                let mut best_idx = 0;
                let mut best_count = 0;
                for (idx, &count) in counts.iter().enumerate() {
                    if count > best_count {
                        best_idx = idx;
                        best_count = count;
                    }
                }
                MBTYPE_FROM_WEIGHT[best_idx]
            },
        }
    }
    // Intra mode indices of the left, top and top-right neighbours of a 4x4 block.
    pub fn get_ipred4x4_ctx(&self, mb_x: usize, mb_y: usize, x: usize, y: usize) -> (i8, i8, i8) {
        let blk4_idx = self.get_blk4_idx(mb_x, mb_y) + x + y * self.blk4_stride;
        (self.ipred[blk4_idx - 1],
         self.ipred[blk4_idx - self.blk4_stride],
         self.ipred[blk4_idx - self.blk4_stride + 1])
    }
    pub fn set_ipred4x4(&mut self, mb_x: usize, mb_y: usize, modes: &[PredType4x4; 16]) {
        let blk4_idx = self.get_blk4_idx(mb_x, mb_y);
        for (dst, src) in self.ipred[blk4_idx..].chunks_mut(self.blk4_stride).zip(modes.chunks(4)) {
            for (dst, src) in dst.iter_mut().zip(src.iter()) {
                *dst = src.to_index();
            }
        }
    }
    fn get_mv(&self, idx: usize, fwd: bool) -> MV {
        if fwd {
            self.fwd_mv[idx]
        } else {
            self.bwd_mv[idx]
        }
    }
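    // Difference between the current block's forward MV and its prediction,
    // the median of the left, top and top-right (or top-left) neighbours;
    // `xoff`/`yoff` select the 8x8 block inside the macroblock and `w16`
    // signals a single 16x16 motion vector.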
    pub fn get_diff_mv(&self, sstate: &SliceState, w16: bool, xoff: usize, yoff: usize) -> MV {
        let blk8_idx = self.get_blk8_idx(sstate.mb_x, sstate.mb_y) + xoff + yoff * self.blk8_stride;

        let cur_mv = self.get_mv(blk8_idx, true);

        if (yoff == 0 && !sstate.has_t) && (xoff == 0 && !sstate.has_l) {
            return cur_mv;
        }

        let left_mv = if sstate.has_l || (xoff != 0) { self.get_mv(blk8_idx - 1, true) } else { ZERO_MV };
        let top_mv = if sstate.has_t || (yoff != 0) { self.get_mv(blk8_idx - self.blk8_stride, true) } else { left_mv };
        // Availability of the top-right and top-left neighbours of this 8x8
        // block (block index: 0 = top-left, 1 = top-right, 2 = bottom-left,
        // 3 = bottom-right within the macroblock).
        let has_tr = match xoff + yoff * 2 {
                0 if w16 => sstate.has_tr,
                0 => sstate.has_t,
                1 => sstate.has_tr,
                2 => true,
                _ => false,
            };
        let has_tl = match xoff + yoff * 2 {
                0 => sstate.has_tl,
                1 => sstate.has_t,
                2 => sstate.has_l,
                _ => true,
            };

        let mv_c = if has_tr {
                self.get_mv(blk8_idx - self.blk8_stride + if w16 { 2 } else { 1 }, true)
            } else if has_tl {
                self.get_mv(blk8_idx - self.blk8_stride - 1, true)
            } else {
                return cur_mv - left_mv;
            };

        cur_mv - MV::pred(left_mv, top_mv, mv_c)
    }
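    // The same prediction for B-frame macroblocks: only neighbours that carry
    // a motion vector in the requested direction (see MBType::has_dir_mv) are
    // used, and the median degrades to an average, a single candidate or zero
    // when fewer than three candidates are available.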
    pub fn get_diff_mv_b(&self, sstate: &SliceState, fwd: bool) -> MV {
        let mb_idx = self.get_mb_idx(sstate.mb_x, sstate.mb_y);
        let blk8_idx = self.get_blk8_idx(sstate.mb_x, sstate.mb_y);

        let mut pred_mv = [ZERO_MV; 3];
        let mut pcount = 0;

        let cur_mv = self.get_mv(blk8_idx, fwd);

        if sstate.has_l && self.mb_type[mb_idx - 1].has_dir_mv(fwd) {
            pred_mv[pcount] = self.get_mv(blk8_idx - 1, fwd);
            pcount += 1;
        }
        if !sstate.has_t {
            return cur_mv - pred_mv[0];
        }
        if self.mb_type[mb_idx - self.mb_stride].has_dir_mv(fwd) {
            pred_mv[pcount] = self.get_mv(blk8_idx - self.blk8_stride, fwd);
            pcount += 1;
        }
        if sstate.has_tr {
            if self.mb_type[mb_idx - self.mb_stride + 1].has_dir_mv(fwd) {
                pred_mv[pcount] = self.get_mv(blk8_idx - self.blk8_stride + 2, fwd);
                pcount += 1;
            }
        } else if sstate.has_tl && self.mb_type[mb_idx - self.mb_stride - 1].has_dir_mv(fwd) {
            pred_mv[pcount] = self.get_mv(blk8_idx - self.blk8_stride - 1, fwd);
            pcount += 1;
        }

        let pred_mv = match pcount {
                3 => MV::pred(pred_mv[0], pred_mv[1], pred_mv[2]),
                2 => MV{ x: (pred_mv[0].x + pred_mv[1].x) / 2, y: (pred_mv[0].y + pred_mv[1].y) / 2 },
                1 => pred_mv[0],
                _ => ZERO_MV,
            };

        cur_mv - pred_mv
    }
    // Swaps the current forward MVs with the stored reference-frame MVs.
    pub fn swap_mvs(&mut self) {
        std::mem::swap(&mut self.fwd_mv, &mut self.ref_mv);
    }
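    // Builds the luma deblocking mask for one macroblock: intra macroblocks
    // deblock all edges, inter macroblocks deblock an edge when the motion
    // vectors on its two sides differ by more than 3, in addition to the edges
    // already flagged in the coded block pattern.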
    pub fn fill_deblock(&self, dblk: &mut DeblockInfo, sstate: &SliceState) {
        if self.mb_type[self.get_mb_idx(sstate.mb_x, sstate.mb_y)].is_intra() {
            dblk.deblock_y = 0xFFFF;
            return;
        }
        let mut hmvmask = 0;
        let mut vmvmask = 0;

        let mut blk8_idx = self.get_blk8_idx(sstate.mb_x, sstate.mb_y);
        for y in 0..2 {
            for x in 0..2 {
                let shift = x * 2 + y * 8;
                let cur_mv = self.get_mv(blk8_idx + x, true);
                if (x > 0) || (sstate.mb_x > 0) {
                    let left_mv = self.get_mv(blk8_idx + x - 1, true);
                    if cur_mv.diff_gt_3(left_mv) {
                        vmvmask |= 0x11 << shift;
                    }
                }
                if (y > 0) || (sstate.mb_y > 0) {
                    let top_mv = self.get_mv(blk8_idx + x - self.blk8_stride, true);
                    if cur_mv.diff_gt_3(top_mv) {
                        hmvmask |= 0x03 << shift;
                    }
                }
            }
            blk8_idx += self.blk8_stride;
        }
        if sstate.mb_y == 0 { hmvmask &= !0x000F; }
        if sstate.mb_x == 0 { vmvmask &= !0x1111; }

        dblk.deblock_y = dblk.cbp_y | hmvmask | vmvmask;
    }
}
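
// A macroblock as produced by macroblock mode selection, before it is turned
// into bitstream elements and folded into MBState via update().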
#[derive(Clone,Copy)]
pub enum MacroblockType {
    Intra16x16(PredType8x8),
    Intra4x4([PredType4x4; 16]),
    PSkip,
    Inter16x16(MV),
    InterMix(MV),
    Inter16x8([MV; 2]),
    Inter8x16([MV; 2]),
    Inter8x8([MV; 4]),
    BSkip([MV; 4], [MV; 4]),
    /*Direct(MV, MV),
    Bidir(MV, MV),
    Forward(MV),
    Backward(MV),*/
}

impl Default for MacroblockType {
    fn default() -> Self { Self::Intra16x16(PredType8x8::DC) }
}

impl MacroblockType {
    pub fn is_intra(&self) -> bool {
        matches!(*self, MacroblockType::Intra16x16(_) | MacroblockType::Intra4x4(_))
    }
    pub fn is_16(&self) -> bool {
        matches!(*self, MacroblockType::Intra16x16(_) | MacroblockType::InterMix(_))
    }
    pub fn is_skip(&self) -> bool {
        matches!(*self, MacroblockType::PSkip | MacroblockType::BSkip(_, _))
    }
}
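
// A fully coded macroblock: its type plus 25 coefficient blocks (most likely
// 16 luma and 8 chroma 4x4 blocks plus the extra block holding the luma DC
// coefficients of 16x16-coded macroblocks).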
pub struct Macroblock {
    pub mb_type: MacroblockType,
    pub coeffs: [Block; 25],
}