From: Kostya Shishkov Date: Thu, 13 May 2021 09:54:55 +0000 (+0200) Subject: h264: make debug and release versions of MC functions for better performance X-Git-Url: https://git.nihav.org/?a=commitdiff_plain;h=999fbb839c077549a29cb84db47a0b0b65a2c0ef;p=nihav.git h264: make debug and release versions of MC functions for better performance --- diff --git a/nihav-itu/src/codecs/h264/dsp/debug.rs b/nihav-itu/src/codecs/h264/dsp/debug.rs new file mode 100644 index 0000000..640e597 --- /dev/null +++ b/nihav-itu/src/codecs/h264/dsp/debug.rs @@ -0,0 +1,212 @@ +use nihav_codec_support::codecs::blockdsp::*; + +use super::clip_u8; + +const TMP_BUF_STRIDE: usize = 32; + +fn interp_block1(dst: &mut [u8], dstride: usize, src: &[u8], sstride: usize, w: usize, h: usize, hor: bool, avg0: bool) { + + let step = if hor { 1 } else { sstride }; + let mut idx = 0; + let avgidx = if avg0 { step * 2 } else { step * 3 }; + + for dline in dst.chunks_mut(dstride).take(h) { + for (x, pix) in dline.iter_mut().take(w).enumerate() { + let t = clip_u8(( i16::from(src[idx + x]) + - 5 * i16::from(src[idx + x + step]) + + 20 * i16::from(src[idx + x + step * 2]) + + 20 * i16::from(src[idx + x + step * 3]) + - 5 * i16::from(src[idx + x + step * 4]) + + i16::from(src[idx + x + step * 5]) + + 16) >> 5); + *pix = ((u16::from(t) + u16::from(src[idx + x + avgidx]) + 1) >> 1) as u8; + } + idx += sstride; + } +} + +fn interp_block2(dst: &mut [u8], dstride: usize, src: &[u8], sstride: usize, w: usize, h: usize, hor: bool) { + let step = if hor { 1 } else { sstride }; + let mut idx = 0; + for dline in dst.chunks_mut(dstride).take(h) { + for (x, pix) in dline.iter_mut().take(w).enumerate() { + *pix = clip_u8(( i16::from(src[idx + x]) + - 5 * i16::from(src[idx + x + step]) + + 20 * i16::from(src[idx + x + step * 2]) + + 20 * i16::from(src[idx + x + step * 3]) + - 5 * i16::from(src[idx + x + step * 4]) + + i16::from(src[idx + x + step * 5]) + + 16) >> 5); + } + idx += sstride; + } +} + +fn mc_avg_tmp(dst: &mut [u8], dstride: usize, w: usize, h: usize, tmp: &[u8], tmp2: &[u8]) { + for (dline, (sline0, sline1)) in dst.chunks_mut(dstride).zip(tmp.chunks(TMP_BUF_STRIDE).zip(tmp2.chunks(TMP_BUF_STRIDE))).take(h) { + for (pix, (&a, &b)) in dline.iter_mut().zip(sline0.iter().zip(sline1.iter())).take(w) { + *pix = ((u16::from(a) + u16::from(b) + 1) >> 1) as u8; + } + } +} + +fn h264_mc00(dst: &mut [u8], dstride: usize, src: &[u8], sstride: usize, w: usize, h: usize) { + for (dline, sline) in dst.chunks_mut(dstride).zip(src.chunks(sstride)).take(h) { + dline[..w].copy_from_slice(&sline[..w]); + } +} + +fn h264_mc01(dst: &mut [u8], dstride: usize, src: &[u8], sstride: usize, w: usize, h: usize) { + interp_block1(dst, dstride, &src[sstride * 2..], sstride, w, h, true, true); +} + +fn h264_mc02(dst: &mut [u8], dstride: usize, src: &[u8], sstride: usize, w: usize, h: usize) { + interp_block2(dst, dstride, &src[sstride * 2..], sstride, w, h, true); +} + +fn h264_mc03(dst: &mut [u8], dstride: usize, src: &[u8], sstride: usize, w: usize, h: usize) { + interp_block1(dst, dstride, &src[sstride * 2..], sstride, w, h, true, false); +} + +fn h264_mc10(dst: &mut [u8], dstride: usize, src: &[u8], sstride: usize, w: usize, h: usize) { + interp_block1(dst, dstride, &src[2..], sstride, w, h, false, true); +} + +fn h264_mc11(dst: &mut [u8], dstride: usize, src: &[u8], sstride: usize, w: usize, h: usize) { + let mut tmp = [0u8; TMP_BUF_STRIDE * 16]; + let mut tmp2 = [0u8; TMP_BUF_STRIDE * 16]; + h264_mc02(&mut tmp, TMP_BUF_STRIDE, src, sstride, w, h); + 
h264_mc20(&mut tmp2, TMP_BUF_STRIDE, src, sstride, w, h); + mc_avg_tmp(dst, dstride, w, h, &tmp, &tmp2); +} + +fn h264_mc12(dst: &mut [u8], dstride: usize, src: &[u8], sstride: usize, w: usize, h: usize) { + let mut tmp = [0u8; TMP_BUF_STRIDE * 16]; + let mut tmp2 = [0u8; TMP_BUF_STRIDE * 16]; + h264_mc02(&mut tmp, TMP_BUF_STRIDE, src, sstride, w, h); + h264_mc22(&mut tmp2, TMP_BUF_STRIDE, src, sstride, w, h); + mc_avg_tmp(dst, dstride, w, h, &tmp, &tmp2); +} + +fn h264_mc13(dst: &mut [u8], dstride: usize, src: &[u8], sstride: usize, w: usize, h: usize) { + let mut tmp = [0u8; TMP_BUF_STRIDE * 16]; + let mut tmp2 = [0u8; TMP_BUF_STRIDE * 16]; + h264_mc02(&mut tmp, TMP_BUF_STRIDE, src, sstride, w, h); + h264_mc20(&mut tmp2, TMP_BUF_STRIDE, &src[1..], sstride, w, h); + mc_avg_tmp(dst, dstride, w, h, &tmp, &tmp2); +} + +fn h264_mc20(dst: &mut [u8], dstride: usize, src: &[u8], sstride: usize, w: usize, h: usize) { + interp_block2(dst, dstride, &src[2..], sstride, w, h, false); +} + +fn h264_mc21(dst: &mut [u8], dstride: usize, src: &[u8], sstride: usize, w: usize, h: usize) { + let mut tmp = [0u8; TMP_BUF_STRIDE * 16]; + let mut tmp2 = [0u8; TMP_BUF_STRIDE * 16]; + h264_mc22(&mut tmp, TMP_BUF_STRIDE, src, sstride, w, h); + h264_mc20(&mut tmp2, TMP_BUF_STRIDE, src, sstride, w, h); + mc_avg_tmp(dst, dstride, w, h, &tmp, &tmp2); +} + +fn h264_mc22(dst: &mut [u8], dstride: usize, src: &[u8], sstride: usize, w: usize, h: usize) { + let mut tmp = [0i32; TMP_BUF_STRIDE * 16]; + let mut idx = 0; + for dline in tmp.chunks_mut(TMP_BUF_STRIDE).take(h) { + for (x, pix) in dline.iter_mut().take(w + 5).enumerate() { + *pix = i32::from(src[idx + x]) + - 5 * i32::from(src[idx + x + sstride]) + + 20 * i32::from(src[idx + x + sstride * 2]) + + 20 * i32::from(src[idx + x + sstride * 3]) + - 5 * i32::from(src[idx + x + sstride * 4]) + + i32::from(src[idx + x + sstride * 5]); + } + idx += sstride; + } + for (dline, sline) in dst.chunks_mut(dstride).zip(tmp.chunks(TMP_BUF_STRIDE)).take(h) { + for (x, pix) in dline.iter_mut().take(w).enumerate() { + *pix = clip_u8(((sline[x] - 5 * sline[x + 1] + 20 * sline[x + 2] + 20 * sline[x + 3] - 5 * sline[x + 4] + sline[x + 5] + 512) >> 10) as i16); + } + } +} + +fn h264_mc23(dst: &mut [u8], dstride: usize, src: &[u8], sstride: usize, w: usize, h: usize) { + let mut tmp = [0u8; TMP_BUF_STRIDE * 16]; + let mut tmp2 = [0u8; TMP_BUF_STRIDE * 16]; + h264_mc22(&mut tmp, TMP_BUF_STRIDE, src, sstride, w, h); + h264_mc20(&mut tmp2, TMP_BUF_STRIDE, &src[1..], sstride, w, h); + mc_avg_tmp(dst, dstride, w, h, &tmp, &tmp2); +} + +fn h264_mc30(dst: &mut [u8], dstride: usize, src: &[u8], sstride: usize, w: usize, h: usize) { + interp_block1(dst, dstride, &src[2..], sstride, w, h, false, false); +} + +fn h264_mc31(dst: &mut [u8], dstride: usize, src: &[u8], sstride: usize, w: usize, h: usize) { + let mut tmp = [0u8; TMP_BUF_STRIDE * 16]; + let mut tmp2 = [0u8; TMP_BUF_STRIDE * 16]; + h264_mc20(&mut tmp, TMP_BUF_STRIDE, src, sstride, w, h); + h264_mc02(&mut tmp2, TMP_BUF_STRIDE, &src[sstride..], sstride, w, h); + mc_avg_tmp(dst, dstride, w, h, &tmp, &tmp2); +} + +fn h264_mc32(dst: &mut [u8], dstride: usize, src: &[u8], sstride: usize, w: usize, h: usize) { + let mut tmp = [0u8; TMP_BUF_STRIDE * 16]; + let mut tmp2 = [0u8; TMP_BUF_STRIDE * 16]; + h264_mc22(&mut tmp, TMP_BUF_STRIDE, src, sstride, w, h); + h264_mc02(&mut tmp2, TMP_BUF_STRIDE, &src[sstride..], sstride, w, h); + mc_avg_tmp(dst, dstride, w, h, &tmp, &tmp2); +} + +fn h264_mc33(dst: &mut [u8], dstride: usize, src: &[u8], sstride: 
usize, w: usize, h: usize) { + let mut tmp = [0u8; TMP_BUF_STRIDE * 16]; + let mut tmp2 = [0u8; TMP_BUF_STRIDE * 16]; + h264_mc20(&mut tmp, TMP_BUF_STRIDE, &src[1..], sstride, w, h); + h264_mc02(&mut tmp2, TMP_BUF_STRIDE, &src[sstride..], sstride, w, h); + mc_avg_tmp(dst, dstride, w, h, &tmp, &tmp2); +} + + +pub fn chroma_interp(dst: &mut [u8], dstride: usize, src: &[u8], sstride: usize, dx: u16, dy: u16, w: usize, h: usize) { + let a0 = 8 - dx; + let a1 = dx; + let b0 = 8 - dy; + let b1 = dy; + + let src1 = &src[sstride..]; + if a0 == 8 && b0 == 8 { + for (drow, line) in dst.chunks_mut(dstride).zip(src.chunks(sstride)).take(h) { + drow[..w].copy_from_slice(&line[..w]); + } + } else if a0 == 8 { + for (drow, (line0, line1)) in dst.chunks_mut(dstride).zip(src.chunks(sstride).zip(src1.chunks(sstride))).take(h) { + for (pix, (&a, &b)) in drow.iter_mut().take(w).zip(line0.iter().zip(line1.iter())) { + *pix = ((u16::from(a) * b0 + u16::from(b) * b1 + 4) >> 3) as u8; + } + } + } else if b0 == 8 { + for (drow, line) in dst.chunks_mut(dstride).zip(src.chunks(sstride)).take(h) { + let mut a = line[0]; + for (pix, &b) in drow.iter_mut().take(w).zip(line.iter().skip(1)) { + *pix = ((u16::from(a) * a0 + u16::from(b) * a1 + 4) >> 3) as u8; + a = b; + } + } + } else { + for (drow, (line0, line1)) in dst.chunks_mut(dstride).zip(src.chunks(sstride).zip(src1.chunks(sstride))).take(h) { + let mut a = line0[0]; + let mut c = line1[0]; + for (pix, (&b, &d)) in drow.iter_mut().take(w).zip(line0[1..].iter().zip(line1[1..].iter())) { + *pix = ((u16::from(a) * a0 * b0 + u16::from(b) * a1 * b0 + u16::from(c) * a0 * b1 + u16::from(d) * a1 * b1 + 0x20) >> 6) as u8; + a = b; + c = d; + } + } + } +} + +pub const H264_LUMA_INTERP: &[BlkInterpFunc] = &[ + h264_mc00, h264_mc01, h264_mc02, h264_mc03, + h264_mc10, h264_mc11, h264_mc12, h264_mc13, + h264_mc20, h264_mc21, h264_mc22, h264_mc23, + h264_mc30, h264_mc31, h264_mc32, h264_mc33 +]; diff --git a/nihav-itu/src/codecs/h264/dsp.rs b/nihav-itu/src/codecs/h264/dsp/mod.rs similarity index 81% rename from nihav-itu/src/codecs/h264/dsp.rs rename to nihav-itu/src/codecs/h264/dsp/mod.rs index 87dc8d9..69df7d6 100644 --- a/nihav-itu/src/codecs/h264/dsp.rs +++ b/nihav-itu/src/codecs/h264/dsp/mod.rs @@ -2,6 +2,15 @@ use nihav_core::frame::*; use nihav_codec_support::codecs::blockdsp::*; use nihav_codec_support::codecs::MV; +#[cfg(not(debug_assertions))] +mod release; +#[cfg(not(debug_assertions))] +use release::*; +#[cfg(debug_assertions)] +mod debug; +#[cfg(debug_assertions)] +use debug::*; + pub const CHROMA_QUANTS: [u8; 52] = [ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 29, 30, @@ -928,213 +937,6 @@ pub const IPRED_FUNCS16X16: [IPred8x8Func; 7] = [ fn clip_u8(val: i16) -> u8 { val.max(0).min(255) as u8 } -const TMP_BUF_STRIDE: usize = 32; - -fn interp_block1(dst: &mut [u8], dstride: usize, src: &[u8], sstride: usize, w: usize, h: usize, hor: bool, avg0: bool) { - let step = if hor { 1 } else { sstride }; - let mut idx = 0; - let avgidx = if avg0 { step * 2 } else { step * 3 }; - for dline in dst.chunks_mut(dstride).take(h) { - for (x, pix) in dline.iter_mut().take(w).enumerate() { - let t = clip_u8(( i16::from(src[idx + x]) - - 5 * i16::from(src[idx + x + step]) - + 20 * i16::from(src[idx + x + step * 2]) - + 20 * i16::from(src[idx + x + step * 3]) - - 5 * i16::from(src[idx + x + step * 4]) - + i16::from(src[idx + x + step * 5]) - + 16) >> 5); - *pix = ((u16::from(t) + u16::from(src[idx + x + avgidx]) + 1) >> 
1) as u8; - } - idx += sstride; - } -} - -fn interp_block2(dst: &mut [u8], dstride: usize, src: &[u8], sstride: usize, w: usize, h: usize, hor: bool) { - let step = if hor { 1 } else { sstride }; - let mut idx = 0; - for dline in dst.chunks_mut(dstride).take(h) { - for (x, pix) in dline.iter_mut().take(w).enumerate() { - *pix = clip_u8(( i16::from(src[idx + x]) - - 5 * i16::from(src[idx + x + step]) - + 20 * i16::from(src[idx + x + step * 2]) - + 20 * i16::from(src[idx + x + step * 3]) - - 5 * i16::from(src[idx + x + step * 4]) - + i16::from(src[idx + x + step * 5]) - + 16) >> 5); - } - idx += sstride; - } -} - -fn mc_avg_tmp(dst: &mut [u8], dstride: usize, w: usize, h: usize, tmp: &[u8], tmp2: &[u8]) { - for (dline, (sline0, sline1)) in dst.chunks_mut(dstride).zip(tmp.chunks(TMP_BUF_STRIDE).zip(tmp2.chunks(TMP_BUF_STRIDE))).take(h) { - for (pix, (&a, &b)) in dline.iter_mut().zip(sline0.iter().zip(sline1.iter())).take(w) { - *pix = ((u16::from(a) + u16::from(b) + 1) >> 1) as u8; - } - } -} - -fn h264_mc00(dst: &mut [u8], dstride: usize, src: &[u8], sstride: usize, w: usize, h: usize) { - for (dline, sline) in dst.chunks_mut(dstride).zip(src.chunks(sstride)).take(h) { - dline[..w].copy_from_slice(&sline[..w]); - } -} - -fn h264_mc01(dst: &mut [u8], dstride: usize, src: &[u8], sstride: usize, w: usize, h: usize) { - interp_block1(dst, dstride, &src[sstride * 2..], sstride, w, h, true, true); -} - -fn h264_mc02(dst: &mut [u8], dstride: usize, src: &[u8], sstride: usize, w: usize, h: usize) { - interp_block2(dst, dstride, &src[sstride * 2..], sstride, w, h, true); -} - -fn h264_mc03(dst: &mut [u8], dstride: usize, src: &[u8], sstride: usize, w: usize, h: usize) { - interp_block1(dst, dstride, &src[sstride * 2..], sstride, w, h, true, false); -} - -fn h264_mc10(dst: &mut [u8], dstride: usize, src: &[u8], sstride: usize, w: usize, h: usize) { - interp_block1(dst, dstride, &src[2..], sstride, w, h, false, true); -} - -fn h264_mc11(dst: &mut [u8], dstride: usize, src: &[u8], sstride: usize, w: usize, h: usize) { - let mut tmp = [0u8; TMP_BUF_STRIDE * 16]; - let mut tmp2 = [0u8; TMP_BUF_STRIDE * 16]; - h264_mc02(&mut tmp, TMP_BUF_STRIDE, src, sstride, w, h); - h264_mc20(&mut tmp2, TMP_BUF_STRIDE, src, sstride, w, h); - mc_avg_tmp(dst, dstride, w, h, &tmp, &tmp2); -} - -fn h264_mc12(dst: &mut [u8], dstride: usize, src: &[u8], sstride: usize, w: usize, h: usize) { - let mut tmp = [0u8; TMP_BUF_STRIDE * 16]; - let mut tmp2 = [0u8; TMP_BUF_STRIDE * 16]; - h264_mc02(&mut tmp, TMP_BUF_STRIDE, src, sstride, w, h); - h264_mc22(&mut tmp2, TMP_BUF_STRIDE, src, sstride, w, h); - mc_avg_tmp(dst, dstride, w, h, &tmp, &tmp2); -} - -fn h264_mc13(dst: &mut [u8], dstride: usize, src: &[u8], sstride: usize, w: usize, h: usize) { - let mut tmp = [0u8; TMP_BUF_STRIDE * 16]; - let mut tmp2 = [0u8; TMP_BUF_STRIDE * 16]; - h264_mc02(&mut tmp, TMP_BUF_STRIDE, src, sstride, w, h); - h264_mc20(&mut tmp2, TMP_BUF_STRIDE, &src[1..], sstride, w, h); - mc_avg_tmp(dst, dstride, w, h, &tmp, &tmp2); -} - -fn h264_mc20(dst: &mut [u8], dstride: usize, src: &[u8], sstride: usize, w: usize, h: usize) { - interp_block2(dst, dstride, &src[2..], sstride, w, h, false); -} - -fn h264_mc21(dst: &mut [u8], dstride: usize, src: &[u8], sstride: usize, w: usize, h: usize) { - let mut tmp = [0u8; TMP_BUF_STRIDE * 16]; - let mut tmp2 = [0u8; TMP_BUF_STRIDE * 16]; - h264_mc22(&mut tmp, TMP_BUF_STRIDE, src, sstride, w, h); - h264_mc20(&mut tmp2, TMP_BUF_STRIDE, src, sstride, w, h); - mc_avg_tmp(dst, dstride, w, h, &tmp, &tmp2); -} - -fn 
h264_mc22(dst: &mut [u8], dstride: usize, src: &[u8], sstride: usize, w: usize, h: usize) {
-    let mut tmp = [0i32; TMP_BUF_STRIDE * 16];
-    let mut idx = 0;
-    for dline in tmp.chunks_mut(TMP_BUF_STRIDE).take(h) {
-        for (x, pix) in dline.iter_mut().take(w + 5).enumerate() {
-            *pix = i32::from(src[idx + x])
-                   - 5 * i32::from(src[idx + x + sstride])
-                   + 20 * i32::from(src[idx + x + sstride * 2])
-                   + 20 * i32::from(src[idx + x + sstride * 3])
-                   - 5 * i32::from(src[idx + x + sstride * 4])
-                   + i32::from(src[idx + x + sstride * 5]);
-        }
-        idx += sstride;
-    }
-    for (dline, sline) in dst.chunks_mut(dstride).zip(tmp.chunks(TMP_BUF_STRIDE)).take(h) {
-        for (x, pix) in dline.iter_mut().take(w).enumerate() {
-            *pix = clip_u8(((sline[x] - 5 * sline[x + 1] + 20 * sline[x + 2] + 20 * sline[x + 3] - 5 * sline[x + 4] + sline[x + 5] + 512) >> 10) as i16);
-        }
-    }
-}
-
-fn h264_mc23(dst: &mut [u8], dstride: usize, src: &[u8], sstride: usize, w: usize, h: usize) {
-    let mut tmp = [0u8; TMP_BUF_STRIDE * 16];
-    let mut tmp2 = [0u8; TMP_BUF_STRIDE * 16];
-    h264_mc22(&mut tmp, TMP_BUF_STRIDE, src, sstride, w, h);
-    h264_mc20(&mut tmp2, TMP_BUF_STRIDE, &src[1..], sstride, w, h);
-    mc_avg_tmp(dst, dstride, w, h, &tmp, &tmp2);
-}
-
-fn h264_mc30(dst: &mut [u8], dstride: usize, src: &[u8], sstride: usize, w: usize, h: usize) {
-    interp_block1(dst, dstride, &src[2..], sstride, w, h, false, false);
-}
-
-fn h264_mc31(dst: &mut [u8], dstride: usize, src: &[u8], sstride: usize, w: usize, h: usize) {
-    let mut tmp = [0u8; TMP_BUF_STRIDE * 16];
-    let mut tmp2 = [0u8; TMP_BUF_STRIDE * 16];
-    h264_mc20(&mut tmp, TMP_BUF_STRIDE, src, sstride, w, h);
-    h264_mc02(&mut tmp2, TMP_BUF_STRIDE, &src[sstride..], sstride, w, h);
-    mc_avg_tmp(dst, dstride, w, h, &tmp, &tmp2);
-}
-
-fn h264_mc32(dst: &mut [u8], dstride: usize, src: &[u8], sstride: usize, w: usize, h: usize) {
-    let mut tmp = [0u8; TMP_BUF_STRIDE * 16];
-    let mut tmp2 = [0u8; TMP_BUF_STRIDE * 16];
-    h264_mc22(&mut tmp, TMP_BUF_STRIDE, src, sstride, w, h);
-    h264_mc02(&mut tmp2, TMP_BUF_STRIDE, &src[sstride..], sstride, w, h);
-    mc_avg_tmp(dst, dstride, w, h, &tmp, &tmp2);
-}
-
-fn h264_mc33(dst: &mut [u8], dstride: usize, src: &[u8], sstride: usize, w: usize, h: usize) {
-    let mut tmp = [0u8; TMP_BUF_STRIDE * 16];
-    let mut tmp2 = [0u8; TMP_BUF_STRIDE * 16];
-    h264_mc20(&mut tmp, TMP_BUF_STRIDE, &src[1..], sstride, w, h);
-    h264_mc02(&mut tmp2, TMP_BUF_STRIDE, &src[sstride..], sstride, w, h);
-    mc_avg_tmp(dst, dstride, w, h, &tmp, &tmp2);
-}
-
-
-fn chroma_interp(dst: &mut [u8], dstride: usize, src: &[u8], sstride: usize, dx: u16, dy: u16, w: usize, h: usize) {
-    let a0 = 8 - dx;
-    let a1 = dx;
-    let b0 = 8 - dy;
-    let b1 = dy;
-
-    let src1 = &src[sstride..];
-    if a0 == 8 && b0 == 8 {
-        for (drow, line) in dst.chunks_mut(dstride).zip(src.chunks(sstride)).take(h) {
-            drow[..w].copy_from_slice(&line[..w]);
-        }
-    } else if a0 == 8 {
-        for (drow, (line0, line1)) in dst.chunks_mut(dstride).zip(src.chunks(sstride).zip(src1.chunks(sstride))).take(h) {
-            for (pix, (&a, &b)) in drow.iter_mut().take(w).zip(line0.iter().zip(line1.iter())) {
-                *pix = ((u16::from(a) * b0 + u16::from(b) * b1 + 4) >> 3) as u8;
-            }
-        }
-    } else if b0 == 8 {
-        for (drow, line) in dst.chunks_mut(dstride).zip(src.chunks(sstride)).take(h) {
-            let mut a = line[0];
-            for (pix, &b) in drow.iter_mut().take(w).zip(line.iter().skip(1)) {
-                *pix = ((u16::from(a) * a0 + u16::from(b) * a1 + 4) >> 3) as u8;
-                a = b;
-            }
-        }
-    } else {
-        for (drow, (line0, line1)) in
dst.chunks_mut(dstride).zip(src.chunks(sstride).zip(src1.chunks(sstride))).take(h) { - let mut a = line0[0]; - let mut c = line1[0]; - for (pix, (&b, &d)) in drow.iter_mut().take(w).zip(line0[1..].iter().zip(line1[1..].iter())) { - *pix = ((u16::from(a) * a0 * b0 + u16::from(b) * a1 * b0 + u16::from(c) * a0 * b1 + u16::from(d) * a1 * b1 + 0x20) >> 6) as u8; - a = b; - c = d; - } - } - } -} - -const H264_LUMA_INTERP: &[BlkInterpFunc] = &[ - h264_mc00, h264_mc01, h264_mc02, h264_mc03, - h264_mc10, h264_mc11, h264_mc12, h264_mc13, - h264_mc20, h264_mc21, h264_mc22, h264_mc23, - h264_mc30, h264_mc31, h264_mc32, h264_mc33 -]; - pub fn do_mc(frm: &mut NASimpleVideoFrame, refpic: NAVideoBufferRef, xpos: usize, ypos: usize, w: usize, h: usize, mv: MV) { let mode = ((mv.x & 3) + (mv.y & 3) * 4) as usize; copy_block(frm, refpic.clone(), 0, xpos, ypos, mv.x >> 2, mv.y >> 2, w, h, 2, 3, mode, H264_LUMA_INTERP); diff --git a/nihav-itu/src/codecs/h264/dsp/release.rs b/nihav-itu/src/codecs/h264/dsp/release.rs new file mode 100644 index 0000000..87b1bc5 --- /dev/null +++ b/nihav-itu/src/codecs/h264/dsp/release.rs @@ -0,0 +1,284 @@ +use nihav_codec_support::codecs::blockdsp::*; + +use super::clip_u8; + +const TMP_BUF_STRIDE: usize = 32; + +fn interp_block1(dst: &mut [u8], dstride: usize, src: &[u8], sstride: usize, w: usize, h: usize, hor: bool, avg0: bool) { + unsafe { + let step = if hor { 1 } else { sstride }; + let avgidx = if avg0 { step * 2 } else { step * 3 }; + let mut src = src.as_ptr(); + let mut dst = dst.as_mut_ptr(); + for _ in 0..h { + for _ in 0..w { + let t = clip_u8(( i16::from(*src) + - 5 * i16::from(*src.add(step)) + + 20 * i16::from(*src.add(step * 2)) + + 20 * i16::from(*src.add(step * 3)) + - 5 * i16::from(*src.add(step * 4)) + + i16::from(*src.add(step * 5)) + + 16) >> 5); + *dst = ((u16::from(t) + u16::from(*src.add(avgidx)) + 1) >> 1) as u8; + src = src.add(1); + dst = dst.add(1); + } + dst = dst.sub(w).add(dstride); + src = src.sub(w).add(sstride); + } + } +} + +fn interp_block2(dst: &mut [u8], dstride: usize, src: &[u8], sstride: usize, w: usize, h: usize, hor: bool) { + unsafe { + let step = if hor { 1 } else { sstride }; + let mut pix = dst.as_mut_ptr(); + let mut src = src.as_ptr(); + for _ in 0..h { + for x in 0..w { + *pix.add(x) = clip_u8(( i16::from(*src) + - 5 * i16::from(*src.add(step)) + + 20 * i16::from(*src.add(step * 2)) + + 20 * i16::from(*src.add(step * 3)) + - 5 * i16::from(*src.add(step * 4)) + + i16::from(*src.add(step * 5)) + + 16) >> 5); + src = src.add(1); + } + pix = pix.add(dstride); + src = src.sub(w); + src = src.add(sstride); + } + } +} + +fn mc_avg_tmp(dst: &mut [u8], dstride: usize, w: usize, h: usize, tmp: &[u8], tmp2: &[u8]) { + unsafe { + let mut src1 = tmp.as_ptr(); + let mut src2 = tmp2.as_ptr(); + let mut dst = dst.as_mut_ptr(); + for _ in 0..h { + for x in 0..w { + let a = *src1.add(x); + let b = *src2.add(x); + *dst.add(x) = ((u16::from(a) + u16::from(b) + 1) >> 1) as u8; + } + dst = dst.add(dstride); + src1 = src1.add(TMP_BUF_STRIDE); + src2 = src2.add(TMP_BUF_STRIDE); + } + } +} + +fn h264_mc00(dst: &mut [u8], dstride: usize, src: &[u8], sstride: usize, w: usize, h: usize) { + unsafe { + let mut src = src.as_ptr(); + let mut dst = dst.as_mut_ptr(); + for _ in 0..h { + std::ptr::copy_nonoverlapping(src, dst, w); + src = src.add(sstride); + dst = dst.add(dstride); + } + } +} + +fn h264_mc01(dst: &mut [u8], dstride: usize, src: &[u8], sstride: usize, w: usize, h: usize) { + interp_block1(dst, dstride, &src[sstride * 2..], sstride, w, h, true, 
true); +} + +fn h264_mc02(dst: &mut [u8], dstride: usize, src: &[u8], sstride: usize, w: usize, h: usize) { + interp_block2(dst, dstride, &src[sstride * 2..], sstride, w, h, true); +} + +fn h264_mc03(dst: &mut [u8], dstride: usize, src: &[u8], sstride: usize, w: usize, h: usize) { + interp_block1(dst, dstride, &src[sstride * 2..], sstride, w, h, true, false); +} + +fn h264_mc10(dst: &mut [u8], dstride: usize, src: &[u8], sstride: usize, w: usize, h: usize) { + interp_block1(dst, dstride, &src[2..], sstride, w, h, false, true); +} + +fn h264_mc11(dst: &mut [u8], dstride: usize, src: &[u8], sstride: usize, w: usize, h: usize) { + let mut tmp : [u8; TMP_BUF_STRIDE * 16] = unsafe { std::mem::MaybeUninit::uninit().assume_init() }; + let mut tmp2: [u8; TMP_BUF_STRIDE * 16] = unsafe { std::mem::MaybeUninit::uninit().assume_init() }; + h264_mc02(&mut tmp, TMP_BUF_STRIDE, src, sstride, w, h); + h264_mc20(&mut tmp2, TMP_BUF_STRIDE, src, sstride, w, h); + mc_avg_tmp(dst, dstride, w, h, &tmp, &tmp2); +} + +fn h264_mc12(dst: &mut [u8], dstride: usize, src: &[u8], sstride: usize, w: usize, h: usize) { + let mut tmp : [u8; TMP_BUF_STRIDE * 16] = unsafe { std::mem::MaybeUninit::uninit().assume_init() }; + let mut tmp2: [u8; TMP_BUF_STRIDE * 16] = unsafe { std::mem::MaybeUninit::uninit().assume_init() }; + h264_mc02(&mut tmp, TMP_BUF_STRIDE, src, sstride, w, h); + h264_mc22(&mut tmp2, TMP_BUF_STRIDE, src, sstride, w, h); + mc_avg_tmp(dst, dstride, w, h, &tmp, &tmp2); +} + +fn h264_mc13(dst: &mut [u8], dstride: usize, src: &[u8], sstride: usize, w: usize, h: usize) { + let mut tmp : [u8; TMP_BUF_STRIDE * 16] = unsafe { std::mem::MaybeUninit::uninit().assume_init() }; + let mut tmp2: [u8; TMP_BUF_STRIDE * 16] = unsafe { std::mem::MaybeUninit::uninit().assume_init() }; + h264_mc02(&mut tmp, TMP_BUF_STRIDE, src, sstride, w, h); + h264_mc20(&mut tmp2, TMP_BUF_STRIDE, &src[1..], sstride, w, h); + mc_avg_tmp(dst, dstride, w, h, &tmp, &tmp2); +} + +fn h264_mc20(dst: &mut [u8], dstride: usize, src: &[u8], sstride: usize, w: usize, h: usize) { + interp_block2(dst, dstride, &src[2..], sstride, w, h, false); +} + +fn h264_mc21(dst: &mut [u8], dstride: usize, src: &[u8], sstride: usize, w: usize, h: usize) { + let mut tmp : [u8; TMP_BUF_STRIDE * 16] = unsafe { std::mem::MaybeUninit::uninit().assume_init() }; + let mut tmp2: [u8; TMP_BUF_STRIDE * 16] = unsafe { std::mem::MaybeUninit::uninit().assume_init() }; + h264_mc22(&mut tmp, TMP_BUF_STRIDE, src, sstride, w, h); + h264_mc20(&mut tmp2, TMP_BUF_STRIDE, src, sstride, w, h); + mc_avg_tmp(dst, dstride, w, h, &tmp, &tmp2); +} + +fn h264_mc22(dst: &mut [u8], dstride: usize, src: &[u8], sstride: usize, w: usize, h: usize) { + let mut tmp: [i32; TMP_BUF_STRIDE * 16] = unsafe { std::mem::MaybeUninit::uninit().assume_init() }; + unsafe { + let mut src = src.as_ptr(); + let mut dst = tmp.as_mut_ptr(); + for _ in 0..h { + for _ in 0..w+5 { + *dst = i32::from(*src) + - 5 * i32::from(*src.add(sstride)) + + 20 * i32::from(*src.add(sstride * 2)) + + 20 * i32::from(*src.add(sstride * 3)) + - 5 * i32::from(*src.add(sstride * 4)) + + i32::from(*src.add(sstride * 5)); + dst = dst.add(1); + src = src.add(1); + } + src = src.sub(w+5).add(sstride); + dst = dst.sub(w+5).add(TMP_BUF_STRIDE); + } + } + unsafe { + let mut dst = dst.as_mut_ptr(); + let mut src = tmp.as_ptr(); + for _ in 0..h { + for _ in 0..w { + *dst = clip_u8(((*src - 5 * *src.add(1) + 20 * *src.add(2) + 20 * *src.add(3) - 5 * *src.add(4) + *src.add(5) + 512) >> 10) as i16); + dst = dst.add(1); + src = src.add(1); + } + dst 
= dst.sub(w).add(dstride); + src = src.sub(w).add(TMP_BUF_STRIDE); + } + } +} + +fn h264_mc23(dst: &mut [u8], dstride: usize, src: &[u8], sstride: usize, w: usize, h: usize) { + let mut tmp : [u8; TMP_BUF_STRIDE * 16] = unsafe { std::mem::MaybeUninit::uninit().assume_init() }; + let mut tmp2: [u8; TMP_BUF_STRIDE * 16] = unsafe { std::mem::MaybeUninit::uninit().assume_init() }; + h264_mc22(&mut tmp, TMP_BUF_STRIDE, src, sstride, w, h); + h264_mc20(&mut tmp2, TMP_BUF_STRIDE, &src[1..], sstride, w, h); + mc_avg_tmp(dst, dstride, w, h, &tmp, &tmp2); +} + +fn h264_mc30(dst: &mut [u8], dstride: usize, src: &[u8], sstride: usize, w: usize, h: usize) { + interp_block1(dst, dstride, &src[2..], sstride, w, h, false, false); +} + +fn h264_mc31(dst: &mut [u8], dstride: usize, src: &[u8], sstride: usize, w: usize, h: usize) { + let mut tmp : [u8; TMP_BUF_STRIDE * 16] = unsafe { std::mem::MaybeUninit::uninit().assume_init() }; + let mut tmp2: [u8; TMP_BUF_STRIDE * 16] = unsafe { std::mem::MaybeUninit::uninit().assume_init() }; + h264_mc20(&mut tmp, TMP_BUF_STRIDE, src, sstride, w, h); + h264_mc02(&mut tmp2, TMP_BUF_STRIDE, &src[sstride..], sstride, w, h); + mc_avg_tmp(dst, dstride, w, h, &tmp, &tmp2); +} + +fn h264_mc32(dst: &mut [u8], dstride: usize, src: &[u8], sstride: usize, w: usize, h: usize) { + let mut tmp : [u8; TMP_BUF_STRIDE * 16] = unsafe { std::mem::MaybeUninit::uninit().assume_init() }; + let mut tmp2: [u8; TMP_BUF_STRIDE * 16] = unsafe { std::mem::MaybeUninit::uninit().assume_init() }; + h264_mc22(&mut tmp, TMP_BUF_STRIDE, src, sstride, w, h); + h264_mc02(&mut tmp2, TMP_BUF_STRIDE, &src[sstride..], sstride, w, h); + mc_avg_tmp(dst, dstride, w, h, &tmp, &tmp2); +} + +fn h264_mc33(dst: &mut [u8], dstride: usize, src: &[u8], sstride: usize, w: usize, h: usize) { + let mut tmp : [u8; TMP_BUF_STRIDE * 16] = unsafe { std::mem::MaybeUninit::uninit().assume_init() }; + let mut tmp2: [u8; TMP_BUF_STRIDE * 16] = unsafe { std::mem::MaybeUninit::uninit().assume_init() }; + h264_mc20(&mut tmp, TMP_BUF_STRIDE, &src[1..], sstride, w, h); + h264_mc02(&mut tmp2, TMP_BUF_STRIDE, &src[sstride..], sstride, w, h); + mc_avg_tmp(dst, dstride, w, h, &tmp, &tmp2); +} + + +pub fn chroma_interp(dst: &mut [u8], dstride: usize, src: &[u8], sstride: usize, dx: u16, dy: u16, w: usize, h: usize) { + let a0 = 8 - dx; + let a1 = dx; + let b0 = 8 - dy; + let b1 = dy; + + if a0 == 8 && b0 == 8 { + unsafe { + let mut src = src.as_ptr(); + let mut dst = dst.as_mut_ptr(); + for _ in 0..h { + std::ptr::copy_nonoverlapping(src, dst, w); + src = src.add(sstride); + dst = dst.add(dstride); + } + } + } else if a0 == 8 { + unsafe { + let mut src0 = src.as_ptr(); + let mut src1 = src0.add(sstride); + let mut dst = dst.as_mut_ptr(); + for _ in 0..h { + for x in 0..w { + let a = *src0.add(x); + let b = *src1.add(x); + *dst.add(x) = ((u16::from(a) * b0 + u16::from(b) * b1 + 4) >> 3) as u8; + } + src0 = src0.add(sstride); + src1 = src1.add(sstride); + dst = dst.add(dstride); + } + } + } else if b0 == 8 { + unsafe { + let mut src = src.as_ptr(); + let mut dst = dst.as_mut_ptr(); + for _ in 0..h { + let mut a = *src; + for x in 0..w { + let b = *src.add(x + 1); + *dst.add(x) = ((u16::from(a) * a0 + u16::from(b) * a1 + 4) >> 3) as u8; + a = b; + } + src = src.add(sstride); + dst = dst.add(dstride); + } + } + } else { + unsafe { + let mut src0 = src.as_ptr(); + let mut src1 = src0.add(sstride); + let mut dst = dst.as_mut_ptr(); + for _ in 0..h { + let mut a = *src0; + let mut c = *src1; + for x in 0..w { + let b = *src0.add(x + 1); + let d = 
*src1.add(x + 1); + *dst.add(x) = ((u16::from(a) * a0 * b0 + u16::from(b) * a1 * b0 + u16::from(c) * a0 * b1 + u16::from(d) * a1 * b1 + 0x20) >> 6) as u8; + a = b; + c = d; + } + src0 = src0.add(sstride); + src1 = src1.add(sstride); + dst = dst.add(dstride); + } + } + } +} + +pub const H264_LUMA_INTERP: &[BlkInterpFunc] = &[ + h264_mc00, h264_mc01, h264_mc02, h264_mc03, + h264_mc10, h264_mc11, h264_mc12, h264_mc13, + h264_mc20, h264_mc21, h264_mc22, h264_mc23, + h264_mc30, h264_mc31, h264_mc32, h264_mc33 +];
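
Note on the pattern that ties the two new files together: dsp/mod.rs pulls in either release.rs or debug.rs at compile time via #[cfg(debug_assertions)], so release builds get the pointer-based fast path while debug builds keep the safe, bounds-checked slice code, with no runtime dispatch. Below is a minimal standalone sketch of that selection pattern; the module and function names (checked, fast, halve) are illustrative only and do not appear in nihav.

// Minimal sketch of cfg(debug_assertions)-based implementation selection.
// Module and function names here are hypothetical, not part of nihav.

#[cfg(debug_assertions)]
mod checked {
    // Safe, slice-based version compiled into debug builds.
    pub fn halve(dst: &mut [u8], src: &[u8]) {
        for (d, &s) in dst.iter_mut().zip(src.iter()) {
            *d = s >> 1;
        }
    }
}

#[cfg(not(debug_assertions))]
mod fast {
    // Pointer-based version compiled into release builds.
    pub fn halve(dst: &mut [u8], src: &[u8]) {
        let len = dst.len().min(src.len());
        unsafe {
            let mut s = src.as_ptr();
            let mut d = dst.as_mut_ptr();
            for _ in 0..len {
                *d = *s >> 1;
                s = s.add(1);
                d = d.add(1);
            }
        }
    }
}

// Whichever module was compiled in re-exports the same name,
// so callers never know which implementation they got.
#[cfg(debug_assertions)]
use checked::halve;
#[cfg(not(debug_assertions))]
use fast::halve;

fn main() {
    let src = [2u8, 4, 6, 8];
    let mut dst = [0u8; 4];
    halve(&mut dst, &src);
    assert_eq!(dst, [1, 2, 3, 4]);
}

Since both release.rs and debug.rs export the same public names, chroma_interp and H264_LUMA_INTERP stay visible to the rest of dsp/mod.rs (e.g. do_mc) regardless of the build profile.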