X-Git-Url: https://git.nihav.org/?a=blobdiff_plain;f=nihav-itu%2Fsrc%2Fcodecs%2Fh264%2Fdsp%2Fmod.rs;h=a2a58a45dbf8a14b702598d06834eafa28413d56;hb=fe64781def821c3900abf44bdfbb38f3b3d21345;hp=2d98ddf8d33a041e4ed173102611b1938f25a6b7;hpb=2f9923e6d1505270e2647ba3c3251dd6cfbc7c09;p=nihav.git

diff --git a/nihav-itu/src/codecs/h264/dsp/mod.rs b/nihav-itu/src/codecs/h264/dsp/mod.rs
index 2d98ddf..a2a58a4 100644
--- a/nihav-itu/src/codecs/h264/dsp/mod.rs
+++ b/nihav-itu/src/codecs/h264/dsp/mod.rs
@@ -1,5 +1,7 @@
 mod mc;
-pub use mc::H264MC;
+pub use mc::{H264MC, McBlock};
+#[cfg(target_arch="x86_64")]
+use std::arch::asm;
 
 pub const CHROMA_QUANTS: [u8; 52] = [
      0,  1,  2,  3,  4,  5,  6,  7,  8,  9, 10, 11, 12, 13, 14, 15,
@@ -128,12 +130,12 @@ pub fn idct_luma_dc(blk: &mut [i16; 16], qp: u8) {
     for i in 0..4 {
         transform!(luma_dc; blk[i], blk[i + 4], blk[i + 8], blk[i + 12]);
     }
-    for row in blk.chunks_mut(4) {
+    for row in blk.chunks_exact_mut(4) {
         transform!(luma_dc; row[0], row[1], row[2], row[3]);
     }
 }
 
-pub fn idct(blk: &mut [i16; 16], qp: u8, quant_dc: bool) {
+pub fn idct_skip_dc(blk: &mut [i16; 16], qp: u8) {
     const BLK_INDEX: [usize; 16] = [
         0, 2, 0, 2,
         2, 1, 2, 1,
@@ -142,11 +144,30 @@ pub fn idct(blk: &mut [i16; 16], qp: u8, quant_dc: bool) {
     ];
     let qidx = (qp % 6) as usize;
     let shift = qp / 6;
-    let start = if quant_dc { 0 } else { 1 };
-    for (el, &idx) in blk.iter_mut().zip(BLK_INDEX.iter()).skip(start) {
+    for (el, &idx) in blk.iter_mut().zip(BLK_INDEX.iter()).skip(1) {
         *el = (*el * LEVEL_SCALE[idx][qidx]) << shift;
     }
-    for row in blk.chunks_mut(4) {
+    for row in blk.chunks_exact_mut(4) {
+        transform!(row[0], row[1], row[2], row[3], 0);
+    }
+    for i in 0..4 {
+        transform!(blk[i], blk[i + 4], blk[i + 8], blk[i + 12], 6);
+    }
+}
+
+pub fn idct(blk: &mut [i16; 16], qp: u8) {
+    const BLK_INDEX: [usize; 16] = [
+        0, 2, 0, 2,
+        2, 1, 2, 1,
+        0, 2, 0, 2,
+        2, 1, 2, 1
+    ];
+    let qidx = (qp % 6) as usize;
+    let shift = qp / 6;
+    for (el, &idx) in blk.iter_mut().zip(BLK_INDEX.iter()) {
+        *el = (*el * LEVEL_SCALE[idx][qidx]) << shift;
+    }
+    for row in blk.chunks_exact_mut(4) {
         transform!(row[0], row[1], row[2], row[3], 0);
     }
     for i in 0..4 {
@@ -226,7 +247,7 @@ pub fn idct8x8(blk: &mut [i16; 64], qp: u8) {
             *dst = i32::from(src).wrapping_mul(i32::from(qmat[idx])).wrapping_add(bias) >> shift;
         }
     }
-    for row in tmp.chunks_mut(8) {
+    for row in tmp.chunks_exact_mut(8) {
         transform!(row[0], row[1], row[2], row[3], row[4], row[5], row[6], row[7]);
     }
     for col in 0..8 {
@@ -240,7 +261,7 @@ pub fn idct8x8(blk: &mut [i16; 64], qp: u8) {
 
 pub fn add_coeffs(dst: &mut [u8], offset: usize, stride: usize, coeffs: &[i16]) {
     let out = &mut dst[offset..][..stride * 3 + 4];
-    for (line, src) in out.chunks_mut(stride).take(4).zip(coeffs.chunks(4)) {
+    for (line, src) in out.chunks_mut(stride).take(4).zip(coeffs.chunks_exact(4)) {
         for (dst, src) in line.iter_mut().take(4).zip(src.iter()) {
             *dst = (i32::from(*dst) + i32::from(*src)).max(0).min(255) as u8;
         }
@@ -249,7 +270,7 @@ pub fn add_coeffs(dst: &mut [u8], offset: usize, stride: usize, coeffs: &[i16])
 
 pub fn add_coeffs8(dst: &mut [u8], offset: usize, stride: usize, coeffs: &[i16; 64]) {
     let out = &mut dst[offset..];
-    for (line, src) in out.chunks_mut(stride).take(8).zip(coeffs.chunks(8)) {
+    for (line, src) in out.chunks_mut(stride).take(8).zip(coeffs.chunks_exact(8)) {
         for (dst, src) in line.iter_mut().take(8).zip(src.iter()) {
             *dst = (i32::from(*dst) + i32::from(*src)).max(0).min(255) as u8;
         }
@@ -349,7 +370,7 @@ fn ipred_4x4_diag_down_left(buf: &mut [u8], stride: usize, top: &[u8], _left: &[
 fn ipred_4x4_diag_down_right(buf: &mut [u8], stride: usize, top: &[u8], left: &[u8], _tr: &[u8]) {
     let mut t: [u16; 5] = [0; 5];
     t[0] = u16::from(left[0]);
-    load(&mut t[1..], &top);
+    load(&mut t[1..], top);
     let mut l: [u16; 5] = [0; 5];
     load(&mut l, left);
     let dst = buf;
@@ -367,7 +388,7 @@ fn ipred_4x4_diag_down_right(buf: &mut [u8], stride: usize, top: &[u8], left: &[
 fn ipred_4x4_ver_right(buf: &mut [u8], stride: usize, top: &[u8], left: &[u8], _tr: &[u8]) {
     let mut t: [u16; 5] = [0; 5];
     t[0] = u16::from(left[0]);
-    load(&mut t[1..], &top);
+    load(&mut t[1..], top);
     let mut l: [u16; 5] = [0; 5];
     load(&mut l, left);
     let dst = buf;
@@ -395,7 +416,7 @@ fn ipred_4x4_ver_right(buf: &mut [u8], stride: usize, top: &[u8], left: &[u8], _
 }
 fn ipred_4x4_ver_left(buf: &mut [u8], stride: usize, top: &[u8], _left: &[u8], tr: &[u8]) {
     let mut t: [u16; 8] = [0; 8];
-    load(&mut t[..4], &top);
+    load(&mut t[..4], top);
     load(&mut t[4..], tr);
     let dst = buf;
 
@@ -425,7 +446,7 @@ fn ipred_4x4_ver_left(buf: &mut [u8], stride: usize, top: &[u8], _left: &[u8], t
 fn ipred_4x4_hor_down(buf: &mut [u8], stride: usize, top: &[u8], left: &[u8], _tr: &[u8]) {
     let mut t: [u16; 5] = [0; 5];
     t[0] = u16::from(left[0]);
-    load(&mut t[1..], &top);
+    load(&mut t[1..], top);
     let mut l: [u16; 5] = [0; 5];
     load(&mut l, left);
     let dst = buf;
@@ -739,7 +760,7 @@ fn ipred_8x8_dc(buf: &mut [u8], stride: usize, top: &[u8], left: &[u8]) {
     let mut l = [0; 8];
     load(&mut l, &left[1..]);
     let mut t = [0; 8];
-    load(&mut t, &top);
+    load(&mut t, top);
 
     let dc0 = ((t[0] + t[1] + t[2] + t[3] + l[0] + l[1] + l[2] + l[3] + 4) >> 3) as u8;
     let sum1 = t[4] + t[5] + t[6] + t[7];
@@ -963,32 +984,122 @@ fn check_filter(buf: &[u8], off: usize, step: usize, alpha: i16, beta: i16) -> b
     (p0 - q0).abs() < alpha && (p1 - p0).abs() < beta && (q1 - q0).abs() < beta
 }
 
+#[cfg(not(target_arch="x86_64"))]
+fn check_filter4(buf: &[u8], mut off: usize, step: usize, stride: usize, alpha: i16, beta: i16) -> [bool; 4] {
+    let mut flags = [false; 4];
+    for flag in flags.iter_mut() {
+        let p1 = i16::from(buf[off - step * 2]);
+        let p0 = i16::from(buf[off - step]);
+        let q0 = i16::from(buf[off]);
+        let q1 = i16::from(buf[off + step]);
+        *flag = (p0 - q0).abs() < alpha && (p1 - p0).abs() < beta && (q1 - q0).abs() < beta;
+        off += stride;
+    }
+    flags
+}
+
+#[cfg(target_arch="x86_64")]
+fn check_filter4(buf: &[u8], off: usize, step: usize, stride: usize, alpha: i16, beta: i16) -> [bool; 4] {
+    unsafe {
+        let mut flags = [false; 4];
+        let src = buf[off - step * 2..].as_ptr();
+        let load_stride = step.max(stride);
+        let fptr = flags.as_mut_ptr();
+        let tflag = u32::from(step == 1);
+        asm! {
+            // load block
+            "pxor xmm4, xmm4",
+            "movd xmm0, dword ptr [{src}]",
+            "lea {tmp}, [{src} + {stride} * 2]",
+            "movd xmm1, dword ptr [{src} + {stride}]",
+            "movd xmm2, dword ptr [{tmp}]",
+            "movd xmm3, dword ptr [{tmp} + {stride}]",
+            "punpcklbw xmm0, xmm4",
+            "punpcklbw xmm1, xmm4",
+            "punpcklbw xmm2, xmm4",
+            "punpcklbw xmm3, xmm4",
+
+            // transpose block if necessary so it's always processed by rows
+            "test {tflag:e}, {tflag:e}",
+            "jz 1f",
+            "punpcklwd xmm0, xmm1",
+            "movhlps xmm4, xmm0",
+            "punpcklwd xmm2, xmm3",
+            "movhlps xmm1, xmm2",
+            "punpckldq xmm0, xmm2",
+            "punpckldq xmm4, xmm1",
+            "movhlps xmm1, xmm0",
+            "movhlps xmm3, xmm4",
+            "movaps xmm2, xmm4",
+            "1:",
+
+            // calculate deltas and flags
+            "movd xmm4, {alpha:r}",
+            "movd xmm5, {beta:r}",
+            "psubw xmm0, xmm1",
+            "psubw xmm1, xmm2",
+            "psubw xmm3, xmm2",
+            "pshuflw xmm4, xmm4, 0",
+            "pshuflw xmm5, xmm5, 0",
+            "pabsw xmm0, xmm0", // |p1 - p0|
+            "pabsw xmm1, xmm1", // |p0 - q0|
+            "pabsw xmm2, xmm3", // |q1 - q0|
+            "movaps xmm3, xmm5",
+            "pcmpgtw xmm4, xmm1",
+            "pcmpgtw xmm5, xmm0",
+            "pcmpgtw xmm3, xmm2",
+            "pand xmm4, xmm5",
+            "pand xmm4, xmm3",
+            "packsswb xmm4, xmm4",
+            "movd [{flags}], xmm4",
+            tmp = out(reg) _,
+            src = in(reg) src,
+            stride = in(reg) load_stride,
+            alpha = in(reg) alpha,
+            beta = in(reg) beta,
+            flags = in(reg) fptr,
+            tflag = in(reg) tflag,
+            out("xmm0") _,
+            out("xmm1") _,
+            out("xmm2") _,
+            out("xmm3") _,
+            out("xmm4") _,
+            out("xmm5") _,
+        }
+        flags
+    }
+}
+
 pub fn loop_filter_lumaedge_v(dst: &mut [u8], mut off: usize, stride: usize, alpha: i16, beta: i16) {
-    for _ in 0..4 {
-        if check_filter(dst, off, 1, alpha, beta) {
+    let flags = check_filter4(dst, off, 1, stride, alpha, beta);
+    for &flag in flags.iter() {
+        if flag {
             loop_filter!(lumaedge; dst, off, 1, alpha, beta);
         }
         off += stride;
     }
 }
 pub fn loop_filter_lumaedge_h(dst: &mut [u8], off: usize, stride: usize, alpha: i16, beta: i16) {
-    for x in 0..4 {
-        if check_filter(dst, off + x, stride, alpha, beta) {
+    let flags = check_filter4(dst, off, stride, 1, alpha, beta);
+    for (x, &flag) in flags.iter().enumerate() {
+        if flag {
             loop_filter!(lumaedge; dst, off + x, stride, alpha, beta);
         }
     }
 }
 pub fn loop_filter_lumanormal_v(dst: &mut [u8], mut off: usize, stride: usize, alpha: i16, beta: i16, tc0: i16) {
-    for _ in 0..4 {
-        if check_filter(dst, off, 1, alpha, beta) {
+    let flags = check_filter4(dst, off, 1, stride, alpha, beta);
+    for &flag in flags.iter() {
+        if flag {
             loop_filter!(lumanormal; dst, off, 1, tc0, beta);
         }
         off += stride;
     }
 }
 pub fn loop_filter_lumanormal_h(dst: &mut [u8], off: usize, stride: usize, alpha: i16, beta: i16, tc0: i16) {
-    for x in 0..4 {
-        if check_filter(dst, off + x, stride, alpha, beta) {
+    let flags = check_filter4(dst, off, stride, 1, alpha, beta);
+    for (x, &flag) in flags.iter().enumerate() {
+        if flag {
             loop_filter!(lumanormal; dst, off + x, stride, tc0, beta);
        }
    }
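
For reference: both versions of the new check_filter4 evaluate the same per-edge
condition as the scalar check_filter (|p0 - q0| < alpha, |p1 - p0| < beta and
|q1 - q0| < beta), just four edges at a time; the asm variant combines the three
pcmpgtw masks with pand and narrows them to four flag bytes with packsswb. A
minimal standalone sketch of that rule, with made-up buffer contents, offsets
and thresholds rather than values from the decoder:

    // Mirror of the scalar check_filter4 fallback from the patch above:
    // an edge passes only when |p0 - q0| < alpha, |p1 - p0| < beta and
    // |q1 - q0| < beta.
    fn check_filter4_ref(buf: &[u8], mut off: usize, step: usize, stride: usize,
                         alpha: i16, beta: i16) -> [bool; 4] {
        let mut flags = [false; 4];
        for flag in flags.iter_mut() {
            let p1 = i16::from(buf[off - step * 2]);
            let p0 = i16::from(buf[off - step]);
            let q0 = i16::from(buf[off]);
            let q1 = i16::from(buf[off + step]);
            *flag = (p0 - q0).abs() < alpha
                 && (p1 - p0).abs() < beta
                 && (q1 - q0).abs() < beta;
            off += stride; // next edge (next row for a vertical edge)
        }
        flags
    }

    fn main() {
        // Four rows of eight pixels with a vertical edge between columns 3
        // and 4: rows 0-1 step gently across the edge, rows 2-3 jump by 120.
        let buf: [u8; 32] = [
            80, 80, 80, 82,  84,  84,  84,  84,
            80, 80, 80, 82,  84,  84,  84,  84,
            80, 80, 80, 80, 200, 200, 200, 200,
            80, 80, 80, 80, 200, 200, 200, 200,
        ];
        // off = 4 points at q0 of row 0; step = 1 walks across the edge,
        // stride = 8 moves down one row; alpha = 8 and beta = 4 are arbitrary.
        let flags = check_filter4_ref(&buf, 4, 1, 8, 8, 4);
        assert_eq!(flags, [true, true, false, false]);
    }

Rows 0 and 1 differ by only 2 across the edge and pass all three threshold
tests; rows 2 and 3 have |p0 - q0| = 120 >= alpha and are skipped, which is the
check that keeps the deblocking filter away from real object borders.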