-use nihav_core::frame::*;
-use nihav_codec_support::codecs::blockdsp::*;
-use nihav_codec_support::codecs::MV;
-
-#[cfg(not(debug_assertions))]
-mod release;
-#[cfg(not(debug_assertions))]
-use release::*;
-#[cfg(debug_assertions)]
-mod debug;
-#[cfg(debug_assertions)]
-use debug::*;
+mod mc;
+pub use mc::{H264MC, McBlock};
+#[cfg(target_arch="x86_64")]
+use std::arch::asm;
pub const CHROMA_QUANTS: [u8; 52] = [
0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15,
for i in 0..4 {
transform!(luma_dc; blk[i], blk[i + 4], blk[i + 8], blk[i + 12]);
}
- for row in blk.chunks_mut(4) {
+ for row in blk.chunks_exact_mut(4) {
transform!(luma_dc; row[0], row[1], row[2], row[3]);
}
}
-pub fn idct(blk: &mut [i16; 16], qp: u8, quant_dc: bool) {
+/// Dequantizes every coefficient except the DC one and applies the 4x4
+/// inverse transform in place. Used when the DC value has already been
+/// set separately (e.g. from the luma DC transform).
+pub fn idct_skip_dc(blk: &mut [i16; 16], qp: u8) {
+    // Per-position row selector into LEVEL_SCALE.
+    const BLK_INDEX: [usize; 16] = [
+        0, 2, 0, 2,
+        2, 1, 2, 1,
+        0, 2, 0, 2,
+        2, 1, 2, 1
+    ];
+    let qidx = (qp % 6) as usize;
+    let shift = qp / 6;
+    // skip(1) leaves blk[0] (the DC coefficient) untouched.
+    for (el, &idx) in blk.iter_mut().zip(BLK_INDEX.iter()).skip(1) {
+        *el = (*el * LEVEL_SCALE[idx][qidx]) << shift;
+    }
+    // Row pass, then column pass (final rounding shift of 6 in the column pass).
+    for row in blk.chunks_exact_mut(4) {
+        transform!(row[0], row[1], row[2], row[3], 0);
+    }
+    for i in 0..4 {
+        transform!(blk[i], blk[i + 4], blk[i + 8], blk[i + 12], 6);
+    }
+}
+
+pub fn idct(blk: &mut [i16; 16], qp: u8) {
const BLK_INDEX: [usize; 16] = [
0, 2, 0, 2,
2, 1, 2, 1,
];
let qidx = (qp % 6) as usize;
let shift = qp / 6;
- let start = if quant_dc { 0 } else { 1 };
- for (el, &idx) in blk.iter_mut().zip(BLK_INDEX.iter()).skip(start) {
+ for (el, &idx) in blk.iter_mut().zip(BLK_INDEX.iter()) {
*el = (*el * LEVEL_SCALE[idx][qidx]) << shift;
}
- for row in blk.chunks_mut(4) {
+ for row in blk.chunks_exact_mut(4) {
transform!(row[0], row[1], row[2], row[3], 0);
}
for i in 0..4 {
*dst = i32::from(src).wrapping_mul(i32::from(qmat[idx])).wrapping_add(bias) >> shift;
}
}
- for row in tmp.chunks_mut(8) {
+ for row in tmp.chunks_exact_mut(8) {
transform!(row[0], row[1], row[2], row[3], row[4], row[5], row[6], row[7]);
}
for col in 0..8 {
pub fn add_coeffs(dst: &mut [u8], offset: usize, stride: usize, coeffs: &[i16]) {
let out = &mut dst[offset..][..stride * 3 + 4];
- for (line, src) in out.chunks_mut(stride).take(4).zip(coeffs.chunks(4)) {
+ for (line, src) in out.chunks_mut(stride).take(4).zip(coeffs.chunks_exact(4)) {
for (dst, src) in line.iter_mut().take(4).zip(src.iter()) {
*dst = (i32::from(*dst) + i32::from(*src)).max(0).min(255) as u8;
}
pub fn add_coeffs8(dst: &mut [u8], offset: usize, stride: usize, coeffs: &[i16; 64]) {
let out = &mut dst[offset..];
- for (line, src) in out.chunks_mut(stride).take(8).zip(coeffs.chunks(8)) {
+ for (line, src) in out.chunks_mut(stride).take(8).zip(coeffs.chunks_exact(8)) {
for (dst, src) in line.iter_mut().take(8).zip(src.iter()) {
*dst = (i32::from(*dst) + i32::from(*src)).max(0).min(255) as u8;
}
}
}
-pub fn avg(dst: &mut [u8], dstride: usize,
- src: &[u8], sstride: usize, bw: usize, bh: usize) {
- for (dline, sline) in dst.chunks_mut(dstride).zip(src.chunks(sstride)).take(bh) {
- for (dst, src) in dline.iter_mut().zip(sline.iter()).take(bw) {
- *dst = ((u16::from(*dst) + u16::from(*src) + 1) >> 1) as u8;
- }
- }
-}
-
fn clip8(val: i16) -> u8 { val.max(0).min(255) as u8 }
-fn ipred_dc128(buf: &mut [u8], mut idx: usize, stride: usize, bsize: usize) {
-    for _ in 0..bsize {
-        for x in 0..bsize { buf[idx + x] = 128; }
-        idx += stride;
+// Fills a bsize x bsize block with mid-grey (128); used when no
+// neighbouring pixels are available for prediction.
+fn ipred_dc128(buf: &mut [u8], stride: usize, bsize: usize) {
+    for row in buf.chunks_mut(stride).take(bsize) {
+        for el in row[..bsize].iter_mut() {
+            *el = 128;
+        }
    }
}
-fn ipred_ver(buf: &mut [u8], mut idx: usize, stride: usize, bsize: usize) {
-    let oidx = idx - stride;
-    for _ in 0..bsize {
-        for x in 0..bsize { buf[idx + x] = buf[oidx + x]; }
-        idx += stride;
+// Vertical prediction: replicates the row of top neighbours into every
+// row of the bsize x bsize block.
+fn ipred_ver(buf: &mut [u8], stride: usize, top: &[u8], bsize: usize) {
+    for row in buf.chunks_mut(stride).take(bsize) {
+        row[..bsize].copy_from_slice(&top[..bsize]);
    }
}
-fn ipred_hor(buf: &mut [u8], mut idx: usize, stride: usize, bsize: usize) {
-    for _ in 0..bsize {
-        for x in 0..bsize { buf[idx + x] = buf[idx - 1]; }
-        idx += stride;
+// Horizontal prediction: fills each row with its left-neighbour pixel.
+// left[0] is the top-left corner, so per-row values start at left[1].
+fn ipred_hor(buf: &mut [u8], stride: usize, left: &[u8], bsize: usize) {
+    for (row, &left) in buf.chunks_mut(stride).zip(left[1..].iter()).take(bsize) {
+        for el in row[..bsize].iter_mut() {
+            *el = left;
+        }
    }
}
-fn ipred_dc(buf: &mut [u8], mut idx: usize, stride: usize, bsize: usize, shift: u8) {
+// DC prediction: fills the block with the rounded average of the bsize
+// top and bsize left neighbours. left[0] is the top-left corner pixel,
+// so the left column is read starting at left[1].
+fn ipred_dc(buf: &mut [u8], stride: usize, top: &[u8], left: &[u8], bsize: usize, shift: u8) {
    let mut adc: u16 = 0;
-    for i in 0..bsize { adc += u16::from(buf[idx - stride + i]); }
-    for i in 0..bsize { adc += u16::from(buf[idx - 1 + i * stride]); }
+    for i in 0..bsize { adc += u16::from(top[i]); }
+    for i in 0..bsize { adc += u16::from(left[i + 1]); }
+    // Round to nearest; callers pass shift = log2(2 * bsize).
    let dc = ((adc + (1 << (shift - 1))) >> shift) as u8;
-    for _ in 0..bsize {
-        for x in 0..bsize { buf[idx + x] = dc; }
-        idx += stride;
+    for row in buf.chunks_mut(stride).take(bsize) {
+        for el in row[..bsize].iter_mut() {
+            *el = dc;
+        }
    }
}
-fn ipred_left_dc(buf: &mut [u8], mut idx: usize, stride: usize, bsize: usize, shift: u8) {
+// DC prediction from the left neighbours only (top edge unavailable).
+// left[0] is the top-left corner, hence the left[i + 1] indexing.
+fn ipred_left_dc(buf: &mut [u8], stride: usize, left: &[u8], bsize: usize, shift: u8) {
    let mut adc: u16 = 0;
-    for i in 0..bsize { adc += u16::from(buf[idx - 1 + i * stride]); }
+    for i in 0..bsize { adc += u16::from(left[i + 1]); }
    let dc = ((adc + (1 << (shift - 1))) >> shift) as u8;
-    for _ in 0..bsize {
-        for x in 0..bsize { buf[idx + x] = dc; }
-        idx += stride;
+    for row in buf.chunks_mut(stride).take(bsize) {
+        for el in row[..bsize].iter_mut() {
+            *el = dc;
+        }
    }
}
-fn ipred_top_dc(buf: &mut [u8], mut idx: usize, stride: usize, bsize: usize, shift: u8) {
+// DC prediction from the top neighbours only (left edge unavailable).
+fn ipred_top_dc(buf: &mut [u8], stride: usize, top: &[u8], bsize: usize, shift: u8) {
    let mut adc: u16 = 0;
-    for i in 0..bsize { adc += u16::from(buf[idx - stride + i]); }
+    for i in 0..bsize { adc += u16::from(top[i]); }
    let dc = ((adc + (1 << (shift - 1))) >> shift) as u8;
-    for _ in 0..bsize {
-        for x in 0..bsize { buf[idx + x] = dc; }
-        idx += stride;
+    for row in buf.chunks_mut(stride).take(bsize) {
+        for el in row[..bsize].iter_mut() {
+            *el = dc;
+        }
    }
}
-fn load_top(dst: &mut [u16], buf: &mut [u8], idx: usize, stride: usize, len: usize) {
-    for i in 0..len { dst[i] = u16::from(buf[idx - stride + i]); }
-}
-fn load_left(dst: &mut [u16], buf: &mut [u8], idx: usize, stride: usize, len: usize) {
-    for i in 0..len { dst[i] = u16::from(buf[idx - 1 + i * stride]); }
+// Widens u8 neighbour pixels to u16 working values; copies
+// min(dst.len(), src.len()) elements. Replaces load_top/load_left now
+// that neighbours are passed as plain slices.
+fn load(dst: &mut [u16], src: &[u8]) {
+    for (dst, &src) in dst.iter_mut().zip(src.iter()) {
+        *dst = u16::from(src);
+    }
}
-fn ipred_4x4_ver(buf: &mut [u8], idx: usize, stride: usize, _tr: &[u8]) {
-    ipred_ver(buf, idx, stride, 4);
+// 4x4 vertical prediction wrapper (IPred4x4Func signature).
+fn ipred_4x4_ver(buf: &mut [u8], stride: usize, top: &[u8], _left: &[u8], _tr: &[u8]) {
+    ipred_ver(buf, stride, top, 4);
}
-fn ipred_4x4_hor(buf: &mut [u8], idx: usize, stride: usize, _tr: &[u8]) {
-    ipred_hor(buf, idx, stride, 4);
+// 4x4 horizontal prediction wrapper.
+fn ipred_4x4_hor(buf: &mut [u8], stride: usize, _top: &[u8], left: &[u8], _tr: &[u8]) {
+    ipred_hor(buf, stride, left, 4);
}
-fn ipred_4x4_diag_down_left(buf: &mut [u8], idx: usize, stride: usize, tr: &[u8]) {
+// 4x4 diagonal-down-left prediction: each output row is a 1-2-1 filtered
+// diagonal of the top and top-right neighbours.
+fn ipred_4x4_diag_down_left(buf: &mut [u8], stride: usize, top: &[u8], _left: &[u8], tr: &[u8]) {
    let mut t: [u16; 9] = [0; 9];
-    load_top(&mut t, buf, idx, stride, 4);
-    for i in 0..4 {
-        t[i + 4] = u16::from(tr[i]);
-    }
+    // t = 4 top pixels, 4 top-right pixels, with the last one repeated.
+    load(&mut t[..4], top);
+    load(&mut t[4..8], tr);
    t[8] = t[7];
-    let dst = &mut buf[idx..];
    for i in 0..4 {
-        dst[i] = ((t[i] + 2 * t[i + 1] + t[i + 2] + 2) >> 2) as u8;
+        buf[i] = ((t[i] + 2 * t[i + 1] + t[i + 2] + 2) >> 2) as u8;
    }
-    let dst = &mut buf[idx + stride..];
+    let dst = &mut buf[stride..];
    for i in 0..4 {
        dst[i] = ((t[i + 1] + 2 * t[i + 2] + t[i + 3] + 2) >> 2) as u8;
    }
-    let dst = &mut buf[idx + stride * 2..];
+    let dst = &mut buf[stride * 2..];
    for i in 0..4 {
        dst[i] = ((t[i + 2] + 2 * t[i + 3] + t[i + 4] + 2) >> 2) as u8;
    }
-    let dst = &mut buf[idx + stride * 3..];
+    let dst = &mut buf[stride * 3..];
    for i in 0..4 {
        dst[i] = ((t[i + 3] + 2 * t[i + 4] + t[i + 5] + 2) >> 2) as u8;
    }
}
-fn ipred_4x4_diag_down_right(buf: &mut [u8], idx: usize, stride: usize, _tr: &[u8]) {
+fn ipred_4x4_diag_down_right(buf: &mut [u8], stride: usize, top: &[u8], left: &[u8], _tr: &[u8]) {
let mut t: [u16; 5] = [0; 5];
+ t[0] = u16::from(left[0]);
+ load(&mut t[1..], top);
let mut l: [u16; 5] = [0; 5];
- load_top(&mut t, buf, idx - 1, stride, 5);
- load_left(&mut l, buf, idx - stride, stride, 5);
- let dst = &mut buf[idx..];
+ load(&mut l, left);
+ let dst = buf;
for j in 0..4 {
for i in 0..j {
}
}
}
-fn ipred_4x4_ver_right(buf: &mut [u8], idx: usize, stride: usize, _tr: &[u8]) {
+fn ipred_4x4_ver_right(buf: &mut [u8], stride: usize, top: &[u8], left: &[u8], _tr: &[u8]) {
let mut t: [u16; 5] = [0; 5];
+ t[0] = u16::from(left[0]);
+ load(&mut t[1..], top);
let mut l: [u16; 5] = [0; 5];
- load_top(&mut t, buf, idx - 1, stride, 5);
- load_left(&mut l, buf, idx - stride, stride, 5);
- let dst = &mut buf[idx..];
+ load(&mut l, left);
+ let dst = buf;
for j in 0..4 {
for i in 0..4 {
}
}
}
-fn ipred_4x4_ver_left(buf: &mut [u8], idx: usize, stride: usize, tr: &[u8]) {
+fn ipred_4x4_ver_left(buf: &mut [u8], stride: usize, top: &[u8], _left: &[u8], tr: &[u8]) {
let mut t: [u16; 8] = [0; 8];
- load_top(&mut t, buf, idx, stride, 4);
- for i in 0..4 { t[i + 4] = u16::from(tr[i]); }
- let dst = &mut buf[idx..];
+ load(&mut t[..4], top);
+ load(&mut t[4..], tr);
+ let dst = buf;
dst[0 + 0 * stride] = ((t[0] + t[1] + 1) >> 1) as u8;
let pix = ((t[1] + t[2] + 1) >> 1) as u8;
dst[2 + 3 * stride] = pix;
dst[3 + 3 * stride] = ((t[4] + 2*t[5] + t[6] + 2) >> 2) as u8;
}
-fn ipred_4x4_hor_down(buf: &mut [u8], idx: usize, stride: usize, _tr: &[u8]) {
+fn ipred_4x4_hor_down(buf: &mut [u8], stride: usize, top: &[u8], left: &[u8], _tr: &[u8]) {
let mut t: [u16; 5] = [0; 5];
+ t[0] = u16::from(left[0]);
+ load(&mut t[1..], top);
let mut l: [u16; 5] = [0; 5];
- load_top(&mut t, buf, idx - 1, stride, 5);
- load_left(&mut l, buf, idx - stride, stride, 5);
- let dst = &mut buf[idx..];
+ load(&mut l, left);
+ let dst = buf;
for j in 0..4 {
for i in 0..4 {
}
}
}
-fn ipred_4x4_hor_up(buf: &mut [u8], idx: usize, stride: usize, _tr: &[u8]) {
+fn ipred_4x4_hor_up(buf: &mut [u8], stride: usize, _top: &[u8], left: &[u8], _tr: &[u8]) {
let mut l: [u16; 8] = [0; 8];
- load_left(&mut l, buf, idx, stride, 8);
- let dst = &mut buf[idx..];
+ load(&mut l, &left[1..]);
+ let dst = buf;
dst[0 + 0 * stride] = ((l[0] + l[1] + 1) >> 1) as u8;
dst[1 + 0 * stride] = ((l[0] + 2*l[1] + l[2] + 2) >> 2) as u8;
dst[2 + 3 * stride] = l[3] as u8;
dst[3 + 3 * stride] = l[3] as u8;
}
-fn ipred_4x4_dc(buf: &mut [u8], idx: usize, stride: usize, _tr: &[u8]) {
-    ipred_dc(buf, idx, stride, 4, 3);
+// 4x4 DC prediction: average of 4 top + 4 left pixels (shift 3 = /8).
+fn ipred_4x4_dc(buf: &mut [u8], stride: usize, top: &[u8], left: &[u8], _tr: &[u8]) {
+    ipred_dc(buf, stride, top, left, 4, 3);
}
-fn ipred_4x4_left_dc(buf: &mut [u8], idx: usize, stride: usize, _tr: &[u8]) {
-    ipred_left_dc(buf, idx, stride, 4, 2);
+// 4x4 DC prediction from left neighbours only.
+fn ipred_4x4_left_dc(buf: &mut [u8], stride: usize, _top: &[u8], left: &[u8], _tr: &[u8]) {
+    ipred_left_dc(buf, stride, left, 4, 2);
}
-fn ipred_4x4_top_dc(buf: &mut [u8], idx: usize, stride: usize, _tr: &[u8]) {
-    ipred_top_dc(buf, idx, stride, 4, 2);
+// 4x4 DC prediction from top neighbours only.
+fn ipred_4x4_top_dc(buf: &mut [u8], stride: usize, top: &[u8], _left: &[u8], _tr: &[u8]) {
+    ipred_top_dc(buf, stride, top, 4, 2);
}
-fn ipred_4x4_dc128(buf: &mut [u8], idx: usize, stride: usize, _tr: &[u8]) {
-    ipred_dc128(buf, idx, stride, 4);
+// 4x4 fallback: no neighbours available, fill with 128.
+fn ipred_4x4_dc128(buf: &mut [u8], stride: usize, _top: &[u8], _left: &[u8], _tr: &[u8]) {
+    ipred_dc128(buf, stride, 4);
}
pub struct IPred8Context {
tl: 128,
}
}
- pub fn fill(&mut self, buf: &mut [u8], idx: usize, stride: usize, has_t: bool, has_tr: bool, has_l: bool, has_tl: bool) {
+ pub fn fill(&mut self, top: &[u8], left: &[u8], has_t: bool, has_tr: bool, has_l: bool, has_tl: bool) {
let mut t = [0x80u8; 19];
let mut l = [0x80u8; 11];
if has_t {
- t[1..8 + 1].copy_from_slice(&buf[idx - stride..][..8]);
+ t[1..8 + 1].copy_from_slice(&top[..8]);
}
if has_tr {
- t[8 + 1..16 + 1].copy_from_slice(&buf[idx - stride + 8..][..8]);
+ t[8 + 1..16 + 1].copy_from_slice(&top[8..][..8]);
t[16 + 1] = t[15 + 1];
t[17 + 1] = t[15 + 1];
} else {
}
}
if has_l {
- for i in 0..8 {
- l[i + 1] = buf[idx - 1 + stride * i];
- }
+ l[1..9].copy_from_slice(&left[1..9]);
l[8 + 1] = l[7 + 1];
l[9 + 1] = l[7 + 1];
}
if has_tl {
- t[0] = buf[idx - 1 - stride];
- l[0] = buf[idx - 1 - stride];
+ t[0] = left[0];
+ l[0] = left[0];
} else {
t[0] = t[1];
l[0] = l[1];
}
fn ipred_y_8x8_diag_down_left(buf: &mut [u8], stride: usize, ctx: &IPred8Context) {
let mut t = [0u16; 16];
- for (dt, &st) in t.iter_mut().zip(ctx.t.iter()) {
- *dt = u16::from(st);
- }
+ load(&mut t, &ctx.t);
for (y, row) in buf.chunks_mut(stride).take(8).enumerate() {
for (x, pix) in row.iter_mut().take(8).enumerate() {
fn ipred_y_8x8_diag_down_right(buf: &mut [u8], stride: usize, ctx: &IPred8Context) {
let mut t = [0u16; 9];
t[0] = u16::from(ctx.tl);
- for (dt, &st) in t[1..].iter_mut().zip(ctx.t.iter()) {
- *dt = u16::from(st);
- }
+ load(&mut t[1..], &ctx.t);
let mut l = [0u16; 9];
l[0] = u16::from(ctx.tl);
- for (dl, &sl) in l[1..].iter_mut().zip(ctx.l.iter()) {
- *dl = u16::from(sl);
- }
+ load(&mut l[1..], &ctx.l);
let diag = t[1] + 2 * t[0] + l[1];
for (y, row) in buf.chunks_mut(stride).take(8).enumerate() {
fn ipred_y_8x8_ver_right(buf: &mut [u8], stride: usize, ctx: &IPred8Context) {
let mut t = [0u16; 9];
t[0] = u16::from(ctx.tl);
- for (dt, &st) in t[1..].iter_mut().zip(ctx.t.iter()) {
- *dt = u16::from(st);
- }
+ load(&mut t[1..], &ctx.t);
let mut l = [0u16; 9];
l[0] = u16::from(ctx.tl);
- for (dl, &sl) in l[1..].iter_mut().zip(ctx.l.iter()) {
- *dl = u16::from(sl);
- }
+ load(&mut l[1..], &ctx.l);
for (y, row) in buf.chunks_mut(stride).take(8).enumerate() {
for (x, pix) in row.iter_mut().take(8).enumerate() {
}
fn ipred_y_8x8_ver_left(buf: &mut [u8], stride: usize, ctx: &IPred8Context) {
let mut t = [0u16; 16];
- for (dt, &st) in t.iter_mut().zip(ctx.t.iter()) {
- *dt = u16::from(st);
- }
+ load(&mut t, &ctx.t);
for (y, row) in buf.chunks_mut(stride).take(8).enumerate() {
for (x, pix) in row.iter_mut().take(8).enumerate() {
fn ipred_y_8x8_hor_down(buf: &mut [u8], stride: usize, ctx: &IPred8Context) {
let mut t = [0u16; 9];
t[0] = u16::from(ctx.tl);
- for (dt, &st) in t[1..].iter_mut().zip(ctx.t.iter()) {
- *dt = u16::from(st);
- }
+ load(&mut t[1..], &ctx.t);
let mut l = [0u16; 9];
l[0] = u16::from(ctx.tl);
- for (dl, &sl) in l[1..].iter_mut().zip(ctx.l.iter()) {
- *dl = u16::from(sl);
- }
+ load(&mut l[1..], &ctx.l);
for (y, row) in buf.chunks_mut(stride).take(8).enumerate() {
for (x, pix) in row.iter_mut().take(8).enumerate() {
}
fn ipred_y_8x8_hor_up(buf: &mut [u8], stride: usize, ctx: &IPred8Context) {
let mut l = [0u16; 8];
- for (dl, &sl) in l.iter_mut().zip(ctx.l.iter()) {
- *dl = u16::from(sl);
- }
+ load(&mut l, &ctx.l);
for (y, row) in buf.chunks_mut(stride).take(8).enumerate() {
for (x, pix) in row.iter_mut().take(8).enumerate() {
}
}
+// 8x8 luma fallback prediction: fill with 128 (context unused).
fn ipred_y_8x8_dc128(buf: &mut [u8], stride: usize, _ctx: &IPred8Context) {
-    ipred_dc128(buf, 0, stride, 8);
+    ipred_dc128(buf, stride, 8);
}
-fn ipred_8x8_ver(buf: &mut [u8], idx: usize, stride: usize) {
-    ipred_ver(buf, idx, stride, 8);
+// 8x8 vertical prediction wrapper (IPred8x8Func signature).
+fn ipred_8x8_ver(buf: &mut [u8], stride: usize, top: &[u8], _left: &[u8]) {
+    ipred_ver(buf, stride, top, 8);
}
-fn ipred_8x8_hor(buf: &mut [u8], idx: usize, stride: usize) {
-    ipred_hor(buf, idx, stride, 8);
+// 8x8 horizontal prediction wrapper.
+fn ipred_8x8_hor(buf: &mut [u8], stride: usize, _top: &[u8], left: &[u8]) {
+    ipred_hor(buf, stride, left, 8);
}
-fn ipred_8x8_dc(buf: &mut [u8], idx: usize, stride: usize) {
- let mut t: [u16; 8] = [0; 8];
- load_top(&mut t, buf, idx, stride, 8);
- let mut l: [u16; 8] = [0; 8];
- load_left(&mut l, buf, idx, stride, 8);
+fn ipred_8x8_dc(buf: &mut [u8], stride: usize, top: &[u8], left: &[u8]) {
+ let mut l = [0; 8];
+ load(&mut l, &left[1..]);
+ let mut t = [0; 8];
+ load(&mut t, top);
let dc0 = ((t[0] + t[1] + t[2] + t[3] + l[0] + l[1] + l[2] + l[3] + 4) >> 3) as u8;
let sum1 = t[4] + t[5] + t[6] + t[7];
let dc2 = ((sum2 + 2) >> 2) as u8;
let dc3 = ((sum1 + sum2 + 4) >> 3) as u8;
- let dst = &mut buf[idx..];
- for row in dst.chunks_mut(stride).take(4) {
+ for row in buf.chunks_mut(stride).take(4) {
row[..4].copy_from_slice(&[dc0; 4]);
row[4..8].copy_from_slice(&[dc1; 4]);
}
- for row in dst.chunks_mut(stride).skip(4).take(4) {
+ for row in buf.chunks_mut(stride).skip(4).take(4) {
row[..4].copy_from_slice(&[dc2; 4]);
row[4..8].copy_from_slice(&[dc3; 4]);
}
}
-fn ipred_8x8_left_dc(buf: &mut [u8], idx: usize, stride: usize) {
+// 8x8 DC prediction from the left edge only: the top four rows use the
+// average of left pixels 0..4, the bottom four rows the average of 4..8.
+fn ipred_8x8_left_dc(buf: &mut [u8], stride: usize, _top: &[u8], left: &[u8]) {
    let mut left_dc0 = 0;
    let mut left_dc1 = 0;
-    for row in buf[idx - 1..].chunks(stride).take(4) {
-        left_dc0 += u16::from(row[0]);
+    for &el in left[1..].iter().take(4) {
+        left_dc0 += u16::from(el);
    }
-    for row in buf[idx - 1..].chunks(stride).skip(4).take(4) {
-        left_dc1 += u16::from(row[0]);
+    for &el in left[1..].iter().skip(4).take(4) {
+        left_dc1 += u16::from(el);
    }
    let dc0 = ((left_dc0 + 2) >> 2) as u8;
    let dc2 = ((left_dc1 + 2) >> 2) as u8;
-    for row in buf[idx..].chunks_mut(stride).take(4) {
+    for row in buf.chunks_mut(stride).take(4) {
        row[..8].copy_from_slice(&[dc0; 8]);
    }
-    for row in buf[idx..].chunks_mut(stride).skip(4).take(4) {
+    for row in buf.chunks_mut(stride).skip(4).take(4) {
        row[..8].copy_from_slice(&[dc2; 8]);
    }
}
-fn ipred_8x8_top_dc(buf: &mut [u8], idx: usize, stride: usize) {
-    ipred_top_dc(buf, idx, stride, 4, 2);
-    ipred_top_dc(buf, idx + 4, stride, 4, 2);
-    ipred_top_dc(buf, idx + 4 * stride, stride, 4, 2);
-    ipred_top_dc(buf, idx + 4 + 4 * stride, stride, 4, 2);
+// 8x8 top-only DC prediction done as four 4x4 quadrants. The bottom
+// quadrants take their "top" neighbours from row 3 of the freshly
+// predicted upper half (copied out first, since the old in-place code
+// read them directly from the frame buffer).
+fn ipred_8x8_top_dc(buf: &mut [u8], stride: usize, top: &[u8], _left: &[u8]) {
+    ipred_top_dc(buf, stride, top, 4, 2);
+    ipred_top_dc(&mut buf[4..], stride, &top[4..], 4, 2);
+    let mut top = [0; 8];
+    top.copy_from_slice(&buf[stride * 3..][..8]);
+    ipred_top_dc(&mut buf[4 * stride..], stride, &top, 4, 2);
+    ipred_top_dc(&mut buf[4 + 4 * stride..], stride, &top[4..], 4, 2);
}
-fn ipred_8x8_dc128(buf: &mut [u8], idx: usize, stride: usize) {
-    ipred_dc128(buf, idx, stride, 8);
+// 8x8 fallback: no neighbours available, fill with 128.
+fn ipred_8x8_dc128(buf: &mut [u8], stride: usize, _top: &[u8], _left: &[u8]) {
+    ipred_dc128(buf, stride, 8);
}
-fn ipred_8x8_plane(buf: &mut [u8], idx: usize, stride: usize) {
- let mut h: i32 = 0;
- let mut v: i32 = 0;
- let idx0 = idx + 3 - stride;
- let mut idx1 = idx + 4 * stride - 1;
- let mut idx2 = idx + 2 * stride - 1;
- for i in 0..4 {
+fn ipred_8x8_plane(buf: &mut [u8], stride: usize, top: &[u8], left: &[u8]) {
+ let mut h: i32 = 4 * (i32::from(top[7]) - i32::from(left[0]));
+ let mut v: i32 = 4 * (i32::from(left[8]) - i32::from(left[0]));
+ for i in 0..3 {
let i1 = (i + 1) as i32;
- h += i1 * (i32::from(buf[idx0 + i + 1]) - i32::from(buf[idx0 - i - 1]));
- v += i1 * (i32::from(buf[idx1]) - i32::from(buf[idx2]));
- idx1 += stride;
- idx2 -= stride;
+ h += i1 * (i32::from(top[4 + i]) - i32::from(top[2 - i]));
+ v += i1 * (i32::from(left[5 + i]) - i32::from(left[3 - i]));
}
let b = (17 * h + 16) >> 5;
let c = (17 * v + 16) >> 5;
- let mut a = 16 * (i32::from(buf[idx - 1 + 7 * stride]) + i32::from(buf[idx + 7 - stride])) - 3 * (b + c) + 16;
- for line in buf[idx..].chunks_mut(stride).take(8) {
+ let mut a = 16 * (i32::from(left[8]) + i32::from(top[7])) - 3 * (b + c) + 16;
+ for line in buf.chunks_mut(stride).take(8) {
let mut acc = a;
for el in line.iter_mut().take(8) {
*el = clip8((acc >> 5) as i16);
}
}
-fn ipred_16x16_ver(buf: &mut [u8], idx: usize, stride: usize) {
-    ipred_ver(buf, idx, stride, 16);
+// 16x16 vertical prediction wrapper.
+fn ipred_16x16_ver(buf: &mut [u8], stride: usize, top: &[u8], _left: &[u8]) {
+    ipred_ver(buf, stride, top, 16);
}
-fn ipred_16x16_hor(buf: &mut [u8], idx: usize, stride: usize) {
-    ipred_hor(buf, idx, stride, 16);
+// 16x16 horizontal prediction wrapper.
+fn ipred_16x16_hor(buf: &mut [u8], stride: usize, _top: &[u8], left: &[u8]) {
+    ipred_hor(buf, stride, left, 16);
}
-fn ipred_16x16_dc(buf: &mut [u8], idx: usize, stride: usize) {
-    ipred_dc(buf, idx, stride, 16, 5);
+// 16x16 DC prediction: average of 16 top + 16 left pixels (shift 5 = /32).
+fn ipred_16x16_dc(buf: &mut [u8], stride: usize, top: &[u8], left: &[u8]) {
+    ipred_dc(buf, stride, top, left, 16, 5);
}
-fn ipred_16x16_left_dc(buf: &mut [u8], idx: usize, stride: usize) {
-    ipred_left_dc(buf, idx, stride, 16, 4);
+// 16x16 DC prediction from left neighbours only.
+fn ipred_16x16_left_dc(buf: &mut [u8], stride: usize, _top: &[u8], left: &[u8]) {
+    ipred_left_dc(buf, stride, left, 16, 4);
}
-fn ipred_16x16_top_dc(buf: &mut [u8], idx: usize, stride: usize) {
-    ipred_top_dc(buf, idx, stride, 16, 4);
+// 16x16 DC prediction from top neighbours only.
+fn ipred_16x16_top_dc(buf: &mut [u8], stride: usize, top: &[u8], _left: &[u8]) {
+    ipred_top_dc(buf, stride, top, 16, 4);
}
-fn ipred_16x16_dc128(buf: &mut [u8], idx: usize, stride: usize) {
-    ipred_dc128(buf, idx, stride, 16);
+// 16x16 fallback: no neighbours available, fill with 128.
+fn ipred_16x16_dc128(buf: &mut [u8], stride: usize, _top: &[u8], _left: &[u8]) {
+    ipred_dc128(buf, stride, 16);
}
-fn ipred_16x16_plane(buf: &mut [u8], idx: usize, stride: usize) {
- let idx0 = idx + 7 - stride;
- let mut idx1 = idx + 8 * stride - 1;
- let mut idx2 = idx1 - 2 * stride;
-
- let mut h = i32::from(buf[idx0 + 1]) - i32::from(buf[idx0 - 1]);
- let mut v = i32::from(buf[idx1]) - i32::from(buf[idx2]);
-
- for k in 2..9 {
- idx1 += stride;
- idx2 -= stride;
- h += (k as i32) * (i32::from(buf[idx0 + k]) - i32::from(buf[idx0 - k]));
- v += (k as i32) * (i32::from(buf[idx1]) - i32::from(buf[idx2]));
+fn ipred_16x16_plane(buf: &mut [u8], stride: usize, top: &[u8], left: &[u8]) {
+ let mut h = 8 * (i32::from(top[15]) - i32::from(left[0]));
+ let mut v = 8 * (i32::from(left[16]) - i32::from(left[0]));
+ for k in 0..7 {
+ h += ((k as i32) + 1) * (i32::from(top[8 + k]) - i32::from(top[6 - k]));
+ v += ((k as i32) + 1) * (i32::from(left[9 + k]) - i32::from(left[7 - k]));
}
+
h = (5 * h + 32) >> 6;
v = (5 * v + 32) >> 6;
- let mut a = 16 * (i32::from(buf[idx - 1 + 15 * stride]) + i32::from(buf[idx + 15 - stride]) + 1) - 7 * (v + h);
+ let mut a = 16 * (i32::from(left[16]) + i32::from(top[15]) + 1) - 7 * (v + h);
- for row in buf[idx..].chunks_mut(stride).take(16) {
+ for row in buf.chunks_mut(stride).take(16) {
let mut b = a;
a += v;
}
}
-pub type IPred4x4Func = fn(buf: &mut [u8], off: usize, stride: usize, tr: &[u8]);
-pub type IPred8x8Func = fn(buf: &mut [u8], off: usize, stride: usize);
+pub type IPred4x4Func = fn(buf: &mut [u8], stride: usize, top: &[u8], left: &[u8], tr: &[u8]);
+pub type IPred8x8Func = fn(buf: &mut [u8], stride: usize, top: &[u8], left: &[u8]);
pub type IPred8x8LumaFunc = fn(buf: &mut [u8], stride: usize, ctx: &IPred8Context);
pub const IPRED4_DC128: usize = 11;
ipred_16x16_left_dc, ipred_16x16_top_dc, ipred_16x16_dc128
];
-fn clip_u8(val: i16) -> u8 { val.max(0).min(255) as u8 }
-
-pub fn do_mc(frm: &mut NASimpleVideoFrame<u8>, refpic: NAVideoBufferRef<u8>, xpos: usize, ypos: usize, w: usize, h: usize, mv: MV) {
- let mode = ((mv.x & 3) + (mv.y & 3) * 4) as usize;
- copy_block(frm, refpic.clone(), 0, xpos, ypos, mv.x >> 2, mv.y >> 2, w, h, 2, 3, mode, H264_LUMA_INTERP);
-
- let (cw, ch) = refpic.get_dimensions(1);
- let mvx = mv.x >> 3;
- let mvy = mv.y >> 3;
- let dx = (mv.x & 7) as u16;
- let dy = (mv.y & 7) as u16;
- let mut ebuf = [0u8; 18 * 9];
- let src_x = ((xpos >> 1) as isize) + (mvx as isize);
- let src_y = ((ypos >> 1) as isize) + (mvy as isize);
- let suoff = refpic.get_offset(1);
- let svoff = refpic.get_offset(2);
- let sustride = refpic.get_stride(1);
- let svstride = refpic.get_stride(2);
- let src = refpic.get_data();
- let cbw = w / 2;
- let cbh = h / 2;
- let (csrc, cstride) = if (src_x < 0) || (src_x + (cbw as isize) + 1 > (cw as isize)) || (src_y < 0) || (src_y + (cbh as isize) + 1 > (ch as isize)) {
- edge_emu(&refpic, src_x, src_y, cbw+1, cbh+1, &mut ebuf, 18, 1, 4);
- edge_emu(&refpic, src_x, src_y, cbw+1, cbh+1, &mut ebuf[9..], 18, 2, 4);
- ([&ebuf, &ebuf[9..]], [18, 18])
- } else {
- ([&src[suoff + (src_x as usize) + (src_y as usize) * sustride..],
- &src[svoff + (src_x as usize) + (src_y as usize) * svstride..]],
- [sustride, svstride])
- };
- for chroma in 1..3 {
- let off = frm.offset[chroma] + xpos / 2 + (ypos / 2) * frm.stride[chroma];
- chroma_interp(&mut frm.data[off..], frm.stride[chroma], csrc[chroma - 1], cstride[chroma - 1], dx, dy, cbw, cbh);
- }
-}
-
-pub fn gray_block(frm: &mut NASimpleVideoFrame<u8>, x: usize, y: usize, w: usize, h: usize) {
- let yoff = frm.offset[0] + x + y * frm.stride[0];
- let coff = [frm.offset[1] + x / 2 + y / 2 * frm.stride[1],
- frm.offset[2] + x / 2 + y / 2 * frm.stride[2]];
- if w == 16 && h == 16 {
- IPRED_FUNCS16X16[IPRED8_DC128](frm.data, yoff, frm.stride[0]);
- for chroma in 1..2 {
- IPRED_FUNCS8X8_CHROMA[IPRED8_DC128](frm.data, coff[chroma - 1], frm.stride[chroma]);
- }
- } else if w == 8 && h == 8 {
- IPRED_FUNCS8X8_CHROMA[IPRED8_DC128](frm.data, yoff, frm.stride[0]);
- for chroma in 1..2 {
- IPRED_FUNCS4X4[IPRED4_DC128](frm.data, coff[chroma - 1], frm.stride[chroma], &[128; 4]);
- }
- } else {
- for row in frm.data[yoff..].chunks_mut(frm.stride[0]).take(h) {
- for el in row[..w].iter_mut() {
- *el = 128;
- }
- }
- for chroma in 0..2 {
- for row in frm.data[coff[chroma]..].chunks_mut(frm.stride[chroma + 1]).take(h / 2) {
- for el in row[..w / 2].iter_mut() {
- *el = 128;
- }
- }
- }
- }
-}
-
-pub fn do_mc_avg(frm: &mut NASimpleVideoFrame<u8>, refpic: NAVideoBufferRef<u8>, xpos: usize, ypos: usize, w: usize, h: usize, mv: MV, avg_buf: &mut NAVideoBufferRef<u8>) {
- let mut afrm = NASimpleVideoFrame::from_video_buf(avg_buf).unwrap();
- let amv = MV { x: mv.x + (xpos as i16) * 4, y: mv.y + (ypos as i16) * 4 };
- do_mc(&mut afrm, refpic, 0, 0, w, h, amv);
- for comp in 0..3 {
- let shift = if comp == 0 { 0 } else { 1 };
- avg(&mut frm.data[frm.offset[comp] + (xpos >> shift) + (ypos >> shift) * frm.stride[comp]..], frm.stride[comp], &afrm.data[afrm.offset[comp]..], afrm.stride[comp], w >> shift, h >> shift);
- }
-}
-
macro_rules! loop_filter {
(lumaedge; $buf: expr, $off: expr, $step: expr, $alpha: expr, $beta: expr) => {
let p2 = i16::from($buf[$off - $step * 3]);
(p0 - q0).abs() < alpha && (p1 - p0).abs() < beta && (q1 - q0).abs() < beta
}
+// Portable fallback for the SIMD version below: computes the per-pixel
+// deblocking filter decision for four positions along an edge.
+// `step` is the distance across the edge (p/q samples), `stride` the
+// distance between the four tested positions along the edge.
+#[cfg(not(target_arch="x86_64"))]
+fn check_filter4(buf: &[u8], mut off: usize, step: usize, stride: usize, alpha: i16, beta: i16) -> [bool; 4] {
+    let mut flags = [false; 4];
+    for flag in flags.iter_mut() {
+        let p1 = i16::from(buf[off - step * 2]);
+        let p0 = i16::from(buf[off - step]);
+        let q0 = i16::from(buf[off]);
+        let q1 = i16::from(buf[off + step]);
+        // Standard H.264 filter-on condition against alpha/beta thresholds.
+        *flag = (p0 - q0).abs() < alpha && (p1 - p0).abs() < beta && (q1 - q0).abs() < beta;
+        off += stride;
+    }
+    flags
+}
+
+// SSE version of check_filter4: loads the 4x4 set of p1/p0/q0/q1 samples,
+// transposes when the edge is vertical (step == 1) so rows always hold
+// one sample type, and evaluates all four filter decisions at once.
+// NOTE(review): pcmpgtw/packsswb yield 0xFF bytes for true lanes, which
+// are stored straight into a [bool; 4]; Rust bools must be 0 or 1, so
+// this relies on non-canonical bool values — confirm soundness.
+#[cfg(target_arch="x86_64")]
+fn check_filter4(buf: &[u8], off: usize, step: usize, stride: usize, alpha: i16, beta: i16) -> [bool; 4] {
+    unsafe {
+        let mut flags = [false; 4];
+        // Point at the p1 row/column (two samples before the edge).
+        let src = buf[off - step * 2..].as_ptr();
+        let load_stride = step.max(stride);
+        let fptr = flags.as_mut_ptr();
+        let tflag = u32::from(step == 1);
+        asm! {
+            // load block
+            "pxor xmm4, xmm4",
+            "movd xmm0, dword ptr [{src}]",
+            "lea {tmp}, [{src} + {stride} * 2]",
+            "movd xmm1, dword ptr [{src} + {stride}]",
+            "movd xmm2, dword ptr [{tmp}]",
+            "movd xmm3, dword ptr [{tmp} + {stride}]",
+            "punpcklbw xmm0, xmm4",
+            "punpcklbw xmm1, xmm4",
+            "punpcklbw xmm2, xmm4",
+            "punpcklbw xmm3, xmm4",
+
+            // transpose block if necessary so it's always processed by rows
+            "test {tflag:e}, {tflag:e}",
+            "jz 1f",
+            "punpcklwd xmm0, xmm1",
+            "movhlps xmm4, xmm0",
+            "punpcklwd xmm2, xmm3",
+            "movhlps xmm1, xmm2",
+            "punpckldq xmm0, xmm2",
+            "punpckldq xmm4, xmm1",
+            "movhlps xmm1, xmm0",
+            "movhlps xmm3, xmm4",
+            "movaps xmm2, xmm4",
+            "1:",
+
+            // calculate deltas and flags
+            "movd xmm4, {alpha:r}",
+            "movd xmm5, {beta:r}",
+            "psubw xmm0, xmm1",
+            "psubw xmm1, xmm2",
+            "psubw xmm3, xmm2",
+            "pshuflw xmm4, xmm4, 0",
+            "pshuflw xmm5, xmm5, 0",
+            "pabsw xmm0, xmm0", // |p1 - p0|
+            "pabsw xmm1, xmm1", // |p0 - q0|
+            "pabsw xmm2, xmm3", // |q1 - q0|
+            "movaps xmm3, xmm5",
+            "pcmpgtw xmm4, xmm1",
+            "pcmpgtw xmm5, xmm0",
+            "pcmpgtw xmm3, xmm2",
+            "pand xmm4, xmm5",
+            "pand xmm4, xmm3",
+            "packsswb xmm4, xmm4",
+            "movd [{flags}], xmm4",
+            tmp = out(reg) _,
+            src = in(reg) src,
+            stride = in(reg) load_stride,
+            alpha = in(reg) alpha,
+            beta = in(reg) beta,
+            flags = in(reg) fptr,
+            tflag = in(reg) tflag,
+            out("xmm0") _,
+            out("xmm1") _,
+            out("xmm2") _,
+            out("xmm3") _,
+            out("xmm4") _,
+            out("xmm5") _,
+        }
+        flags
+    }
+}
+
+// Strong luma filter on a vertical edge: decisions for all four rows are
+// now computed in one batched check_filter4 call (step 1 across the edge,
+// stride between rows) instead of per-pixel check_filter.
pub fn loop_filter_lumaedge_v(dst: &mut [u8], mut off: usize, stride: usize, alpha: i16, beta: i16) {
-    for _ in 0..4 {
-        if check_filter(dst, off, 1, alpha, beta) {
+    let flags = check_filter4(dst, off, 1, stride, alpha, beta);
+    for &flag in flags.iter() {
+        if flag {
            loop_filter!(lumaedge; dst, off, 1, alpha, beta);
        }
        off += stride;
    }
}
+// Strong luma filter on a horizontal edge: batched decision with
+// check_filter4 (step = stride across the edge, positions 1 apart).
pub fn loop_filter_lumaedge_h(dst: &mut [u8], off: usize, stride: usize, alpha: i16, beta: i16) {
-    for x in 0..4 {
-        if check_filter(dst, off + x, stride, alpha, beta) {
+    let flags = check_filter4(dst, off, stride, 1, alpha, beta);
+    for (x, &flag) in flags.iter().enumerate() {
+        if flag {
            loop_filter!(lumaedge; dst, off + x, stride, alpha, beta);
        }
    }
}
+// Normal-strength luma filter on a vertical edge; tc0 clips the delta.
+// Filter decisions are batched via check_filter4.
pub fn loop_filter_lumanormal_v(dst: &mut [u8], mut off: usize, stride: usize, alpha: i16, beta: i16, tc0: i16) {
-    for _ in 0..4 {
-        if check_filter(dst, off, 1, alpha, beta) {
+    let flags = check_filter4(dst, off, 1, stride, alpha, beta);
+    for &flag in flags.iter() {
+        if flag {
            loop_filter!(lumanormal; dst, off, 1, tc0, beta);
        }
        off += stride;
    }
}
+// Normal-strength luma filter on a horizontal edge; tc0 clips the delta.
pub fn loop_filter_lumanormal_h(dst: &mut [u8], off: usize, stride: usize, alpha: i16, beta: i16, tc0: i16) {
-    for x in 0..4 {
-        if check_filter(dst, off + x, stride, alpha, beta) {
+    let flags = check_filter4(dst, off, stride, 1, alpha, beta);
+    for (x, &flag) in flags.iter().enumerate() {
+        if flag {
            loop_filter!(lumanormal; dst, off + x, stride, tc0, beta);
        }
    }
}
pub fn loop_filter_chromaedge_v(dst: &mut [u8], mut off: usize, stride: usize, alpha: i16, beta: i16) {
- for _ in 0..4 {
+ for _ in 0..2 {
if check_filter(dst, off, 1, alpha, beta) {
loop_filter!(chromaedge; dst, off, 1);
}
}
}
+// Chroma edge filter, horizontal edge: chroma edges are two pixels wide,
+// hence the loop count change from 4 to 2.
pub fn loop_filter_chromaedge_h(dst: &mut [u8], off: usize, stride: usize, alpha: i16, beta: i16) {
-    for x in 0..4 {
+    for x in 0..2 {
        if check_filter(dst, off + x, stride, alpha, beta) {
            loop_filter!(chromaedge; dst, off + x, stride);
        }
    }
}
pub fn loop_filter_chromanormal_v(dst: &mut [u8], mut off: usize, stride: usize, alpha: i16, beta: i16, tc0: i16) {
- for _ in 0..4 {
+ for _ in 0..2 {
if check_filter(dst, off, 1, alpha, beta) {
loop_filter!(chromanormal; dst, off, 1, tc0);
}
}
}
pub fn loop_filter_chromanormal_h(dst: &mut [u8], off: usize, stride: usize, alpha: i16, beta: i16, tc0: i16) {
- for x in 0..4 {
+ for x in 0..2 {
if check_filter(dst, off + x, stride, alpha, beta) {
loop_filter!(chromanormal; dst, off + x, stride, tc0);
}