fn copy_luma(dst: &mut [u8], dstride: usize, src: &[u8], sstride: usize, w: usize, h: usize) {
    // Copies a `w`×`h` block of bytes from `src` to `dst`, where each buffer
    // stores its rows with its own stride. Uses an AVX2 copy loop (64 bytes
    // per inner iteration) when all preconditions hold, otherwise a safe
    // row-by-row `copy_from_slice` fallback.
    //
    // Fast-path preconditions:
    //  * w > 0 and h > 0 — both asm loops are do-while shaped (`sub`/`jnz`,
    //    `dec`/`jnz`) and would wrap around on a zero count;
    //  * both base pointers 32-byte aligned and both strides multiples of 32,
    //    so every row stays aligned for `vmovdqa`;
    //  * w a multiple of 64 (the inner loop moves two YMM registers per step);
    //  * strides no smaller than w (the per-row step is `stride - w`);
    //  * both slices long enough to hold all h rows.
    if w > 0
        && h > 0
        && dst.as_ptr().align_offset(32) == 0
        && src.as_ptr().align_offset(32) == 0
        && (w % 64) == 0
        && ((dstride | sstride) % 32) == 0
        && dstride >= w
        && sstride >= w
        && dst.len() >= (h - 1) * dstride + w
        && src.len() >= (h - 1) * sstride + w
    {
        // SAFETY: the checks above guarantee that every 32-byte load/store is
        // aligned and in bounds; the loop touches exactly h rows of w bytes
        // on each side and nothing else.
        unsafe {
            asm!(
                "2:",
                "    mov {x}, {w}",
                "3:",
                "    vmovdqa ymm0, [{src}]",
                "    vmovdqa ymm1, [{src} + 32]",
                "    vmovdqa [{dst}], ymm0",
                "    vmovdqa [{dst} + 32], ymm1",
                "    add {src}, 64",
                "    add {dst}, 64",
                "    sub {x}, 64",
                "    jnz 3b",
                "    add {src}, {sstep}",
                "    add {dst}, {dstep}",
                "    dec {h}",
                "    jnz 2b",
                dst = inout(reg) dst.as_mut_ptr() => _,
                src = inout(reg) src.as_ptr() => _,
                sstep = in(reg) sstride - w,
                dstep = in(reg) dstride - w,
                w = in(reg) w,
                // BUG FIX: `dec {h}` writes this register, so it must be an
                // `inout` operand — modifying an `in` operand is undefined
                // behavior under the inline-asm rules.
                h = inout(reg) h => _,
                x = out(reg) _,
                out("ymm0") _,
                out("ymm1") _,
            );
        }
    } else {
        // Safe scalar fallback: copy the first `w` bytes of each of the
        // first `h` rows. Panics (visibly) on under-sized rows rather than
        // reading/writing out of bounds.
        for (dline, sline) in dst.chunks_mut(dstride).zip(src.chunks(sstride)).take(h) {
            dline[..w].copy_from_slice(&sline[..w]);
        }
    }
}
+#[cfg(target_arch="x86_64")]