h264/mc: add a stub for using optimised implementations
[nihav.git] nihav-itu/src/codecs/h264/dsp/mc/mod.rs
use nihav_core::frame::*;
use nihav_codec_support::codecs::MV;
use nihav_codec_support::codecs::blockdsp::*;

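// Pull in the build-specific implementations of the interpolation primitives:
// the release build can hook up optimised (SIMD) routines, while the debug
// build sticks to the plain Rust reference code.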
#[cfg(not(debug_assertions))]
mod release;
#[cfg(not(debug_assertions))]
use release::*;
#[cfg(debug_assertions)]
mod debug;
#[cfg(debug_assertions)]
use debug::*;

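// Signature shared by the luma interpolation routines dispatched through the
// H264_LUMA_INTERP table provided by the build-specific module.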
type MCFunc = fn (dst: &mut [u8], dstride: usize, src: &[u8], sstride: usize, h: usize);

fn clip_u8(val: i16) -> u8 { val.max(0).min(255) as u8 }

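// Hook implemented by the build-specific module so that optimised (e.g. SIMD)
// backends can replace the default function pointers in H264MC.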
trait RegisterSIMD {
    fn register_simd(&mut self);
}

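/// Motion compensation context: a scratch buffer used for prediction averaging
/// plus per-width (2/4/8/16) dispatch tables for weighted prediction.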
pub struct H264MC {
    avg_buf: NAVideoBufferRef<u8>,
    pub put_block_weighted: [fn (dst: &mut [u8], stride: usize, src: &[u8], h: usize, wparams: [i8; 3]); 4],
    pub put_block_weighted2: [fn (dst: &mut [u8], stride: usize, src0: &[u8], src1: &[u8], h: usize, wparams: [i8; 5]); 4],
}

impl H264MC {
    pub fn new(avg_buf: NAVideoBufferRef<u8>) -> Self {
        let mut obj = Self {
            avg_buf,
            put_block_weighted: [put_blk_w_2, put_blk_w_4, put_blk_w_8, put_blk_w_16],
            put_block_weighted2: [put_blk_w2_2, put_blk_w2_4, put_blk_w2_8, put_blk_w2_16],
        };
        obj.register_simd();
        obj
    }
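    /// Motion-compensates a luma block and the corresponding chroma blocks from
    /// `refpic` into `frm`, using quarter-pel luma / eighth-pel chroma motion
    /// vectors and edge emulation when the source area falls outside the picture.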
    pub fn do_mc(&mut self, frm: &mut NASimpleVideoFrame<u8>, refpic: NAVideoBufferRef<u8>, xpos: usize, ypos: usize, w: usize, h: usize, mv: MV) {
        let mut ebuf = [0u8; 22 * 22];
        let mvx = mv.x >> 2;
        let mvy = mv.y >> 2;
        let mode = ((mv.x & 3) + (mv.y & 3) * 4) as usize;
        let pre = if mode != 0 { 2isize } else { 0 };
        let post = if mode != 0 { 3isize } else { 0 };
        let (yw, yh) = refpic.get_dimensions(0);
        let src = refpic.get_data();
        let systride = refpic.get_stride(0);
        let src_x = (xpos as isize) + (mvx as isize);
        let src_y = (ypos as isize) + (mvy as isize);
        let (ysrc, ystride) = if (src_x - pre < 0) || (src_x + (w as isize) + post > (yw as isize)) || (src_y - pre < 0) || (src_y + (h as isize) + post > (yh as isize)) {
            let add = (pre + post) as usize;
            edge_emu(&refpic, src_x - pre, src_y - pre, w + add, h + add, &mut ebuf, 22, 0, 0);
            (ebuf.as_slice(), 22)
        } else {
            (&src[refpic.get_offset(0) + ((src_x - pre) as usize) + ((src_y - pre) as usize) * systride..], systride)
        };
        let wmode = match w {
            4 => 0,
            8 => 1,
            _ => 2,
        };
        (H264_LUMA_INTERP[wmode][mode])(&mut frm.data[frm.offset[0] + xpos + ypos * frm.stride[0]..], frm.stride[0], ysrc, ystride, h);

        let (cw, ch) = refpic.get_dimensions(1);
        let mvx = mv.x >> 3;
        let mvy = mv.y >> 3;
        let dx = (mv.x & 7) as u16;
        let dy = (mv.y & 7) as u16;
        let src_x = ((xpos >> 1) as isize) + (mvx as isize);
        let src_y = ((ypos >> 1) as isize) + (mvy as isize);
        let suoff = refpic.get_offset(1);
        let svoff = refpic.get_offset(2);
        let sustride = refpic.get_stride(1);
        let svstride = refpic.get_stride(2);
        let cbw = w / 2;
        let cbh = h / 2;
        let (csrc, cstride) = if (src_x < 0) || (src_x + (cbw as isize) + 1 > (cw as isize)) || (src_y < 0) || (src_y + (cbh as isize) + 1 > (ch as isize)) {
            edge_emu(&refpic, src_x, src_y, cbw+1, cbh+1, &mut ebuf, 18, 1, 4);
            edge_emu(&refpic, src_x, src_y, cbw+1, cbh+1, &mut ebuf[9..], 18, 2, 4);
            ([&ebuf, &ebuf[9..]], [18, 18])
        } else {
            ([&src[suoff + (src_x as usize) + (src_y as usize) * sustride..],
              &src[svoff + (src_x as usize) + (src_y as usize) * svstride..]],
             [sustride, svstride])
        };
        for chroma in 1..3 {
            let off = frm.offset[chroma] + xpos / 2 + (ypos / 2) * frm.stride[chroma];
            chroma_interp(&mut frm.data[off..], frm.stride[chroma], csrc[chroma - 1], cstride[chroma - 1], dx, dy, cbw, cbh);
        }
    }

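    /// Performs the same interpolation as `do_mc` but writes the luma and chroma
    /// results into separate caller-provided buffers with a fixed stride of 16.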
    pub fn mc_blocks(&mut self, ydst: &mut [u8], udst: &mut [u8], vdst: &mut [u8], refpic: NAVideoBufferRef<u8>, xpos: usize, ypos: usize, w: usize, h: usize, mv: MV) {
        let mode = ((mv.x & 3) + (mv.y & 3) * 4) as usize;

        let pre = if mode != 0 { 2 } else { 0 };
        let post = if mode != 0 { 3 } else { 0 };
        let (width, height) = refpic.get_dimensions(0);
        let sx = (xpos as isize) + ((mv.x >> 2) as isize);
        let sy = (ypos as isize) + ((mv.y >> 2) as isize);

        const EBUF_STRIDE: usize = 32;
        let mut ebuf = [0u8; EBUF_STRIDE * (16 + 2 + 3)];

        let wmode = match w {
            4 => 0,
            8 => 1,
            _ => 2,
        };
        if (sx - pre < 0) || (sx + (w as isize) + post > (width as isize)) ||
           (sy - pre < 0) || (sy + (h as isize) + post > (height as isize)) {
            let edge = (pre + post) as usize;
            edge_emu(&refpic, sx - pre, sy - pre, w + edge, h + edge,
                     &mut ebuf, EBUF_STRIDE, 0, 0);
            (H264_LUMA_INTERP[wmode][mode])(ydst, 16, &ebuf, EBUF_STRIDE, h);
        } else {
            let sstride = refpic.get_stride(0);
            let soff = refpic.get_offset(0);
            let sdta = refpic.get_data();
            let sbuf: &[u8] = sdta.as_slice();
            let saddr = soff + ((sx - pre) as usize) + ((sy - pre) as usize) * sstride;
            (H264_LUMA_INTERP[wmode][mode])(ydst, 16, &sbuf[saddr..], sstride, h);
        }

        let (cw, ch) = refpic.get_dimensions(1);
        let mvx = mv.x >> 3;
        let mvy = mv.y >> 3;
        let dx = (mv.x & 7) as u16;
        let dy = (mv.y & 7) as u16;
        let src_x = ((xpos >> 1) as isize) + (mvx as isize);
        let src_y = ((ypos >> 1) as isize) + (mvy as isize);
        let suoff = refpic.get_offset(1);
        let svoff = refpic.get_offset(2);
        let sustride = refpic.get_stride(1);
        let svstride = refpic.get_stride(2);
        let src = refpic.get_data();
        let cbw = w / 2;
        let cbh = h / 2;
        let (csrc, cstride) = if (src_x < 0) || (src_x + (cbw as isize) + 1 > (cw as isize)) || (src_y < 0) || (src_y + (cbh as isize) + 1 > (ch as isize)) {
            edge_emu(&refpic, src_x, src_y, cbw+1, cbh+1, &mut ebuf, 18, 1, 4);
            edge_emu(&refpic, src_x, src_y, cbw+1, cbh+1, &mut ebuf[9..], 18, 2, 4);
            ([&ebuf, &ebuf[9..]], [18, 18])
        } else {
            ([&src[suoff + (src_x as usize) + (src_y as usize) * sustride..],
              &src[svoff + (src_x as usize) + (src_y as usize) * svstride..]],
             [sustride, svstride])
        };
        chroma_interp(udst, 16, csrc[0], cstride[0], dx, dy, cbw, cbh);
        chroma_interp(vdst, 16, csrc[1], cstride[1], dx, dy, cbw, cbh);
    }

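    /// Averages plane `comp` of the internal `avg_buf` into `dst` with rounding.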
    pub fn avg(&mut self, dst: &mut [u8], dstride: usize, bw: usize, bh: usize, comp: usize) {
        let afrm = NASimpleVideoFrame::from_video_buf(&mut self.avg_buf).unwrap();
        let src = &afrm.data[afrm.offset[comp]..];
        let sstride = afrm.stride[comp];
        for (dline, sline) in dst.chunks_mut(dstride).zip(src.chunks(sstride)).take(bh) {
            for (dst, src) in dline.iter_mut().zip(sline.iter()).take(bw) {
                *dst = ((u16::from(*dst) + u16::from(*src) + 1) >> 1) as u8;
            }
        }
    }

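    /// Motion-compensates into the internal averaging buffer and then averages
    /// it with the prediction already present in `frm` (used when two
    /// predictions have to be combined).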
    pub fn do_mc_avg(&mut self, frm: &mut NASimpleVideoFrame<u8>, refpic: NAVideoBufferRef<u8>, xpos: usize, ypos: usize, w: usize, h: usize, mv: MV) {
        let mut abuf = self.avg_buf.clone();
        let mut afrm = NASimpleVideoFrame::from_video_buf(&mut abuf).unwrap();
        let amv = MV { x: mv.x + (xpos as i16) * 4, y: mv.y + (ypos as i16) * 4 };
        self.do_mc(&mut afrm, refpic, 0, 0, w, h, amv);
        for comp in 0..3 {
            let shift = if comp == 0 { 0 } else { 1 };
            self.avg(&mut frm.data[frm.offset[comp] + (xpos >> shift) + (ypos >> shift) * frm.stride[comp]..], frm.stride[comp], w >> shift, h >> shift, comp);
        }
    }

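    /// Fills the given block (luma and both chroma planes) with mid-grey (128).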
    pub fn gray_block(&mut self, frm: &mut NASimpleVideoFrame<u8>, x: usize, y: usize, w: usize, h: usize) {
        let yoff = frm.offset[0] + x + y * frm.stride[0];
        let coff = [frm.offset[1] + x / 2 + y / 2 * frm.stride[1],
                    frm.offset[2] + x / 2 + y / 2 * frm.stride[2]];
        for row in frm.data[yoff..].chunks_mut(frm.stride[0]).take(h) {
            for el in row[..w].iter_mut() {
                *el = 128;
            }
        }
        for chroma in 0..2 {
            for row in frm.data[coff[chroma]..].chunks_mut(frm.stride[chroma + 1]).take(h / 2) {
                for el in row[..w / 2].iter_mut() {
                    *el = 128;
                }
            }
        }
    }
}

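// Generic weighted-prediction routine:
// dst = clip(((src * weight + rounding) >> shift) + offset),
// with the source read from a 16-byte-strided scratch buffer.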
fn put_block_weighted(dst: &mut [u8], stride: usize, src: &[u8], w: usize, h: usize, wparams: [i8; 3]) {
    let weight = i16::from(wparams[0]);
    let offset = i16::from(wparams[1]);
    let wshift = wparams[2] as u8;
    let bias = (1 << wshift) >> 1;

    for (drow, srow) in dst.chunks_mut(stride).zip(src.chunks(16)).take(h) {
        for (dst, &src) in drow[..w].iter_mut().zip(srow.iter()) {
            *dst = clip_u8(((i16::from(src) * weight + bias) >> wshift) + offset);
        }
    }
}

fn put_blk_w_2(dst: &mut [u8], stride: usize, src: &[u8], h: usize, wparams: [i8; 3]) {
    put_block_weighted(dst, stride, src, 2, h, wparams);
}
fn put_blk_w_4(dst: &mut [u8], stride: usize, src: &[u8], h: usize, wparams: [i8; 3]) {
    put_block_weighted(dst, stride, src, 4, h, wparams);
}
fn put_blk_w_8(dst: &mut [u8], stride: usize, src: &[u8], h: usize, wparams: [i8; 3]) {
    put_block_weighted(dst, stride, src, 8, h, wparams);
}
fn put_blk_w_16(dst: &mut [u8], stride: usize, src: &[u8], h: usize, wparams: [i8; 3]) {
    put_block_weighted(dst, stride, src, 16, h, wparams);
}

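// Generic two-source (weighted bi-prediction) routine with per-source weights
// and offsets, again reading both sources from 16-byte-strided scratch buffers.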
fn put_block_weighted2(dst: &mut [u8], stride: usize, src0: &[u8], src1: &[u8], w: usize, h: usize, wparams: [i8; 5]) {
    let weight0 = i16::from(wparams[0]);
    let offset0 = i16::from(wparams[1]);
    let weight1 = i16::from(wparams[2]);
    let offset1 = i16::from(wparams[3]);
    let wshift = (wparams[4] as u8) + 1;
    let offset = (offset0 + offset1 + 1) >> 1;
    let bias = (1 << wshift) >> 1;

    for (drow, (srow0, srow1)) in dst.chunks_mut(stride).zip(src0.chunks(16).zip(src1.chunks(16))).take(h) {
        for (dst, (&src0, &src1)) in drow[..w].iter_mut().zip(srow0.iter().zip(srow1.iter())) {
            *dst = clip_u8(((i16::from(src0) * weight0 + i16::from(src1) * weight1 + bias) >> wshift) + offset);
        }
    }
}

fn put_blk_w2_2(dst: &mut [u8], stride: usize, src0: &[u8], src1: &[u8], h: usize, wparams: [i8; 5]) {
    put_block_weighted2(dst, stride, src0, src1, 2, h, wparams);
}
fn put_blk_w2_4(dst: &mut [u8], stride: usize, src0: &[u8], src1: &[u8], h: usize, wparams: [i8; 5]) {
    put_block_weighted2(dst, stride, src0, src1, 4, h, wparams);
}
fn put_blk_w2_8(dst: &mut [u8], stride: usize, src0: &[u8], src1: &[u8], h: usize, wparams: [i8; 5]) {
    put_block_weighted2(dst, stride, src0, src1, 8, h, wparams);
}
fn put_blk_w2_16(dst: &mut [u8], stride: usize, src0: &[u8], src1: &[u8], h: usize, wparams: [i8; 5]) {
    put_block_weighted2(dst, stride, src0, src1, 16, h, wparams);
}