// nihav-itu/src/codecs/h264/dsp/mc/mod.rs
use nihav_core::frame::*;
use nihav_codec_support::codecs::MV;
use nihav_codec_support::codecs::blockdsp::*;

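// Pick the DSP backend at compile time: debug builds use the plain Rust
// routines from the `debug` module, release builds the ones from `release`
// (presumably the hand-optimised/SIMD variants).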
#[cfg(not(debug_assertions))]
mod release;
#[cfg(not(debug_assertions))]
use release::*;
#[cfg(debug_assertions)]
mod debug;
#[cfg(debug_assertions)]
use debug::*;

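/// Shared signature for the luma motion-compensation routines (presumably the
/// entries of the backend-provided `H264_LUMA_INTERP` tables): filter `h` rows
/// from `src` with stride `sstride` into `dst` with stride `dstride`.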
type MCFunc = fn (dst: &mut [u8], dstride: usize, src: &[u8], sstride: usize, h: usize);

fn clip_u8(val: i16) -> u8 { val.max(0).min(255) as u8 }

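/// Hook that lets a backend swap the default function pointers in `H264MC`
/// for accelerated versions; a plain backend can simply leave them untouched.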
trait RegisterSIMD {
    fn register_simd(&mut self);
}

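/// Motion-compensation dispatcher for the H.264 decoder.
///
/// The public arrays hold per-width function pointers (2/4/8/16 pixels wide
/// for the weighted copies and averaging, 2/4/8 for chroma interpolation);
/// `avg_buf` is a scratch picture used by `do_mc_avg`, and `width`/`height`
/// are the current frame dimensions used for the edge-emulation checks.
///
/// Minimal usage sketch (hypothetical surrounding decoder state: `avg_buf`,
/// `frm`, `refpic` and `mv` are assumed to exist already):
///
/// ```ignore
/// let mut mc = H264MC::new(avg_buf);
/// mc.set_dimensions(width, height);
/// mc.do_mc(&mut frm, refpic, xpos, ypos, 16, 16, mv);
/// ```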
pub struct H264MC {
    avg_buf: NAVideoBufferRef<u8>,
    pub put_block_weighted: [fn (dst: &mut [u8], stride: usize, src: &[u8], h: usize, wparams: [i8; 3]); 4],
    pub put_block_weighted2: [fn (dst: &mut [u8], stride: usize, src0: &[u8], src1: &[u8], h: usize, wparams: [i8; 5]); 4],
    pub chroma_interp: [fn (dst: &mut [u8], dstride: usize, src: &[u8], sstride: usize, dx: u16, dy: u16, h: usize); 3],
    avg: [fn (dst: &mut [u8], dstride: usize, src: &[u8], sstride: usize, bh: usize); 4],

    width: usize,
    height: usize,
}

impl H264MC {
    pub fn new(avg_buf: NAVideoBufferRef<u8>) -> Self {
        let mut obj = Self {
            avg_buf,
            put_block_weighted: [put_blk_w_2, put_blk_w_4, put_blk_w_8, put_blk_w_16],
            put_block_weighted2: [put_blk_w2_2, put_blk_w2_4, put_blk_w2_8, put_blk_w2_16],
            chroma_interp: [chroma_interp_2, chroma_interp_4, chroma_interp_8],
            avg: [avg_2, avg_4, avg_8, avg_16],
            width: 0, height: 0,
        };
        obj.register_simd();
        obj
    }
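    /// Records the frame dimensions used for the out-of-bounds checks in the
    /// motion-compensation routines below.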
    pub fn set_dimensions(&mut self, width: usize, height: usize) {
        self.width = width;
        self.height = height;
    }
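    /// Motion-compensates a `w` by `h` luma block (plus the corresponding
    /// chroma blocks) at (`xpos`, `ypos`) in `frm` from `refpic`.
    ///
    /// The low two bits of the motion vector select one of the 16 quarter-pel
    /// luma interpolation modes and the low three bits select the eighth-pel
    /// chroma position; when the referenced area sticks out of the frame the
    /// source pixels are first replicated into a small edge-emulation buffer.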
    pub fn do_mc(&mut self, frm: &mut NASimpleVideoFrame<u8>, refpic: NAVideoBufferRef<u8>, xpos: usize, ypos: usize, w: usize, h: usize, mv: MV) {
        let mut ebuf = [0u8; 22 * 22];
        let mvx = mv.x >> 2;
        let mvy = mv.y >> 2;
        let mode = ((mv.x & 3) + (mv.y & 3) * 4) as usize;
        let pre = if mode != 0 { 2isize } else { 0 };
        let post = if mode != 0 { 3isize } else { 0 };
        let (yw, yh) = (self.width, self.height);
        let src = refpic.get_data();
        let systride = refpic.get_stride(0);
        let src_x = (xpos as isize) + (mvx as isize);
        let src_y = (ypos as isize) + (mvy as isize);
        let (ysrc, ystride) = if (src_x - pre < 0) || (src_x + (w as isize) + post > (yw as isize)) || (src_y - pre < 0) || (src_y + (h as isize) + post > (yh as isize)) {
            let add = (pre + post) as usize;
            edge_emu(&refpic, src_x - pre, src_y - pre, w + add, h + add, &mut ebuf, 22, 0, 0);
            (ebuf.as_slice(), 22)
        } else {
            (&src[refpic.get_offset(0) + ((src_x - pre) as usize) + ((src_y - pre) as usize) * systride..], systride)
        };
        let wmode = match w {
            4 => 0,
            8 => 1,
            _ => 2,
        };
        (H264_LUMA_INTERP[wmode][mode])(&mut frm.data[frm.offset[0] + xpos + ypos * frm.stride[0]..], frm.stride[0], ysrc, ystride, h);

        let (cw, ch) = (self.width >> 1, self.height >> 1);
        let mvx = mv.x >> 3;
        let mvy = mv.y >> 3;
        let dx = (mv.x & 7) as u16;
        let dy = (mv.y & 7) as u16;
        let src_x = ((xpos >> 1) as isize) + (mvx as isize);
        let src_y = ((ypos >> 1) as isize) + (mvy as isize);
        let suoff = refpic.get_offset(1);
        let svoff = refpic.get_offset(2);
        let sustride = refpic.get_stride(1);
        let svstride = refpic.get_stride(2);
        let cbw = w / 2;
        let cbh = h / 2;
        let (csrc, cstride) = if (src_x < 0) || (src_x + (cbw as isize) + 1 > (cw as isize)) || (src_y < 0) || (src_y + (cbh as isize) + 1 > (ch as isize)) {
            edge_emu(&refpic, src_x, src_y, cbw + 1, cbh + 1, &mut ebuf, 18, 1, 4);
            edge_emu(&refpic, src_x, src_y, cbw + 1, cbh + 1, &mut ebuf[9..], 18, 2, 4);
            ([&ebuf, &ebuf[9..]], [18, 18])
        } else {
            ([&src[suoff + (src_x as usize) + (src_y as usize) * sustride..],
              &src[svoff + (src_x as usize) + (src_y as usize) * svstride..]],
             [sustride, svstride])
        };
        for chroma in 1..3 {
            let off = frm.offset[chroma] + xpos / 2 + (ypos / 2) * frm.stride[chroma];
            (self.chroma_interp[wmode])(&mut frm.data[off..], frm.stride[chroma], csrc[chroma - 1], cstride[chroma - 1], dx, dy, cbh);
        }
    }

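    /// Performs the same luma and chroma interpolation as [`Self::do_mc`] but
    /// writes the results into the caller-provided scratch buffers `ydst`,
    /// `udst` and `vdst`, all with a fixed stride of 16.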
    pub fn mc_blocks(&mut self, ydst: &mut [u8], udst: &mut [u8], vdst: &mut [u8], refpic: NAVideoBufferRef<u8>, xpos: usize, ypos: usize, w: usize, h: usize, mv: MV) {
        let mode = ((mv.x & 3) + (mv.y & 3) * 4) as usize;

        let pre = if mode != 0 { 2 } else { 0 };
        let post = if mode != 0 { 3 } else { 0 };
        let (width, height) = (self.width, self.height);
        let sx = (xpos as isize) + ((mv.x >> 2) as isize);
        let sy = (ypos as isize) + ((mv.y >> 2) as isize);

        const EBUF_STRIDE: usize = 32;
        let mut ebuf = [0u8; EBUF_STRIDE * (16 + 2 + 3)];

        let wmode = match w {
            4 => 0,
            8 => 1,
            _ => 2,
        };
        if (sx - pre < 0) || (sx + (w as isize) + post > (width as isize)) ||
           (sy - pre < 0) || (sy + (h as isize) + post > (height as isize)) {
            let edge = (pre + post) as usize;
            edge_emu(&refpic, sx - pre, sy - pre, w + edge, h + edge,
                     &mut ebuf, EBUF_STRIDE, 0, 0);
            (H264_LUMA_INTERP[wmode][mode])(ydst, 16, &ebuf, EBUF_STRIDE, h);
        } else {
            let sstride = refpic.get_stride(0);
            let soff = refpic.get_offset(0);
            let sdta = refpic.get_data();
            let sbuf: &[u8] = sdta.as_slice();
            let saddr = soff + ((sx - pre) as usize) + ((sy - pre) as usize) * sstride;
            (H264_LUMA_INTERP[wmode][mode])(ydst, 16, &sbuf[saddr..], sstride, h);
        }

        let (cw, ch) = (self.width >> 1, self.height >> 1);
        let mvx = mv.x >> 3;
        let mvy = mv.y >> 3;
        let dx = (mv.x & 7) as u16;
        let dy = (mv.y & 7) as u16;
        let src_x = ((xpos >> 1) as isize) + (mvx as isize);
        let src_y = ((ypos >> 1) as isize) + (mvy as isize);
        let suoff = refpic.get_offset(1);
        let svoff = refpic.get_offset(2);
        let sustride = refpic.get_stride(1);
        let svstride = refpic.get_stride(2);
        let src = refpic.get_data();
        let cbw = w / 2;
        let cbh = h / 2;
        let (csrc, cstride) = if (src_x < 0) || (src_x + (cbw as isize) + 1 > (cw as isize)) || (src_y < 0) || (src_y + (cbh as isize) + 1 > (ch as isize)) {
            edge_emu(&refpic, src_x, src_y, cbw + 1, cbh + 1, &mut ebuf, 18, 1, 4);
            edge_emu(&refpic, src_x, src_y, cbw + 1, cbh + 1, &mut ebuf[9..], 18, 2, 4);
            ([&ebuf, &ebuf[9..]], [18, 18])
        } else {
            ([&src[suoff + (src_x as usize) + (src_y as usize) * sustride..],
              &src[svoff + (src_x as usize) + (src_y as usize) * svstride..]],
             [sustride, svstride])
        };
        (self.chroma_interp[wmode])(udst, 16, csrc[0], cstride[0], dx, dy, cbh);
        (self.chroma_interp[wmode])(vdst, 16, csrc[1], cstride[1], dx, dy, cbh);
    }

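    /// Motion-compensates into the internal scratch picture and then averages
    /// the result with the pixels already present in `frm` (rounding up),
    /// which is how bidirectional prediction without explicit weights is done.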
    pub fn do_mc_avg(&mut self, frm: &mut NASimpleVideoFrame<u8>, refpic: NAVideoBufferRef<u8>, xpos: usize, ypos: usize, w: usize, h: usize, mv: MV) {
        let mut abuf = self.avg_buf.clone();
        let mut afrm = NASimpleVideoFrame::from_video_buf(&mut abuf).unwrap();
        let amv = MV { x: mv.x + (xpos as i16) * 4, y: mv.y + (ypos as i16) * 4 };
        self.do_mc(&mut afrm, refpic, 0, 0, w, h, amv);
        let wsize = match w {
            2 => 0,
            4 => 1,
            8 => 2,
            _ => 3,
        };
        let src = self.avg_buf.get_data();
        for comp in 0..3 {
            let shift = if comp == 0 { 0 } else { 1 };
            let sstride = self.avg_buf.get_stride(comp);
            let soff = self.avg_buf.get_offset(comp);
            (self.avg[wsize - shift])(&mut frm.data[frm.offset[comp] + (xpos >> shift) + (ypos >> shift) * frm.stride[comp]..], frm.stride[comp], &src[soff..], sstride, h >> shift);
        }
    }

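    /// Fills a `w` by `h` block at (`x`, `y`) with mid-grey (128 in every
    /// plane), e.g. as a fallback when no valid reference picture is available.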
    pub fn gray_block(&mut self, frm: &mut NASimpleVideoFrame<u8>, x: usize, y: usize, w: usize, h: usize) {
        let yoff = frm.offset[0] + x + y * frm.stride[0];
        let coff = [frm.offset[1] + x / 2 + y / 2 * frm.stride[1],
                    frm.offset[2] + x / 2 + y / 2 * frm.stride[2]];
        for row in frm.data[yoff..].chunks_mut(frm.stride[0]).take(h) {
            for el in row[..w].iter_mut() {
                *el = 128;
            }
        }
        for chroma in 0..2 {
            for row in frm.data[coff[chroma]..].chunks_mut(frm.stride[chroma + 1]).take(h / 2) {
                for el in row[..w / 2].iter_mut() {
                    *el = 128;
                }
            }
        }
    }
}

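/// Rounding average of a `bw` by `bh` block of `src` onto `dst`
/// (`(dst + src + 1) >> 1` per pixel). `avg_2` below is an unrolled 2-wide
/// variant (the stray index expressions are there to hoist the bounds checks,
/// presumably as an optimisation hint) and `avg_4`/`avg_8`/`avg_16` are thin
/// wrappers with a fixed width.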
fn avg(dst: &mut [u8], dstride: usize, src: &[u8], sstride: usize, bw: usize, bh: usize) {
    for (dline, sline) in dst.chunks_mut(dstride).zip(src.chunks(sstride)).take(bh) {
        for (dst, src) in dline.iter_mut().zip(sline.iter()).take(bw) {
            *dst = ((u16::from(*dst) + u16::from(*src) + 1) >> 1) as u8;
        }
    }
}

fn avg_2(dst: &mut [u8], dstride: usize, src: &[u8], sstride: usize, bh: usize) {
    let _ = src[sstride + 1];
    let _ = dst[dstride + 1];
    dst[0] = ((u16::from(dst[0]) + u16::from(src[0]) + 1) >> 1) as u8;
    dst[1] = ((u16::from(dst[1]) + u16::from(src[1]) + 1) >> 1) as u8;
    dst[dstride] = ((u16::from(dst[dstride]) + u16::from(src[sstride]) + 1) >> 1) as u8;
    dst[dstride + 1] = ((u16::from(dst[dstride + 1]) + u16::from(src[sstride + 1]) + 1) >> 1) as u8;
    if bh == 4 {
        let _ = src[sstride * 3 + 1];
        let _ = dst[dstride * 3 + 1];
        dst[dstride * 2] = ((u16::from(dst[dstride * 2]) + u16::from(src[sstride * 2]) + 1) >> 1) as u8;
        dst[dstride * 2 + 1] = ((u16::from(dst[dstride * 2 + 1]) + u16::from(src[sstride * 2 + 1]) + 1) >> 1) as u8;
        dst[dstride * 3] = ((u16::from(dst[dstride * 3]) + u16::from(src[sstride * 3]) + 1) >> 1) as u8;
        dst[dstride * 3 + 1] = ((u16::from(dst[dstride * 3 + 1]) + u16::from(src[sstride * 3 + 1]) + 1) >> 1) as u8;
    }
}
fn avg_4(dst: &mut [u8], dstride: usize, src: &[u8], sstride: usize, bh: usize) {
    avg(dst, dstride, src, sstride, 4, bh);
}
fn avg_8(dst: &mut [u8], dstride: usize, src: &[u8], sstride: usize, bh: usize) {
    avg(dst, dstride, src, sstride, 8, bh);
}
fn avg_16(dst: &mut [u8], dstride: usize, src: &[u8], sstride: usize, bh: usize) {
    avg(dst, dstride, src, sstride, 16, bh);
}

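/// Unidirectional weighted prediction with `wparams` packed as
/// `[weight, offset, shift]`: every pixel becomes
/// `clip(((src * weight + ((1 << shift) >> 1)) >> shift) + offset)`,
/// with the source read using a fixed stride of 16.
/// `put_blk_w_2`..`put_blk_w_16` below bind the block width.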
fn put_block_weighted(dst: &mut [u8], stride: usize, src: &[u8], w: usize, h: usize, wparams: [i8; 3]) {
    let weight = i16::from(wparams[0]);
    let offset = i16::from(wparams[1]);
    let wshift = wparams[2] as u8;
    let bias = (1 << wshift) >> 1;

    for (drow, srow) in dst.chunks_mut(stride).zip(src.chunks(16)).take(h) {
        for (dst, &src) in drow[..w].iter_mut().zip(srow.iter()) {
            *dst = clip_u8(((i16::from(src) * weight + bias) >> wshift) + offset);
        }
    }
}

fn put_blk_w_2(dst: &mut [u8], stride: usize, src: &[u8], h: usize, wparams: [i8; 3]) {
    put_block_weighted(dst, stride, src, 2, h, wparams);
}
fn put_blk_w_4(dst: &mut [u8], stride: usize, src: &[u8], h: usize, wparams: [i8; 3]) {
    put_block_weighted(dst, stride, src, 4, h, wparams);
}
fn put_blk_w_8(dst: &mut [u8], stride: usize, src: &[u8], h: usize, wparams: [i8; 3]) {
    put_block_weighted(dst, stride, src, 8, h, wparams);
}
fn put_blk_w_16(dst: &mut [u8], stride: usize, src: &[u8], h: usize, wparams: [i8; 3]) {
    put_block_weighted(dst, stride, src, 16, h, wparams);
}

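/// Bidirectional weighted prediction combining two stride-16 sources with
/// `wparams` packed as `[weight0, offset0, weight1, offset1, shift]`:
/// `clip(((src0 * weight0 + src1 * weight1 + rnd) >> (shift + 1)) + ((offset0 + offset1 + 1) >> 1))`
/// where `rnd = (1 << (shift + 1)) >> 1`. `put_blk_w2_2` below is a manually
/// unrolled 2-wide variant, the other wrappers just bind the block width.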
fn put_block_weighted2(dst: &mut [u8], stride: usize, src0: &[u8], src1: &[u8], w: usize, h: usize, wparams: [i8; 5]) {
    let weight0 = i16::from(wparams[0]);
    let offset0 = i16::from(wparams[1]);
    let weight1 = i16::from(wparams[2]);
    let offset1 = i16::from(wparams[3]);
    let wshift = (wparams[4] as u8) + 1;
    let offset = (offset0 + offset1 + 1) >> 1;
    let bias = (1 << wshift) >> 1;

    for (drow, (srow0, srow1)) in dst.chunks_mut(stride).zip(src0.chunks(16).zip(src1.chunks(16))).take(h) {
        for (dst, (&src0, &src1)) in drow[..w].iter_mut().zip(srow0.iter().zip(srow1.iter())) {
            *dst = clip_u8(((i16::from(src0) * weight0 + i16::from(src1) * weight1 + bias) >> wshift) + offset);
        }
    }
}

fn put_blk_w2_2(dst: &mut [u8], stride: usize, src0: &[u8], src1: &[u8], h: usize, wparams: [i8; 5]) {
    let weight0 = i16::from(wparams[0]);
    let offset0 = i16::from(wparams[1]);
    let weight1 = i16::from(wparams[2]);
    let offset1 = i16::from(wparams[3]);
    let wshift = (wparams[4] as u8) + 1;
    let offset = (offset0 + offset1 + 1) >> 1;
    let bias = (1 << wshift) >> 1;

    let _ = src0[16 + 1];
    let _ = src1[16 + 1];
    let _ = dst[stride + 1];
    dst[0] = clip_u8(((i16::from(src0[ 0]) * weight0 + i16::from(src1[ 0]) * weight1 + bias) >> wshift) + offset);
    dst[1] = clip_u8(((i16::from(src0[ 1]) * weight0 + i16::from(src1[ 1]) * weight1 + bias) >> wshift) + offset);
    dst[stride] = clip_u8(((i16::from(src0[16]) * weight0 + i16::from(src1[16]) * weight1 + bias) >> wshift) + offset);
    dst[stride + 1] = clip_u8(((i16::from(src0[17]) * weight0 + i16::from(src1[17]) * weight1 + bias) >> wshift) + offset);
    if h == 4 {
        let _ = src0[16 * 3 + 1];
        let _ = src1[16 * 3 + 1];
        let _ = dst[stride * 3 + 1];
        dst[stride * 2] = clip_u8(((i16::from(src0[32]) * weight0 + i16::from(src1[32]) * weight1 + bias) >> wshift) + offset);
        dst[stride * 2 + 1] = clip_u8(((i16::from(src0[33]) * weight0 + i16::from(src1[33]) * weight1 + bias) >> wshift) + offset);
        dst[stride * 3] = clip_u8(((i16::from(src0[48]) * weight0 + i16::from(src1[48]) * weight1 + bias) >> wshift) + offset);
        dst[stride * 3 + 1] = clip_u8(((i16::from(src0[49]) * weight0 + i16::from(src1[49]) * weight1 + bias) >> wshift) + offset);
    }
}
fn put_blk_w2_4(dst: &mut [u8], stride: usize, src0: &[u8], src1: &[u8], h: usize, wparams: [i8; 5]) {
    put_block_weighted2(dst, stride, src0, src1, 4, h, wparams);
}
fn put_blk_w2_8(dst: &mut [u8], stride: usize, src0: &[u8], src1: &[u8], h: usize, wparams: [i8; 5]) {
    put_block_weighted2(dst, stride, src0, src1, 8, h, wparams);
}
fn put_blk_w2_16(dst: &mut [u8], stride: usize, src0: &[u8], src1: &[u8], h: usize, wparams: [i8; 5]) {
    put_block_weighted2(dst, stride, src0, src1, 16, h, wparams);
}