use nihav_core::frame::*;
use nihav_codec_support::codecs::MV;
use nihav_codec_support::codecs::blockdsp::*;

#[cfg(not(debug_assertions))]
mod release;
#[cfg(not(debug_assertions))]
use release::*;
#[cfg(debug_assertions)]
mod debug;
#[cfg(debug_assertions)]
use debug::*;

/// Common signature of the block motion-compensation (luma interpolation) routines.
type MCFunc = fn (dst: &mut [u8], dstride: usize, src: &[u8], sstride: usize, h: usize);

// Clamps a 16-bit intermediate value to the 0..255 pixel range.
fn clip_u8(val: i16) -> u8 { val.max(0).min(255) as u8 }

/// Motion compensation and weighted prediction helpers for H.264 decoding.
pub struct H264MC {
    /// Scratch buffer used when averaging two predictions.
    avg_buf: NAVideoBufferRef<u8>,
    /// Weighted block copy functions for 2-, 4-, 8- and 16-pixel-wide blocks.
    pub put_block_weighted: [fn (dst: &mut [u8], stride: usize, src: &[u8], h: usize, wparams: [i8; 3]); 4],
    /// Bidirectional weighted block copy functions for 2-, 4-, 8- and 16-pixel-wide blocks.
    pub put_block_weighted2: [fn (dst: &mut [u8], stride: usize, src0: &[u8], src1: &[u8], h: usize, wparams: [i8; 5]); 4],
}

impl H264MC {
    pub fn new(avg_buf: NAVideoBufferRef<u8>) -> Self {
        Self {
            avg_buf,
            put_block_weighted: [put_blk_w_2, put_blk_w_4, put_blk_w_8, put_blk_w_16],
            put_block_weighted2: [put_blk_w2_2, put_blk_w2_4, put_blk_w2_8, put_blk_w2_16],
        }
    }
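
    // A minimal construction sketch (illustrative, not part of the original
    // module): `avg_buf` is a small scratch video buffer large enough for a
    // 16x16 macroblock in the decoded chroma format. Assuming the usual
    // nihav_core allocation helpers, it could be obtained roughly like this
    // (exact parameters such as the alignment are assumptions):
    //
    //     let vinfo = NAVideoInfo::new(16, 16, false, YUV420_FORMAT);
    //     let avg_buf = alloc_video_buffer(vinfo, 4).unwrap().get_vbuf().unwrap();
    //     let mut mc = H264MC::new(avg_buf);
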
    /// Performs motion compensation for a luma block and the corresponding
    /// chroma blocks, writing the result into the current frame at
    /// `(xpos, ypos)`. Areas that fall outside the reference picture are
    /// handled by edge emulation into a temporary buffer.
    pub fn do_mc(&mut self, frm: &mut NASimpleVideoFrame<u8>, refpic: NAVideoBufferRef<u8>, xpos: usize, ypos: usize, w: usize, h: usize, mv: MV) {
        let mut ebuf = [0u8; 22 * 22];
        // Luma: quarter-pel motion vector split into an integer part and a
        // sub-pel interpolation mode.
        let mvx = mv.x >> 2;
        let mvy = mv.y >> 2;
        let mode = ((mv.x & 3) + (mv.y & 3) * 4) as usize;
        // Sub-pel interpolation needs two extra rows/columns before and three
        // after the source block.
        let pre = if mode != 0 { 2isize } else { 0 };
        let post = if mode != 0 { 3isize } else { 0 };
        let (yw, yh) = refpic.get_dimensions(0);
        let src = refpic.get_data();
        let systride = refpic.get_stride(0);
        let src_x = (xpos as isize) + (mvx as isize);
        let src_y = (ypos as isize) + (mvy as isize);
        let (ysrc, ystride) = if (src_x - pre < 0) || (src_x + (w as isize) + post > (yw as isize)) || (src_y - pre < 0) || (src_y + (h as isize) + post > (yh as isize)) {
                let add = (pre + post) as usize;
                edge_emu(&refpic, src_x - pre, src_y - pre, w + add, h + add, &mut ebuf, 22, 0, 0);
                (ebuf.as_slice(), 22)
            } else {
                (&src[refpic.get_offset(0) + ((src_x - pre) as usize) + ((src_y - pre) as usize) * systride..], systride)
            };
        let wmode = match w {
                4 => 0,
                8 => 1,
                _ => 2,
            };
        (H264_LUMA_INTERP[wmode][mode])(&mut frm.data[frm.offset[0] + xpos + ypos * frm.stride[0]..], frm.stride[0], ysrc, ystride, h);

        // Chroma: eighth-pel motion vector, planes are half resolution (4:2:0).
        let (cw, ch) = refpic.get_dimensions(1);
        let mvx = mv.x >> 3;
        let mvy = mv.y >> 3;
        let dx = (mv.x & 7) as u16;
        let dy = (mv.y & 7) as u16;
        let src_x = ((xpos >> 1) as isize) + (mvx as isize);
        let src_y = ((ypos >> 1) as isize) + (mvy as isize);
        let suoff = refpic.get_offset(1);
        let svoff = refpic.get_offset(2);
        let sustride = refpic.get_stride(1);
        let svstride = refpic.get_stride(2);
        let cbw = w / 2;
        let cbh = h / 2;
        let (csrc, cstride) = if (src_x < 0) || (src_x + (cbw as isize) + 1 > (cw as isize)) || (src_y < 0) || (src_y + (cbh as isize) + 1 > (ch as isize)) {
                edge_emu(&refpic, src_x, src_y, cbw + 1, cbh + 1, &mut ebuf, 18, 1, 4);
                edge_emu(&refpic, src_x, src_y, cbw + 1, cbh + 1, &mut ebuf[9..], 18, 2, 4);
                ([&ebuf, &ebuf[9..]], [18, 18])
            } else {
                ([&src[suoff + (src_x as usize) + (src_y as usize) * sustride..],
                  &src[svoff + (src_x as usize) + (src_y as usize) * svstride..]],
                 [sustride, svstride])
            };
        for chroma in 1..3 {
            let off = frm.offset[chroma] + xpos / 2 + (ypos / 2) * frm.stride[chroma];
            chroma_interp(&mut frm.data[off..], frm.stride[chroma], csrc[chroma - 1], cstride[chroma - 1], dx, dy, cbw, cbh);
        }
    }

    /// Like `do_mc()`, but writes the interpolated luma and chroma into the
    /// caller-provided scratch blocks `ydst`, `udst` and `vdst`, all of which
    /// are assumed to have a stride of 16.
    pub fn mc_blocks(&mut self, ydst: &mut [u8], udst: &mut [u8], vdst: &mut [u8], refpic: NAVideoBufferRef<u8>, xpos: usize, ypos: usize, w: usize, h: usize, mv: MV) {
        let mode = ((mv.x & 3) + (mv.y & 3) * 4) as usize;

        let pre = if mode != 0 { 2 } else { 0 };
        let post = if mode != 0 { 3 } else { 0 };
        let (width, height) = refpic.get_dimensions(0);
        let sx = (xpos as isize) + ((mv.x >> 2) as isize);
        let sy = (ypos as isize) + ((mv.y >> 2) as isize);

        const EBUF_STRIDE: usize = 32;
        let mut ebuf = [0u8; EBUF_STRIDE * (16 + 2 + 3)];

        let wmode = match w {
                4 => 0,
                8 => 1,
                _ => 2,
            };
        if (sx - pre < 0) || (sx + (w as isize) + post > (width as isize)) ||
           (sy - pre < 0) || (sy + (h as isize) + post > (height as isize)) {
            let edge = (pre + post) as usize;
            edge_emu(&refpic, sx - pre, sy - pre, w + edge, h + edge,
                     &mut ebuf, EBUF_STRIDE, 0, 0);
            (H264_LUMA_INTERP[wmode][mode])(ydst, 16, &ebuf, EBUF_STRIDE, h);
        } else {
            let sstride = refpic.get_stride(0);
            let soff = refpic.get_offset(0);
            let sdta = refpic.get_data();
            let sbuf: &[u8] = sdta.as_slice();
            let saddr = soff + ((sx - pre) as usize) + ((sy - pre) as usize) * sstride;
            (H264_LUMA_INTERP[wmode][mode])(ydst, 16, &sbuf[saddr..], sstride, h);
        }

        let (cw, ch) = refpic.get_dimensions(1);
        let mvx = mv.x >> 3;
        let mvy = mv.y >> 3;
        let dx = (mv.x & 7) as u16;
        let dy = (mv.y & 7) as u16;
        let src_x = ((xpos >> 1) as isize) + (mvx as isize);
        let src_y = ((ypos >> 1) as isize) + (mvy as isize);
        let suoff = refpic.get_offset(1);
        let svoff = refpic.get_offset(2);
        let sustride = refpic.get_stride(1);
        let svstride = refpic.get_stride(2);
        let src = refpic.get_data();
        let cbw = w / 2;
        let cbh = h / 2;
        let (csrc, cstride) = if (src_x < 0) || (src_x + (cbw as isize) + 1 > (cw as isize)) || (src_y < 0) || (src_y + (cbh as isize) + 1 > (ch as isize)) {
                edge_emu(&refpic, src_x, src_y, cbw + 1, cbh + 1, &mut ebuf, 18, 1, 4);
                edge_emu(&refpic, src_x, src_y, cbw + 1, cbh + 1, &mut ebuf[9..], 18, 2, 4);
                ([&ebuf, &ebuf[9..]], [18, 18])
            } else {
                ([&src[suoff + (src_x as usize) + (src_y as usize) * sustride..],
                  &src[svoff + (src_x as usize) + (src_y as usize) * svstride..]],
                 [sustride, svstride])
            };
        chroma_interp(udst, 16, csrc[0], cstride[0], dx, dy, cbw, cbh);
        chroma_interp(vdst, 16, csrc[1], cstride[1], dx, dy, cbw, cbh);
    }

    /// Averages the block previously rendered into the internal buffer with
    /// the contents of `dst` using rounding, i.e. `(dst + src + 1) >> 1`.
    pub fn avg(&mut self, dst: &mut [u8], dstride: usize, bw: usize, bh: usize, comp: usize) {
        let afrm = NASimpleVideoFrame::from_video_buf(&mut self.avg_buf).unwrap();
        let src = &afrm.data[afrm.offset[comp]..];
        let sstride = afrm.stride[comp];
        for (dline, sline) in dst.chunks_mut(dstride).zip(src.chunks(sstride)).take(bh) {
            for (dst, src) in dline.iter_mut().zip(sline.iter()).take(bw) {
                *dst = ((u16::from(*dst) + u16::from(*src) + 1) >> 1) as u8;
            }
        }
    }

    /// Motion-compensates a block into the internal averaging buffer and then
    /// averages the result into the destination frame.
    pub fn do_mc_avg(&mut self, frm: &mut NASimpleVideoFrame<u8>, refpic: NAVideoBufferRef<u8>, xpos: usize, ypos: usize, w: usize, h: usize, mv: MV) {
        let mut abuf = self.avg_buf.clone();
        let mut afrm = NASimpleVideoFrame::from_video_buf(&mut abuf).unwrap();
        // The block is rendered at (0, 0) in the scratch buffer, so the block
        // position is folded into the motion vector (quarter-pel units).
        let amv = MV { x: mv.x + (xpos as i16) * 4, y: mv.y + (ypos as i16) * 4 };
        self.do_mc(&mut afrm, refpic, 0, 0, w, h, amv);
        for comp in 0..3 {
            let shift = if comp == 0 { 0 } else { 1 };
            self.avg(&mut frm.data[frm.offset[comp] + (xpos >> shift) + (ypos >> shift) * frm.stride[comp]..], frm.stride[comp], w >> shift, h >> shift, comp);
        }
    }

    /// Fills the block with mid-grey (128) in all three planes.
    pub fn gray_block(&mut self, frm: &mut NASimpleVideoFrame<u8>, x: usize, y: usize, w: usize, h: usize) {
        let yoff = frm.offset[0] + x + y * frm.stride[0];
        let coff = [frm.offset[1] + x / 2 + y / 2 * frm.stride[1],
                    frm.offset[2] + x / 2 + y / 2 * frm.stride[2]];
        for row in frm.data[yoff..].chunks_mut(frm.stride[0]).take(h) {
            for el in row[..w].iter_mut() {
                *el = 128;
            }
        }
        for chroma in 0..2 {
            for row in frm.data[coff[chroma]..].chunks_mut(frm.stride[chroma + 1]).take(h / 2) {
                for el in row[..w / 2].iter_mut() {
                    *el = 128;
                }
            }
        }
    }
}

/// Weighted prediction from a single source:
/// `dst = clip(((src * weight + bias) >> wshift) + offset)` with
/// `wparams = [weight, offset, wshift]` and `bias = 1 << (wshift - 1)`
/// (zero when `wshift` is zero). Source rows are read with a stride of 16.
fn put_block_weighted(dst: &mut [u8], stride: usize, src: &[u8], w: usize, h: usize, wparams: [i8; 3]) {
    let weight = i16::from(wparams[0]);
    let offset = i16::from(wparams[1]);
    let wshift = wparams[2] as u8;
    let bias = (1 << wshift) >> 1;

    for (drow, srow) in dst.chunks_mut(stride).zip(src.chunks(16)).take(h) {
        for (dst, &src) in drow[..w].iter_mut().zip(srow.iter()) {
            *dst = clip_u8(((i16::from(src) * weight + bias) >> wshift) + offset);
        }
    }
}
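
// A worked example of the formula above (illustrative values, not taken from
// the original code): with wparams = [2, 10, 1] a source sample of 100 gives
// bias = 1, (100 * 2 + 1) >> 1 = 100, plus the offset 10 -> 110; with
// wparams = [1, 0, 0] the function is a plain copy.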

// Width-specialised wrappers used to fill the function tables in `H264MC`.
fn put_blk_w_2(dst: &mut [u8], stride: usize, src: &[u8], h: usize, wparams: [i8; 3]) {
    put_block_weighted(dst, stride, src, 2, h, wparams);
}
fn put_blk_w_4(dst: &mut [u8], stride: usize, src: &[u8], h: usize, wparams: [i8; 3]) {
    put_block_weighted(dst, stride, src, 4, h, wparams);
}
fn put_blk_w_8(dst: &mut [u8], stride: usize, src: &[u8], h: usize, wparams: [i8; 3]) {
    put_block_weighted(dst, stride, src, 8, h, wparams);
}
fn put_blk_w_16(dst: &mut [u8], stride: usize, src: &[u8], h: usize, wparams: [i8; 3]) {
    put_block_weighted(dst, stride, src, 16, h, wparams);
}

/// Weighted bidirectional prediction from two sources:
/// `dst = clip(((src0 * weight0 + src1 * weight1 + bias) >> (wshift + 1)) + offset)`
/// with `wparams = [weight0, offset0, weight1, offset1, wshift]`,
/// `offset = (offset0 + offset1 + 1) >> 1` and `bias = 1 << wshift`.
/// Both source rows are read with a stride of 16.
fn put_block_weighted2(dst: &mut [u8], stride: usize, src0: &[u8], src1: &[u8], w: usize, h: usize, wparams: [i8; 5]) {
    let weight0 = i16::from(wparams[0]);
    let offset0 = i16::from(wparams[1]);
    let weight1 = i16::from(wparams[2]);
    let offset1 = i16::from(wparams[3]);
    let wshift = (wparams[4] as u8) + 1;
    let offset = (offset0 + offset1 + 1) >> 1;
    let bias = (1 << wshift) >> 1;

    for (drow, (srow0, srow1)) in dst.chunks_mut(stride).zip(src0.chunks(16).zip(src1.chunks(16))).take(h) {
        for (dst, (&src0, &src1)) in drow[..w].iter_mut().zip(srow0.iter().zip(srow1.iter())) {
            *dst = clip_u8(((i16::from(src0) * weight0 + i16::from(src1) * weight1 + bias) >> wshift) + offset);
        }
    }
}
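
// Illustrative special case (not from the original code): with unit weights
// and zero offsets, wparams = [1, 0, 1, 0, 0], the expression reduces to
// clip((src0 + src1 + 1) >> 1), i.e. the same rounded average computed by
// `H264MC::avg()` above.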

fn put_blk_w2_2(dst: &mut [u8], stride: usize, src0: &[u8], src1: &[u8], h: usize, wparams: [i8; 5]) {
    put_block_weighted2(dst, stride, src0, src1, 2, h, wparams);
}
fn put_blk_w2_4(dst: &mut [u8], stride: usize, src0: &[u8], src1: &[u8], h: usize, wparams: [i8; 5]) {
    put_block_weighted2(dst, stride, src0, src1, 4, h, wparams);
}
fn put_blk_w2_8(dst: &mut [u8], stride: usize, src0: &[u8], src1: &[u8], h: usize, wparams: [i8; 5]) {
    put_block_weighted2(dst, stride, src0, src1, 8, h, wparams);
}
fn put_blk_w2_16(dst: &mut [u8], stride: usize, src0: &[u8], src1: &[u8], h: usize, wparams: [i8; 5]) {
    put_block_weighted2(dst, stride, src0, src1, 16, h, wparams);
}
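
// A small self-contained sanity test for the weighted prediction helpers,
// added here as an illustrative sketch (it is not part of the original
// module); the expected values follow directly from the formulas documented
// above.
#[cfg(test)]
mod tests {
    use super::*;

    #[test]
    fn weighted_pred_basics() {
        let src = [100u8; 16 * 4];
        let mut dst = [0u8; 16 * 4];
        // weight = 1, offset = 0, shift = 0 -> plain copy
        put_blk_w_4(&mut dst, 16, &src, 4, [1, 0, 0]);
        assert!(dst.chunks(16).take(4).all(|row| row[..4] == [100; 4]));
        // weight = 2, offset = 10, shift = 1 -> ((100 * 2 + 1) >> 1) + 10 = 110
        put_blk_w_4(&mut dst, 16, &src, 4, [2, 10, 1]);
        assert!(dst.chunks(16).take(4).all(|row| row[..4] == [110; 4]));
        // two equal sources with unit weights and zero offsets average back to the input
        put_blk_w2_4(&mut dst, 16, &src, &src, 4, [1, 0, 1, 0, 0]);
        assert!(dst.chunks(16).take(4).all(|row| row[..4] == [100; 4]));
    }
}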