use nihav_core::frame::*;
use nihav_codec_support::codecs::blockdsp::*;
use nihav_codec_support::codecs::MV;
pub const CHROMA_QUANTS: [u8; 52] = [
     0,  1,  2,  3,  4,  5,  6,  7,  8,  9, 10, 11, 12, 13, 14, 15,
    16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 29, 30,
    31, 32, 32, 33, 34, 34, 35, 35, 36, 36, 37, 37, 37, 38, 38, 38,
    39, 39, 39, 39
];

pub const CHROMA_DC_SCAN: [usize; 4] = [ 0, 1, 2, 3 ];
pub const ZIGZAG: [usize; 16] = [
    0, 1, 4, 8, 5, 2, 3, 6, 9, 12, 13, 10, 7, 11, 14, 15
];
pub const ZIGZAG1: [usize; 15] = [
    0, 3, 7, 4, 1, 2, 5, 8, 11, 12, 9, 6, 10, 13, 14
];
/*pub const IL_SCAN: [usize; 16] = [
    0, 4, 1, 8, 12, 5, 9, 13, 2, 6, 10, 14, 3, 7, 11, 15
];*/
pub const ZIGZAG8X8: [usize; 64] = [
     0,  1,  8, 16,  9,  2,  3, 10,
    17, 24, 32, 25, 18, 11,  4,  5,
    12, 19, 26, 33, 40, 48, 41, 34,
    27, 20, 13,  6,  7, 14, 21, 28,
    35, 42, 49, 56, 57, 50, 43, 36,
    29, 22, 15, 23, 30, 37, 44, 51,
    58, 59, 52, 45, 38, 31, 39, 46,
    53, 60, 61, 54, 47, 55, 62, 63
];

const LEVEL_SCALE: [[i16; 6]; 3] = [
    [ 10, 11, 13, 14, 16, 18 ],
    [ 16, 18, 20, 23, 25, 29 ],
    [ 13, 14, 16, 18, 20, 23 ]
];

pub fn chroma_dc_transform(blk: &mut [i16; 4], qp: u8) {
    let t0 = blk[0] + blk[2];
    let t1 = blk[0] - blk[2];
    let t2 = blk[1] + blk[3];
    let t3 = blk[1] - blk[3];
    blk[0] = t0 + t2;
    blk[1] = t0 - t2;
    blk[2] = t1 + t3;
    blk[3] = t1 - t3;
    if qp < 6 {
        let mul = LEVEL_SCALE[0][qp as usize];
        for el in blk.iter_mut() {
            *el = el.wrapping_mul(mul) >> 1;
        }
    } else {
        let mul = LEVEL_SCALE[0][(qp % 6) as usize];
        let shift = qp / 6 - 1;
        for el in blk.iter_mut() {
            *el = el.wrapping_mul(mul) << shift;
        }
    }
}

macro_rules! transform {
    (luma_dc; $a: expr, $b: expr, $c: expr, $d: expr) => ({
        let t0 = $a.wrapping_add($c);
        let t1 = $a.wrapping_sub($c);
        let t2 = $b.wrapping_add($d);
        let t3 = $b.wrapping_sub($d);
        $a = t0.wrapping_add(t2);
        $b = t1.wrapping_add(t3);
        $c = t1.wrapping_sub(t3);
        $d = t0.wrapping_sub(t2);
    });
    ($a: expr, $b: expr, $c: expr, $d: expr, $shift: expr) => ({
        let t0 = $a.wrapping_add($c);
        let t1 = $a.wrapping_sub($c);
        let t2 = ($b >> 1).wrapping_sub($d);
        let t3 = $b.wrapping_add($d >> 1);
        let bias = 1 << $shift >> 1;
        $a = t0.wrapping_add(t3).wrapping_add(bias) >> $shift;
        $b = t1.wrapping_add(t2).wrapping_add(bias) >> $shift;
        $c = t1.wrapping_sub(t2).wrapping_add(bias) >> $shift;
        $d = t0.wrapping_sub(t3).wrapping_add(bias) >> $shift;
    });
    ($a: expr, $b: expr, $c: expr, $d: expr, $e: expr, $f: expr, $g: expr, $h: expr) => {
        let e0 = $a + $e;
        let e1 = -$d + $f - $h - ($h >> 1);
        let e2 = $a - $e;
        let e3 = $b + $h - $d - ($d >> 1);
        let e4 = ($c >> 1) - $g;
        let e5 = -$b + $h + $f + ($f >> 1);
        let e6 = $c + ($g >> 1);
        let e7 = $d + $f + $b + ($b >> 1);

        let f0 = e0 + e6;
        let f1 = e1 + (e7 >> 2);
        let f2 = e2 + e4;
        let f3 = e3 + (e5 >> 2);
        let f4 = e2 - e4;
        let f5 = (e3 >> 2) - e5;
        let f6 = e0 - e6;
        let f7 = e7 - (e1 >> 2);

        $a = f0 + f7;
        $b = f2 + f5;
        $c = f4 + f3;
        $d = f6 + f1;
        $e = f6 - f1;
        $f = f4 - f3;
        $g = f2 - f5;
        $h = f0 - f7;
    };
}

pub fn idct_luma_dc(blk: &mut [i16; 16], qp: u8) {
    if qp < 12 {
        let mul = LEVEL_SCALE[0][(qp % 6) as usize];
        let shift = 2 - qp / 6;
        let bias = 1 << shift >> 1;
        for el in blk.iter_mut() {
            *el = el.wrapping_mul(mul).wrapping_add(bias) >> shift;
        }
    } else {
        let mul = LEVEL_SCALE[0][(qp % 6) as usize];
        let shift = qp / 6 - 2;
        for el in blk.iter_mut() {
            *el = el.wrapping_mul(mul) << shift;
        }
    }
    for i in 0..4 {
        transform!(luma_dc; blk[i], blk[i + 4], blk[i + 8], blk[i + 12]);
    }
    for row in blk.chunks_mut(4) {
        transform!(luma_dc; row[0], row[1], row[2], row[3]);
    }
}

pub fn idct(blk: &mut [i16; 16], qp: u8, quant_dc: bool) {
    const BLK_INDEX: [usize; 16] = [
        0, 2, 0, 2,
        2, 1, 2, 1,
        0, 2, 0, 2,
        2, 1, 2, 1
    ];
    let qidx = (qp % 6) as usize;
    let shift = qp / 6;
    let start = if quant_dc { 0 } else { 1 };
    for (el, &idx) in blk.iter_mut().zip(BLK_INDEX.iter()).skip(start) {
        *el = (*el * LEVEL_SCALE[idx][qidx]) << shift;
    }
    for i in 0..4 {
        transform!(blk[i], blk[i + 4], blk[i + 8], blk[i + 12], 0);
    }
    for row in blk.chunks_mut(4) {
        transform!(row[0], row[1], row[2], row[3], 6);
    }
}

pub fn idct_dc(blk: &mut [i16; 16], qp: u8, quant_dc: bool) {
    let dc = if quant_dc {
            (blk[0] * LEVEL_SCALE[0][(qp % 6) as usize]) << (qp / 6)
        } else {
            blk[0]
        };
    *blk = [(dc + 0x20) >> 6; 16];
}

const QMAT_8X8: [[u8; 16]; 6] = [
    [
        20, 19, 25, 24,
        19, 18, 24, 18,
        25, 24, 32, 24,
        24, 18, 24, 18
    ], [
        22, 21, 28, 26,
        21, 19, 26, 19,
        28, 26, 35, 26,
        26, 19, 26, 19
    ], [
        26, 24, 33, 31,
        24, 23, 31, 23,
        33, 31, 42, 31,
        31, 23, 31, 23
    ], [
        28, 26, 35, 33,
        26, 25, 33, 25,
        35, 33, 45, 33,
        33, 25, 33, 25
    ], [
        32, 30, 40, 38,
        30, 28, 38, 28,
        40, 38, 51, 38,
        38, 28, 38, 28
    ], [
        36, 34, 46, 43,
        34, 32, 43, 32,
        46, 43, 58, 43,
        43, 32, 43, 32
    ]
];

pub fn dequant8x8(blk: &mut [i16; 64], slist: &[u8; 64]) {
    for (el, &scan) in blk.iter_mut().zip(ZIGZAG8X8.iter()) {
        if *el != 0 {
            *el = el.wrapping_mul(i16::from(slist[scan]));
        }
    }
}

pub fn idct8x8(blk: &mut [i16; 64], qp: u8) {
    let mut tmp = [0i32; 64];
    let qmat = &QMAT_8X8[(qp % 6) as usize];
    if qp >= 36 {
        let shift = qp / 6 - 6;
        for (i, (dst, &src)) in tmp.iter_mut().zip(blk.iter()).enumerate() {
            let x = i & 7;
            let y = i >> 3;
            let idx = (x & 3) + (y & 3) * 4;
            *dst = i32::from(src).wrapping_mul(i32::from(qmat[idx])) << shift;
        }
    } else {
        let shift = 6 - qp / 6;
        let bias = (1 << shift) >> 1;
        for (i, (dst, &src)) in tmp.iter_mut().zip(blk.iter()).enumerate() {
            let x = i & 7;
            let y = i >> 3;
            let idx = (x & 3) + (y & 3) * 4;
            *dst = i32::from(src).wrapping_mul(i32::from(qmat[idx])).wrapping_add(bias) >> shift;
        }
    }
    for row in tmp.chunks_mut(8) {
        transform!(row[0], row[1], row[2], row[3], row[4], row[5], row[6], row[7]);
    }
    for col in 0..8 {
        transform!(tmp[col], tmp[col + 8], tmp[col + 8 * 2], tmp[col + 8 * 3],
                   tmp[col + 8 * 4], tmp[col + 8 * 5], tmp[col + 8 * 6], tmp[col + 8 * 7]);
    }
    for (dst, &src) in blk.iter_mut().zip(tmp.iter()) {
        *dst = ((src + 0x20) >> 6) as i16;
    }
}

pub fn add_coeffs(dst: &mut [u8], offset: usize, stride: usize, coeffs: &[i16]) {
    let out = &mut dst[offset..][..stride * 3 + 4];
    for (line, src) in out.chunks_mut(stride).take(4).zip(coeffs.chunks(4)) {
        for (dst, src) in line.iter_mut().take(4).zip(src.iter()) {
            *dst = (i32::from(*dst) + i32::from(*src)).max(0).min(255) as u8;
        }
    }
}

pub fn add_coeffs8(dst: &mut [u8], offset: usize, stride: usize, coeffs: &[i16; 64]) {
    let out = &mut dst[offset..];
    for (line, src) in out.chunks_mut(stride).take(8).zip(coeffs.chunks(8)) {
        for (dst, src) in line.iter_mut().take(8).zip(src.iter()) {
            *dst = (i32::from(*dst) + i32::from(*src)).max(0).min(255) as u8;
        }
    }
}

pub fn avg(dst: &mut [u8], dstride: usize,
           src: &[u8], sstride: usize, bw: usize, bh: usize) {
    for (dline, sline) in dst.chunks_mut(dstride).zip(src.chunks(sstride)).take(bh) {
        for (dst, src) in dline.iter_mut().zip(sline.iter()).take(bw) {
            *dst = ((u16::from(*dst) + u16::from(*src) + 1) >> 1) as u8;
        }
    }
}

fn clip8(val: i16) -> u8 { val.max(0).min(255) as u8 }

fn ipred_dc128(buf: &mut [u8], mut idx: usize, stride: usize, bsize: usize) {
    for _ in 0..bsize {
        for x in 0..bsize { buf[idx + x] = 128; }
        idx += stride;
    }
}
fn ipred_ver(buf: &mut [u8], mut idx: usize, stride: usize, bsize: usize) {
    let oidx = idx - stride;
    for _ in 0..bsize {
        for x in 0..bsize { buf[idx + x] = buf[oidx + x]; }
        idx += stride;
    }
}
fn ipred_hor(buf: &mut [u8], mut idx: usize, stride: usize, bsize: usize) {
    for _ in 0..bsize {
        for x in 0..bsize { buf[idx + x] = buf[idx - 1]; }
        idx += stride;
    }
}
fn ipred_dc(buf: &mut [u8], mut idx: usize, stride: usize, bsize: usize, shift: u8) {
    let mut adc: u16 = 0;
    for i in 0..bsize { adc += u16::from(buf[idx - stride + i]); }
    for i in 0..bsize { adc += u16::from(buf[idx - 1 + i * stride]); }
    let dc = ((adc + (1 << (shift - 1))) >> shift) as u8;

    for _ in 0..bsize {
        for x in 0..bsize { buf[idx + x] = dc; }
        idx += stride;
    }
}
fn ipred_left_dc(buf: &mut [u8], mut idx: usize, stride: usize, bsize: usize, shift: u8) {
    let mut adc: u16 = 0;
    for i in 0..bsize { adc += u16::from(buf[idx - 1 + i * stride]); }
    let dc = ((adc + (1 << (shift - 1))) >> shift) as u8;

    for _ in 0..bsize {
        for x in 0..bsize { buf[idx + x] = dc; }
        idx += stride;
    }
}
fn ipred_top_dc(buf: &mut [u8], mut idx: usize, stride: usize, bsize: usize, shift: u8) {
    let mut adc: u16 = 0;
    for i in 0..bsize { adc += u16::from(buf[idx - stride + i]); }
    let dc = ((adc + (1 << (shift - 1))) >> shift) as u8;

    for _ in 0..bsize {
        for x in 0..bsize { buf[idx + x] = dc; }
        idx += stride;
    }
}

fn load_top(dst: &mut [u16], buf: &mut [u8], idx: usize, stride: usize, len: usize) {
    for i in 0..len { dst[i] = u16::from(buf[idx - stride + i]); }
}
fn load_left(dst: &mut [u16], buf: &mut [u8], idx: usize, stride: usize, len: usize) {
    for i in 0..len { dst[i] = u16::from(buf[idx - 1 + i * stride]); }
}

fn ipred_4x4_ver(buf: &mut [u8], idx: usize, stride: usize, _tr: &[u8]) {
    ipred_ver(buf, idx, stride, 4);
}
fn ipred_4x4_hor(buf: &mut [u8], idx: usize, stride: usize, _tr: &[u8]) {
    ipred_hor(buf, idx, stride, 4);
}
fn ipred_4x4_diag_down_left(buf: &mut [u8], idx: usize, stride: usize, tr: &[u8]) {
    let mut t: [u16; 9] = [0; 9];
    load_top(&mut t, buf, idx, stride, 4);
    for i in 0..4 {
        t[i + 4] = u16::from(tr[i]);
    }
    t[8] = t[7];

    let dst = &mut buf[idx..];
    for i in 0..4 {
        dst[i] = ((t[i] + 2 * t[i + 1] + t[i + 2] + 2) >> 2) as u8;
    }
    let dst = &mut buf[idx + stride..];
    for i in 0..4 {
        dst[i] = ((t[i + 1] + 2 * t[i + 2] + t[i + 3] + 2) >> 2) as u8;
    }
    let dst = &mut buf[idx + stride * 2..];
    for i in 0..4 {
        dst[i] = ((t[i + 2] + 2 * t[i + 3] + t[i + 4] + 2) >> 2) as u8;
    }
    let dst = &mut buf[idx + stride * 3..];
    for i in 0..4 {
        dst[i] = ((t[i + 3] + 2 * t[i + 4] + t[i + 5] + 2) >> 2) as u8;
    }
}
fn ipred_4x4_diag_down_right(buf: &mut [u8], idx: usize, stride: usize, _tr: &[u8]) {
    let mut t: [u16; 5] = [0; 5];
    let mut l: [u16; 5] = [0; 5];
    load_top(&mut t, buf, idx - 1, stride, 5);
    load_left(&mut l, buf, idx - stride, stride, 5);
    let dst = &mut buf[idx..];

    for j in 0..4 {
        for i in 0..j {
            dst[i + j * stride] = ((l[j - i - 1] + 2 * l[j - i] + l[j - i + 1] + 2) >> 2) as u8;
        }
        dst[j + j * stride] = ((l[1] + 2 * l[0] + t[1] + 2) >> 2) as u8;
        for i in (j + 1)..4 {
            dst[i + j * stride] = ((t[i - j - 1] + 2 * t[i - j] + t[i - j + 1] + 2) >> 2) as u8;
        }
    }
}
fn ipred_4x4_ver_right(buf: &mut [u8], idx: usize, stride: usize, _tr: &[u8]) {
    let mut t: [u16; 5] = [0; 5];
    let mut l: [u16; 5] = [0; 5];
    load_top(&mut t, buf, idx - 1, stride, 5);
    load_left(&mut l, buf, idx - stride, stride, 5);
    let dst = &mut buf[idx..];

    for j in 0..4 {
        for i in 0..4 {
            let zvr = ((2 * i) as i8) - (j as i8);
            let pix;
            if zvr >= 0 {
                if (zvr & 1) == 0 {
                    pix = (t[i - (j >> 1)] + t[i - (j >> 1) + 1] + 1) >> 1;
                } else {
                    pix = (t[i - (j >> 1) - 1] + 2 * t[i - (j >> 1)] + t[i - (j >> 1) + 1] + 2) >> 2;
                }
            } else {
                if zvr == -1 {
                    pix = (l[1] + 2 * l[0] + t[1] + 2) >> 2;
                } else {
                    pix = (l[j] + 2 * l[j - 1] + l[j - 2] + 2) >> 2;
                }
            }
            dst[i + j * stride] = pix as u8;
        }
    }
}
fn ipred_4x4_ver_left(buf: &mut [u8], idx: usize, stride: usize, tr: &[u8]) {
    let mut t: [u16; 8] = [0; 8];
    load_top(&mut t, buf, idx, stride, 4);
    for i in 0..4 { t[i + 4] = u16::from(tr[i]); }
    let dst = &mut buf[idx..];

    dst[0 + 0 * stride] = ((t[0] + t[1] + 1) >> 1) as u8;
    let pix = ((t[1] + t[2] + 1) >> 1) as u8;
    dst[1 + 0 * stride] = pix;
    dst[0 + 2 * stride] = pix;
    let pix = ((t[2] + t[3] + 1) >> 1) as u8;
    dst[2 + 0 * stride] = pix;
    dst[1 + 2 * stride] = pix;
    let pix = ((t[3] + t[4] + 1) >> 1) as u8;
    dst[3 + 0 * stride] = pix;
    dst[2 + 2 * stride] = pix;
    dst[3 + 2 * stride] = ((t[4] + t[5] + 1) >> 1) as u8;
    dst[0 + 1 * stride] = ((t[0] + 2 * t[1] + t[2] + 2) >> 2) as u8;
    let pix = ((t[1] + 2 * t[2] + t[3] + 2) >> 2) as u8;
    dst[1 + 1 * stride] = pix;
    dst[0 + 3 * stride] = pix;
    let pix = ((t[2] + 2 * t[3] + t[4] + 2) >> 2) as u8;
    dst[2 + 1 * stride] = pix;
    dst[1 + 3 * stride] = pix;
    let pix = ((t[3] + 2 * t[4] + t[5] + 2) >> 2) as u8;
    dst[3 + 1 * stride] = pix;
    dst[2 + 3 * stride] = pix;
    dst[3 + 3 * stride] = ((t[4] + 2 * t[5] + t[6] + 2) >> 2) as u8;
}
fn ipred_4x4_hor_down(buf: &mut [u8], idx: usize, stride: usize, _tr: &[u8]) {
    let mut t: [u16; 5] = [0; 5];
    let mut l: [u16; 5] = [0; 5];
    load_top(&mut t, buf, idx - 1, stride, 5);
    load_left(&mut l, buf, idx - stride, stride, 5);
    let dst = &mut buf[idx..];

    for j in 0..4 {
        for i in 0..4 {
            let zhd = ((2 * j) as i8) - (i as i8);
            let pix;
            if zhd >= 0 {
                if (zhd & 1) == 0 {
                    pix = (l[j - (i >> 1)] + l[j - (i >> 1) + 1] + 1) >> 1;
                } else {
                    pix = (l[j - (i >> 1) - 1] + 2 * l[j - (i >> 1)] + l[j - (i >> 1) + 1] + 2) >> 2;
                }
            } else {
                if zhd == -1 {
                    pix = (l[1] + 2 * l[0] + t[1] + 2) >> 2;
                } else {
                    pix = (t[i - 2] + 2 * t[i - 1] + t[i] + 2) >> 2;
                }
            }
            dst[i + j * stride] = pix as u8;
        }
    }
}
fn ipred_4x4_hor_up(buf: &mut [u8], idx: usize, stride: usize, _tr: &[u8]) {
    let mut l: [u16; 8] = [0; 8];
    load_left(&mut l, buf, idx, stride, 8);
    let dst = &mut buf[idx..];

    dst[0 + 0 * stride] = ((l[0] + l[1] + 1) >> 1) as u8;
    dst[1 + 0 * stride] = ((l[0] + 2 * l[1] + l[2] + 2) >> 2) as u8;
    let pix = ((l[1] + l[2] + 1) >> 1) as u8;
    dst[2 + 0 * stride] = pix;
    dst[0 + 1 * stride] = pix;
    let pix = ((l[1] + 2 * l[2] + l[3] + 2) >> 2) as u8;
    dst[3 + 0 * stride] = pix;
    dst[1 + 1 * stride] = pix;
    let pix = ((l[2] + l[3] + 1) >> 1) as u8;
    dst[2 + 1 * stride] = pix;
    dst[0 + 2 * stride] = pix;
    let pix = ((l[2] + 3 * l[3] + 2) >> 2) as u8;
    dst[3 + 1 * stride] = pix;
    dst[1 + 2 * stride] = pix;
    dst[3 + 2 * stride] = l[3] as u8;
    dst[1 + 3 * stride] = l[3] as u8;
    dst[0 + 3 * stride] = l[3] as u8;
    dst[2 + 2 * stride] = l[3] as u8;
    dst[2 + 3 * stride] = l[3] as u8;
    dst[3 + 3 * stride] = l[3] as u8;
}
fn ipred_4x4_dc(buf: &mut [u8], idx: usize, stride: usize, _tr: &[u8]) {
    ipred_dc(buf, idx, stride, 4, 3);
}
fn ipred_4x4_left_dc(buf: &mut [u8], idx: usize, stride: usize, _tr: &[u8]) {
    ipred_left_dc(buf, idx, stride, 4, 2);
}
fn ipred_4x4_top_dc(buf: &mut [u8], idx: usize, stride: usize, _tr: &[u8]) {
    ipred_top_dc(buf, idx, stride, 4, 2);
}
fn ipred_4x4_dc128(buf: &mut [u8], idx: usize, stride: usize, _tr: &[u8]) {
    ipred_dc128(buf, idx, stride, 4);
}

pub struct IPred8Context {
    pub t:  [u8; 16],
    pub l:  [u8; 8],
    pub tl: u8,
}

impl IPred8Context {
    pub fn new() -> Self {
        Self {
            t:  [128; 16],
            l:  [128; 8],
            tl: 128,
        }
    }
    pub fn fill(&mut self, buf: &mut [u8], idx: usize, stride: usize, has_t: bool, has_tr: bool, has_l: bool, has_tl: bool) {
        let mut t = [0x80u8; 19];
        let mut l = [0x80u8; 11];
        if has_t {
            t[1..8 + 1].copy_from_slice(&buf[idx - stride..][..8]);
        }
        if has_tr {
            t[8 + 1..16 + 1].copy_from_slice(&buf[idx - stride + 8..][..8]);
            t[16 + 1] = t[15 + 1];
            t[17 + 1] = t[15 + 1];
        } else {
            let (t0, t1) = t.split_at_mut(8 + 1);
            for el in t1.iter_mut() {
                *el = t0[7 + 1];
            }
        }
        if has_l {
            for i in 0..8 {
                l[i + 1] = buf[idx - 1 + stride * i];
            }
            l[8 + 1] = l[7 + 1];
            l[9 + 1] = l[7 + 1];
        }
        if has_tl {
            t[0] = buf[idx - 1 - stride];
            l[0] = buf[idx - 1 - stride];
        } else {
            t[0] = t[1];
            l[0] = l[1];
        }

        for i in 0..16 {
            self.t[i] = ((u16::from(t[i]) + 2 * u16::from(t[i + 1]) + u16::from(t[i + 2]) + 2) >> 2) as u8;
        }
        for i in 0..8 {
            self.l[i] = ((u16::from(l[i]) + 2 * u16::from(l[i + 1]) + u16::from(l[i + 2]) + 2) >> 2) as u8;
        }
        self.tl = if has_t && has_l {
                ((u16::from(t[1]) + 2 * u16::from(t[0]) + u16::from(l[1]) + 2) >> 2) as u8
            } else if has_t {
                ((3 * u16::from(t[0]) + u16::from(t[1]) + 2) >> 2) as u8
            } else if has_l {
                ((3 * u16::from(l[0]) + u16::from(l[1]) + 2) >> 2) as u8
            } else {
                t[0]
            };
    }
}

fn ipred_y_8x8_ver(buf: &mut [u8], stride: usize, ctx: &IPred8Context) {
    for row in buf.chunks_mut(stride).take(8) {
        row[..8].copy_from_slice(&ctx.t[..8]);
    }
}
fn ipred_y_8x8_hor(buf: &mut [u8], stride: usize, ctx: &IPred8Context) {
    for (row, &l) in buf.chunks_mut(stride).zip(ctx.l.iter()).take(8) {
        row[..8].copy_from_slice(&[l; 8]);
    }
}
fn ipred_y_8x8_diag_down_left(buf: &mut [u8], stride: usize, ctx: &IPred8Context) {
    let mut t = [0u16; 16];
    for (dt, &st) in t.iter_mut().zip(ctx.t.iter()) {
        *dt = u16::from(st);
    }

    for (y, row) in buf.chunks_mut(stride).take(8).enumerate() {
        for (x, pix) in row.iter_mut().take(8).enumerate() {
            *pix = ((if (x != 7) || (y != 7) {
                    t[x + y] + 2 * t[x + y + 1] + t[x + y + 2]
                } else {
                    t[14] + 3 * t[15]
                } + 2) >> 2) as u8;
        }
    }
}
fn ipred_y_8x8_diag_down_right(buf: &mut [u8], stride: usize, ctx: &IPred8Context) {
    let mut t = [0u16; 9];
    t[0] = u16::from(ctx.tl);
    for (dt, &st) in t[1..].iter_mut().zip(ctx.t.iter()) {
        *dt = u16::from(st);
    }
    let mut l = [0u16; 9];
    l[0] = u16::from(ctx.tl);
    for (dl, &sl) in l[1..].iter_mut().zip(ctx.l.iter()) {
        *dl = u16::from(sl);
    }
    let diag = t[1] + 2 * t[0] + l[1];

    for (y, row) in buf.chunks_mut(stride).take(8).enumerate() {
        for (x, pix) in row.iter_mut().take(8).enumerate() {
            *pix = ((if x > y {
                    t[x - y - 1] + 2 * t[x - y] + t[x - y + 1]
                } else if x < y {
                    l[y - x - 1] + 2 * l[y - x] + l[y - x + 1]
                } else {
                    diag
                } + 2) >> 2) as u8;
        }
    }
}
fn ipred_y_8x8_ver_right(buf: &mut [u8], stride: usize, ctx: &IPred8Context) {
    let mut t = [0u16; 9];
    t[0] = u16::from(ctx.tl);
    for (dt, &st) in t[1..].iter_mut().zip(ctx.t.iter()) {
        *dt = u16::from(st);
    }
    let mut l = [0u16; 9];
    l[0] = u16::from(ctx.tl);
    for (dl, &sl) in l[1..].iter_mut().zip(ctx.l.iter()) {
        *dl = u16::from(sl);
    }

    for (y, row) in buf.chunks_mut(stride).take(8).enumerate() {
        for (x, pix) in row.iter_mut().take(8).enumerate() {
            let zvr = 2 * (x as i8) - (y as i8);
            *pix = if zvr >= 0 {
                    let ix = x - (y >> 1);
                    if (zvr & 1) == 0 {
                        (t[ix] + t[ix + 1] + 1) >> 1
                    } else {
                        (t[ix - 1] + 2 * t[ix] + t[ix + 1] + 2) >> 2
                    }
                } else if zvr == -1 {
                    (l[1] + 2 * l[0] + t[0] + 2) >> 2
                } else {
                    let ix = y - 2 * x;
                    (l[ix] + 2 * l[ix - 1] + l[ix - 2] + 2) >> 2
                } as u8;
        }
    }
}
fn ipred_y_8x8_ver_left(buf: &mut [u8], stride: usize, ctx: &IPred8Context) {
    let mut t = [0u16; 16];
    for (dt, &st) in t.iter_mut().zip(ctx.t.iter()) {
        *dt = u16::from(st);
    }

    for (y, row) in buf.chunks_mut(stride).take(8).enumerate() {
        for (x, pix) in row.iter_mut().take(8).enumerate() {
            let ix = x + (y >> 1);
            *pix = if (y & 1) == 0 {
                    (t[ix] + t[ix + 1] + 1) >> 1
                } else {
                    (t[ix] + 2 * t[ix + 1] + t[ix + 2] + 2) >> 2
                } as u8;
        }
    }
}
fn ipred_y_8x8_hor_down(buf: &mut [u8], stride: usize, ctx: &IPred8Context) {
    let mut t = [0u16; 9];
    t[0] = u16::from(ctx.tl);
    for (dt, &st) in t[1..].iter_mut().zip(ctx.t.iter()) {
        *dt = u16::from(st);
    }
    let mut l = [0u16; 9];
    l[0] = u16::from(ctx.tl);
    for (dl, &sl) in l[1..].iter_mut().zip(ctx.l.iter()) {
        *dl = u16::from(sl);
    }

    for (y, row) in buf.chunks_mut(stride).take(8).enumerate() {
        for (x, pix) in row.iter_mut().take(8).enumerate() {
            let zhd = 2 * (y as i8) - (x as i8);
            *pix = if zhd >= 0 {
                    let ix = y - (x >> 1);
                    if (zhd & 1) == 0 {
                        (l[ix] + l[ix + 1] + 1) >> 1
                    } else {
                        (l[ix - 1] + 2 * l[ix] + l[ix + 1] + 2) >> 2
                    }
                } else if zhd == -1 {
                    (l[1] + 2 * l[0] + t[0] + 2) >> 2
                } else {
                    let ix = x - 2 * y;
                    (t[ix] + 2 * t[ix - 1] + t[ix - 2] + 2) >> 2
                } as u8;
        }
    }
}
fn ipred_y_8x8_hor_up(buf: &mut [u8], stride: usize, ctx: &IPred8Context) {
    let mut l = [0u16; 8];
    for (dl, &sl) in l.iter_mut().zip(ctx.l.iter()) {
        *dl = u16::from(sl);
    }

    for (y, row) in buf.chunks_mut(stride).take(8).enumerate() {
        for (x, pix) in row.iter_mut().take(8).enumerate() {
            let zhu = x + 2 * y;
            let ix = y + (x >> 1);
            *pix = if zhu > 13 {
                    l[7]
                } else if zhu == 13 {
                    (l[6] + 3 * l[7] + 2) >> 2
                } else if (zhu & 1) != 0 {
                    (l[ix] + 2 * l[ix + 1] + l[ix + 2] + 2) >> 2
                } else {
                    (l[ix] + l[ix + 1] + 1) >> 1
                } as u8;
        }
    }
}
fn ipred_y_8x8_dc(buf: &mut [u8], stride: usize, ctx: &IPred8Context) {
    let mut sum = 0u16;
    for &t in ctx.t[..8].iter() {
        sum += u16::from(t);
    }
    for &l in ctx.l[..8].iter() {
        sum += u16::from(l);
    }
    let dc = ((sum + 8) >> 4) as u8;
    for row in buf.chunks_mut(stride).take(8) {
        for pix in row.iter_mut().take(8) {
            *pix = dc;
        }
    }
}
fn ipred_y_8x8_left_dc(buf: &mut [u8], stride: usize, ctx: &IPred8Context) {
    let mut sum = 0u16;
    for &l in ctx.l[..8].iter() {
        sum += u16::from(l);
    }
    let dc = ((sum + 4) >> 3) as u8;
    for row in buf.chunks_mut(stride).take(8) {
        for pix in row.iter_mut().take(8) {
            *pix = dc;
        }
    }
}
fn ipred_y_8x8_top_dc(buf: &mut [u8], stride: usize, ctx: &IPred8Context) {
    let mut sum = 0u16;
    for &t in ctx.t[..8].iter() {
        sum += u16::from(t);
    }
    let dc = ((sum + 4) >> 3) as u8;
    for row in buf.chunks_mut(stride).take(8) {
        for pix in row.iter_mut().take(8) {
            *pix = dc;
        }
    }
}
fn ipred_y_8x8_dc128(buf: &mut [u8], stride: usize, _ctx: &IPred8Context) {
    ipred_dc128(buf, 0, stride, 8);
}

fn ipred_8x8_ver(buf: &mut [u8], idx: usize, stride: usize) {
    ipred_ver(buf, idx, stride, 8);
}
fn ipred_8x8_hor(buf: &mut [u8], idx: usize, stride: usize) {
    ipred_hor(buf, idx, stride, 8);
}
fn ipred_8x8_dc(buf: &mut [u8], idx: usize, stride: usize) {
    let mut t: [u16; 8] = [0; 8];
    load_top(&mut t, buf, idx, stride, 8);
    let mut l: [u16; 8] = [0; 8];
    load_left(&mut l, buf, idx, stride, 8);

    let dc0 = ((t[0] + t[1] + t[2] + t[3] + l[0] + l[1] + l[2] + l[3] + 4) >> 3) as u8;
    let sum1 = t[4] + t[5] + t[6] + t[7];
    let dc1 = ((sum1 + 2) >> 2) as u8;
    let sum2 = l[4] + l[5] + l[6] + l[7];
    let dc2 = ((sum2 + 2) >> 2) as u8;
    let dc3 = ((sum1 + sum2 + 4) >> 3) as u8;

    let dst = &mut buf[idx..];
    for row in dst.chunks_mut(stride).take(4) {
        row[..4].copy_from_slice(&[dc0; 4]);
        row[4..8].copy_from_slice(&[dc1; 4]);
    }
    for row in dst.chunks_mut(stride).skip(4).take(4) {
        row[..4].copy_from_slice(&[dc2; 4]);
        row[4..8].copy_from_slice(&[dc3; 4]);
    }
}
fn ipred_8x8_left_dc(buf: &mut [u8], idx: usize, stride: usize) {
    let mut left_dc0 = 0;
    let mut left_dc1 = 0;
    for row in buf[idx - 1..].chunks(stride).take(4) {
        left_dc0 += u16::from(row[0]);
    }
    for row in buf[idx - 1..].chunks(stride).skip(4).take(4) {
        left_dc1 += u16::from(row[0]);
    }
    let dc0 = ((left_dc0 + 2) >> 2) as u8;
    let dc2 = ((left_dc1 + 2) >> 2) as u8;
    for row in buf[idx..].chunks_mut(stride).take(4) {
        row[..8].copy_from_slice(&[dc0; 8]);
    }
    for row in buf[idx..].chunks_mut(stride).skip(4).take(4) {
        row[..8].copy_from_slice(&[dc2; 8]);
    }
}
fn ipred_8x8_top_dc(buf: &mut [u8], idx: usize, stride: usize) {
    ipred_top_dc(buf, idx, stride, 4, 2);
    ipred_top_dc(buf, idx + 4, stride, 4, 2);
    ipred_top_dc(buf, idx + 4 * stride, stride, 4, 2);
    ipred_top_dc(buf, idx + 4 + 4 * stride, stride, 4, 2);
}
fn ipred_8x8_dc128(buf: &mut [u8], idx: usize, stride: usize) {
    ipred_dc128(buf, idx, stride, 8);
}
fn ipred_8x8_plane(buf: &mut [u8], idx: usize, stride: usize) {
    let mut h: i32 = 0;
    let mut v: i32 = 0;
    let idx0 = idx + 3 - stride;
    let mut idx1 = idx + 4 * stride - 1;
    let mut idx2 = idx + 2 * stride - 1;
    for i in 0..4 {
        let i1 = (i + 1) as i32;
        h += i1 * (i32::from(buf[idx0 + i + 1]) - i32::from(buf[idx0 - i - 1]));
        v += i1 * (i32::from(buf[idx1]) - i32::from(buf[idx2]));
        idx1 += stride;
        idx2 -= stride;
    }
    let b = (17 * h + 16) >> 5;
    let c = (17 * v + 16) >> 5;
    let mut a = 16 * (i32::from(buf[idx - 1 + 7 * stride]) + i32::from(buf[idx + 7 - stride])) - 3 * (b + c) + 16;
    for line in buf[idx..].chunks_mut(stride).take(8) {
        let mut acc = a;
        for el in line.iter_mut().take(8) {
            *el = clip8((acc >> 5) as i16);
            acc += b;
        }
        a += c;
    }
}

fn ipred_16x16_ver(buf: &mut [u8], idx: usize, stride: usize) {
    ipred_ver(buf, idx, stride, 16);
}
fn ipred_16x16_hor(buf: &mut [u8], idx: usize, stride: usize) {
    ipred_hor(buf, idx, stride, 16);
}
fn ipred_16x16_dc(buf: &mut [u8], idx: usize, stride: usize) {
    ipred_dc(buf, idx, stride, 16, 5);
}
fn ipred_16x16_left_dc(buf: &mut [u8], idx: usize, stride: usize) {
    ipred_left_dc(buf, idx, stride, 16, 4);
}
fn ipred_16x16_top_dc(buf: &mut [u8], idx: usize, stride: usize) {
    ipred_top_dc(buf, idx, stride, 16, 4);
}
fn ipred_16x16_dc128(buf: &mut [u8], idx: usize, stride: usize) {
    ipred_dc128(buf, idx, stride, 16);
}
fn ipred_16x16_plane(buf: &mut [u8], idx: usize, stride: usize) {
    let idx0 = idx + 7 - stride;
    let mut idx1 = idx + 8 * stride - 1;
    let mut idx2 = idx1 - 2 * stride;

    let mut h = i32::from(buf[idx0 + 1]) - i32::from(buf[idx0 - 1]);
    let mut v = i32::from(buf[idx1]) - i32::from(buf[idx2]);

    for k in 2..9 {
        idx1 += stride;
        idx2 -= stride;
        h += (k as i32) * (i32::from(buf[idx0 + k]) - i32::from(buf[idx0 - k]));
        v += (k as i32) * (i32::from(buf[idx1]) - i32::from(buf[idx2]));
    }
    h = (5 * h + 32) >> 6;
    v = (5 * v + 32) >> 6;

    let mut a = 16 * (i32::from(buf[idx - 1 + 15 * stride]) + i32::from(buf[idx + 15 - stride]) + 1) - 7 * (v + h);

    for row in buf[idx..].chunks_mut(stride).take(16) {
        let mut b = a;
        a += v;

        for dst in row.chunks_exact_mut(4).take(4) {
            dst[0] = clip8(((b        ) >> 5) as i16);
            dst[1] = clip8(((b +     h) >> 5) as i16);
            dst[2] = clip8(((b + 2 * h) >> 5) as i16);
            dst[3] = clip8(((b + 3 * h) >> 5) as i16);
            b += h * 4;
        }
    }
}

pub type IPred4x4Func = fn(buf: &mut [u8], off: usize, stride: usize, tr: &[u8]);
pub type IPred8x8Func = fn(buf: &mut [u8], off: usize, stride: usize);
pub type IPred8x8LumaFunc = fn(buf: &mut [u8], stride: usize, ctx: &IPred8Context);

pub const IPRED4_DC128:   usize = 11;
pub const IPRED4_DC_TOP:  usize = 10;
pub const IPRED4_DC_LEFT: usize = 9;
pub const IPRED8_DC128:   usize = 6;
pub const IPRED8_DC_TOP:  usize = 5;
pub const IPRED8_DC_LEFT: usize = 4;

pub const IPRED_FUNCS4X4: [IPred4x4Func; 12] = [
    ipred_4x4_ver, ipred_4x4_hor, ipred_4x4_dc,
    ipred_4x4_diag_down_left, ipred_4x4_diag_down_right,
    ipred_4x4_ver_right, ipred_4x4_hor_down, ipred_4x4_ver_left, ipred_4x4_hor_up,
    ipred_4x4_left_dc, ipred_4x4_top_dc, ipred_4x4_dc128
];

pub const IPRED_FUNCS8X8_LUMA: [IPred8x8LumaFunc; 12] = [
    ipred_y_8x8_ver, ipred_y_8x8_hor, ipred_y_8x8_dc,
    ipred_y_8x8_diag_down_left, ipred_y_8x8_diag_down_right,
    ipred_y_8x8_ver_right, ipred_y_8x8_hor_down,
    ipred_y_8x8_ver_left, ipred_y_8x8_hor_up,
    ipred_y_8x8_left_dc, ipred_y_8x8_top_dc, ipred_y_8x8_dc128
];

pub const IPRED_FUNCS8X8_CHROMA: [IPred8x8Func; 7] = [
    ipred_8x8_dc, ipred_8x8_hor, ipred_8x8_ver, ipred_8x8_plane,
    ipred_8x8_left_dc, ipred_8x8_top_dc, ipred_8x8_dc128
];

pub const IPRED_FUNCS16X16: [IPred8x8Func; 7] = [
    ipred_16x16_ver, ipred_16x16_hor, ipred_16x16_dc, ipred_16x16_plane,
    ipred_16x16_left_dc, ipred_16x16_top_dc, ipred_16x16_dc128
];

fn clip_u8(val: i16) -> u8 { val.max(0).min(255) as u8 }

const TMP_BUF_STRIDE: usize = 32;

fn interp_block1(dst: &mut [u8], dstride: usize, src: &[u8], sstride: usize, w: usize, h: usize, hor: bool, avg0: bool) {
    let step = if hor { 1 } else { sstride };
    let mut idx = 0;
    let avgidx = if avg0 { step * 2 } else { step * 3 };
    for dline in dst.chunks_mut(dstride).take(h) {
        for (x, pix) in dline.iter_mut().take(w).enumerate() {
            let t = clip_u8((      i16::from(src[idx + x])
                             -  5 * i16::from(src[idx + x + step])
                             + 20 * i16::from(src[idx + x + step * 2])
                             + 20 * i16::from(src[idx + x + step * 3])
                             -  5 * i16::from(src[idx + x + step * 4])
                             +      i16::from(src[idx + x + step * 5])
                             + 16) >> 5);
            *pix = ((u16::from(t) + u16::from(src[idx + x + avgidx]) + 1) >> 1) as u8;
        }
        idx += sstride;
    }
}

fn interp_block2(dst: &mut [u8], dstride: usize, src: &[u8], sstride: usize, w: usize, h: usize, hor: bool) {
    let step = if hor { 1 } else { sstride };
    let mut idx = 0;
    for dline in dst.chunks_mut(dstride).take(h) {
        for (x, pix) in dline.iter_mut().take(w).enumerate() {
            *pix = clip_u8((      i16::from(src[idx + x])
                            -  5 * i16::from(src[idx + x + step])
                            + 20 * i16::from(src[idx + x + step * 2])
                            + 20 * i16::from(src[idx + x + step * 3])
                            -  5 * i16::from(src[idx + x + step * 4])
                            +      i16::from(src[idx + x + step * 5])
                            + 16) >> 5);
        }
        idx += sstride;
    }
}

fn mc_avg_tmp(dst: &mut [u8], dstride: usize, w: usize, h: usize, tmp: &[u8], tmp2: &[u8]) {
    for (dline, (sline0, sline1)) in dst.chunks_mut(dstride).zip(tmp.chunks(TMP_BUF_STRIDE).zip(tmp2.chunks(TMP_BUF_STRIDE))).take(h) {
        for (pix, (&a, &b)) in dline.iter_mut().zip(sline0.iter().zip(sline1.iter())).take(w) {
            *pix = ((u16::from(a) + u16::from(b) + 1) >> 1) as u8;
        }
    }
}

fn h264_mc00(dst: &mut [u8], dstride: usize, src: &[u8], sstride: usize, w: usize, h: usize) {
    for (dline, sline) in dst.chunks_mut(dstride).zip(src.chunks(sstride)).take(h) {
        dline[..w].copy_from_slice(&sline[..w]);
    }
}

fn h264_mc01(dst: &mut [u8], dstride: usize, src: &[u8], sstride: usize, w: usize, h: usize) {
    interp_block1(dst, dstride, &src[sstride * 2..], sstride, w, h, true, true);
}

fn h264_mc02(dst: &mut [u8], dstride: usize, src: &[u8], sstride: usize, w: usize, h: usize) {
    interp_block2(dst, dstride, &src[sstride * 2..], sstride, w, h, true);
}

fn h264_mc03(dst: &mut [u8], dstride: usize, src: &[u8], sstride: usize, w: usize, h: usize) {
    interp_block1(dst, dstride, &src[sstride * 2..], sstride, w, h, true, false);
}

fn h264_mc10(dst: &mut [u8], dstride: usize, src: &[u8], sstride: usize, w: usize, h: usize) {
    interp_block1(dst, dstride, &src[2..], sstride, w, h, false, true);
}

fn h264_mc11(dst: &mut [u8], dstride: usize, src: &[u8], sstride: usize, w: usize, h: usize) {
    let mut tmp  = [0u8; TMP_BUF_STRIDE * 16];
    let mut tmp2 = [0u8; TMP_BUF_STRIDE * 16];
    h264_mc02(&mut tmp,  TMP_BUF_STRIDE, src, sstride, w, h);
    h264_mc20(&mut tmp2, TMP_BUF_STRIDE, src, sstride, w, h);
    mc_avg_tmp(dst, dstride, w, h, &tmp, &tmp2);
}

fn h264_mc12(dst: &mut [u8], dstride: usize, src: &[u8], sstride: usize, w: usize, h: usize) {
    let mut tmp  = [0u8; TMP_BUF_STRIDE * 16];
    let mut tmp2 = [0u8; TMP_BUF_STRIDE * 16];
    h264_mc02(&mut tmp,  TMP_BUF_STRIDE, src, sstride, w, h);
    h264_mc22(&mut tmp2, TMP_BUF_STRIDE, src, sstride, w, h);
    mc_avg_tmp(dst, dstride, w, h, &tmp, &tmp2);
}

fn h264_mc13(dst: &mut [u8], dstride: usize, src: &[u8], sstride: usize, w: usize, h: usize) {
    let mut tmp  = [0u8; TMP_BUF_STRIDE * 16];
    let mut tmp2 = [0u8; TMP_BUF_STRIDE * 16];
    h264_mc02(&mut tmp,  TMP_BUF_STRIDE, src, sstride, w, h);
    h264_mc20(&mut tmp2, TMP_BUF_STRIDE, &src[1..], sstride, w, h);
    mc_avg_tmp(dst, dstride, w, h, &tmp, &tmp2);
}

fn h264_mc20(dst: &mut [u8], dstride: usize, src: &[u8], sstride: usize, w: usize, h: usize) {
    interp_block2(dst, dstride, &src[2..], sstride, w, h, false);
}

fn h264_mc21(dst: &mut [u8], dstride: usize, src: &[u8], sstride: usize, w: usize, h: usize) {
    let mut tmp  = [0u8; TMP_BUF_STRIDE * 16];
    let mut tmp2 = [0u8; TMP_BUF_STRIDE * 16];
    h264_mc22(&mut tmp,  TMP_BUF_STRIDE, src, sstride, w, h);
    h264_mc20(&mut tmp2, TMP_BUF_STRIDE, src, sstride, w, h);
    mc_avg_tmp(dst, dstride, w, h, &tmp, &tmp2);
}

fn h264_mc22(dst: &mut [u8], dstride: usize, src: &[u8], sstride: usize, w: usize, h: usize) {
    let mut tmp = [0i32; TMP_BUF_STRIDE * 16];
    let mut idx = 0;
    for dline in tmp.chunks_mut(TMP_BUF_STRIDE).take(h) {
        for (x, pix) in dline.iter_mut().take(w + 5).enumerate() {
            *pix =      i32::from(src[idx + x])
                 -  5 * i32::from(src[idx + x + sstride])
                 + 20 * i32::from(src[idx + x + sstride * 2])
                 + 20 * i32::from(src[idx + x + sstride * 3])
                 -  5 * i32::from(src[idx + x + sstride * 4])
                 +      i32::from(src[idx + x + sstride * 5]);
        }
        idx += sstride;
    }
    for (dline, sline) in dst.chunks_mut(dstride).zip(tmp.chunks(TMP_BUF_STRIDE)).take(h) {
        for (x, pix) in dline.iter_mut().take(w).enumerate() {
            *pix = clip8(((sline[x] - 5 * sline[x + 1] + 20 * sline[x + 2] + 20 * sline[x + 3] - 5 * sline[x + 4] + sline[x + 5] + 512) >> 10) as i16);
        }
    }
}

fn h264_mc23(dst: &mut [u8], dstride: usize, src: &[u8], sstride: usize, w: usize, h: usize) {
    let mut tmp  = [0u8; TMP_BUF_STRIDE * 16];
    let mut tmp2 = [0u8; TMP_BUF_STRIDE * 16];
    h264_mc22(&mut tmp,  TMP_BUF_STRIDE, src, sstride, w, h);
    h264_mc20(&mut tmp2, TMP_BUF_STRIDE, &src[1..], sstride, w, h);
    mc_avg_tmp(dst, dstride, w, h, &tmp, &tmp2);
}

fn h264_mc30(dst: &mut [u8], dstride: usize, src: &[u8], sstride: usize, w: usize, h: usize) {
    interp_block1(dst, dstride, &src[2..], sstride, w, h, false, false);
}

fn h264_mc31(dst: &mut [u8], dstride: usize, src: &[u8], sstride: usize, w: usize, h: usize) {
    let mut tmp  = [0u8; TMP_BUF_STRIDE * 16];
    let mut tmp2 = [0u8; TMP_BUF_STRIDE * 16];
    h264_mc20(&mut tmp,  TMP_BUF_STRIDE, src, sstride, w, h);
    h264_mc02(&mut tmp2, TMP_BUF_STRIDE, &src[sstride..], sstride, w, h);
    mc_avg_tmp(dst, dstride, w, h, &tmp, &tmp2);
}

fn h264_mc32(dst: &mut [u8], dstride: usize, src: &[u8], sstride: usize, w: usize, h: usize) {
    let mut tmp  = [0u8; TMP_BUF_STRIDE * 16];
    let mut tmp2 = [0u8; TMP_BUF_STRIDE * 16];
    h264_mc22(&mut tmp,  TMP_BUF_STRIDE, src, sstride, w, h);
    h264_mc02(&mut tmp2, TMP_BUF_STRIDE, &src[sstride..], sstride, w, h);
    mc_avg_tmp(dst, dstride, w, h, &tmp, &tmp2);
}

fn h264_mc33(dst: &mut [u8], dstride: usize, src: &[u8], sstride: usize, w: usize, h: usize) {
    let mut tmp  = [0u8; TMP_BUF_STRIDE * 16];
    let mut tmp2 = [0u8; TMP_BUF_STRIDE * 16];
    h264_mc20(&mut tmp,  TMP_BUF_STRIDE, &src[1..], sstride, w, h);
    h264_mc02(&mut tmp2, TMP_BUF_STRIDE, &src[sstride..], sstride, w, h);
    mc_avg_tmp(dst, dstride, w, h, &tmp, &tmp2);
}

fn chroma_interp(dst: &mut [u8], dstride: usize, src: &[u8], sstride: usize, dx: u16, dy: u16, w: usize, h: usize) {
    let a0 = 8 - dx;
    let a1 = dx;
    let b0 = 8 - dy;
    let b1 = dy;

    let src1 = &src[sstride..];
    if a0 == 8 && b0 == 8 {
        for (drow, line) in dst.chunks_mut(dstride).zip(src.chunks(sstride)).take(h) {
            drow[..w].copy_from_slice(&line[..w]);
        }
    } else if a0 == 8 {
        for (drow, (line0, line1)) in dst.chunks_mut(dstride).zip(src.chunks(sstride).zip(src1.chunks(sstride))).take(h) {
            for (pix, (&a, &b)) in drow.iter_mut().take(w).zip(line0.iter().zip(line1.iter())) {
                *pix = ((u16::from(a) * b0 + u16::from(b) * b1 + 4) >> 3) as u8;
            }
        }
    } else if b0 == 8 {
        for (drow, line) in dst.chunks_mut(dstride).zip(src.chunks(sstride)).take(h) {
            let mut a = line[0];
            for (pix, &b) in drow.iter_mut().take(w).zip(line.iter().skip(1)) {
                *pix = ((u16::from(a) * a0 + u16::from(b) * a1 + 4) >> 3) as u8;
                a = b;
            }
        }
    } else {
        for (drow, (line0, line1)) in dst.chunks_mut(dstride).zip(src.chunks(sstride).zip(src1.chunks(sstride))).take(h) {
            let mut a = line0[0];
            let mut c = line1[0];
            for (pix, (&b, &d)) in drow.iter_mut().take(w).zip(line0[1..].iter().zip(line1[1..].iter())) {
                *pix = ((u16::from(a) * a0 * b0 + u16::from(b) * a1 * b0 + u16::from(c) * a0 * b1 + u16::from(d) * a1 * b1 + 0x20) >> 6) as u8;
                a = b;
                c = d;
            }
        }
    }
}

const H264_LUMA_INTERP: &[BlkInterpFunc] = &[
    h264_mc00, h264_mc01, h264_mc02, h264_mc03,
    h264_mc10, h264_mc11, h264_mc12, h264_mc13,
    h264_mc20, h264_mc21, h264_mc22, h264_mc23,
    h264_mc30, h264_mc31, h264_mc32, h264_mc33
];

pub fn do_mc(frm: &mut NASimpleVideoFrame<u8>, refpic: NAVideoBufferRef<u8>, xpos: usize, ypos: usize, w: usize, h: usize, mv: MV) {
    let mode = ((mv.x & 3) + (mv.y & 3) * 4) as usize;
    copy_block(frm, refpic.clone(), 0, xpos, ypos, mv.x >> 2, mv.y >> 2, w, h, 2, 3, mode, H264_LUMA_INTERP);

    let (cw, ch) = refpic.get_dimensions(1);
    let mvx = mv.x >> 3;
    let mvy = mv.y >> 3;
    let dx = (mv.x & 7) as u16;
    let dy = (mv.y & 7) as u16;
    let mut ebuf = [0u8; 18 * 9];
    let src_x = ((xpos >> 1) as isize) + (mvx as isize);
    let src_y = ((ypos >> 1) as isize) + (mvy as isize);
    let suoff = refpic.get_offset(1);
    let svoff = refpic.get_offset(2);
    let sustride = refpic.get_stride(1);
    let svstride = refpic.get_stride(2);
    let src = refpic.get_data();
    let cbw = w / 2;
    let cbh = h / 2;
    let (csrc, cstride) = if (src_x < 0) || (src_x + (cbw as isize) + 1 > (cw as isize)) || (src_y < 0) || (src_y + (cbh as isize) + 1 > (ch as isize)) {
            edge_emu(&refpic, src_x, src_y, cbw + 1, cbh + 1, &mut ebuf, 18, 1, 4);
            edge_emu(&refpic, src_x, src_y, cbw + 1, cbh + 1, &mut ebuf[9..], 18, 2, 4);
            ([&ebuf, &ebuf[9..]], [18, 18])
        } else {
            ([&src[suoff + (src_x as usize) + (src_y as usize) * sustride..],
              &src[svoff + (src_x as usize) + (src_y as usize) * svstride..]],
             [sustride, svstride])
        };
    for chroma in 1..3 {
        let off = frm.offset[chroma] + xpos / 2 + (ypos / 2) * frm.stride[chroma];
        chroma_interp(&mut frm.data[off..], frm.stride[chroma], csrc[chroma - 1], cstride[chroma - 1], dx, dy, cbw, cbh);
    }
}

pub fn gray_block(frm: &mut NASimpleVideoFrame<u8>, x: usize, y: usize, w: usize, h: usize) {
    let yoff = frm.offset[0] + x + y * frm.stride[0];
    let coff = [frm.offset[1] + x / 2 + y / 2 * frm.stride[1],
                frm.offset[2] + x / 2 + y / 2 * frm.stride[2]];
    if w == 16 && h == 16 {
        IPRED_FUNCS16X16[IPRED8_DC128](frm.data, yoff, frm.stride[0]);
        for chroma in 1..3 {
            IPRED_FUNCS8X8_CHROMA[IPRED8_DC128](frm.data, coff[chroma - 1], frm.stride[chroma]);
        }
    } else if w == 8 && h == 8 {
        IPRED_FUNCS8X8_CHROMA[IPRED8_DC128](frm.data, yoff, frm.stride[0]);
        for chroma in 1..3 {
            IPRED_FUNCS4X4[IPRED4_DC128](frm.data, coff[chroma - 1], frm.stride[chroma], &[128; 4]);
        }
    } else {
        for row in frm.data[yoff..].chunks_mut(frm.stride[0]).take(h) {
            for el in row[..w].iter_mut() {
                *el = 128;
            }
        }
        for chroma in 0..2 {
            for row in frm.data[coff[chroma]..].chunks_mut(frm.stride[chroma + 1]).take(h / 2) {
                for el in row[..w / 2].iter_mut() {
                    *el = 128;
                }
            }
        }
    }
}

pub fn do_mc_avg(frm: &mut NASimpleVideoFrame<u8>, refpic: NAVideoBufferRef<u8>, xpos: usize, ypos: usize, w: usize, h: usize, mv: MV, avg_buf: &mut NAVideoBufferRef<u8>) {
    let mut afrm = NASimpleVideoFrame::from_video_buf(avg_buf).unwrap();
    let amv = MV { x: mv.x + (xpos as i16) * 4, y: mv.y + (ypos as i16) * 4 };
    do_mc(&mut afrm, refpic, 0, 0, w, h, amv);
    for comp in 0..3 {
        let shift = if comp == 0 { 0 } else { 1 };
        avg(&mut frm.data[frm.offset[comp] + (xpos >> shift) + (ypos >> shift) * frm.stride[comp]..], frm.stride[comp], &afrm.data[afrm.offset[comp]..], afrm.stride[comp], w >> shift, h >> shift);
    }
}

macro_rules! loop_filter {
    (lumaedge; $buf: expr, $off: expr, $step: expr, $alpha: expr, $beta: expr) => {
        let p2 = i16::from($buf[$off - $step * 3]);
        let p1 = i16::from($buf[$off - $step * 2]);
        let p0 = i16::from($buf[$off - $step]);
        let q0 = i16::from($buf[$off]);
        let q1 = i16::from($buf[$off + $step]);
        let q2 = i16::from($buf[$off + $step * 2]);
        let a_p = (p2 - p0).abs() < $beta;
        let a_q = (q2 - q0).abs() < $beta;
        if a_p && (p0 - q0).abs() < (($alpha >> 2) + 2) {
            let p3 = i16::from($buf[$off - $step * 4]);
            $buf[$off - $step * 3] = ((2 * p3 + 3 * p2 + p1 + p0 + q0 + 4) >> 3) as u8;
            $buf[$off - $step * 2] = ((p2 + p1 + p0 + q0 + 2) >> 2) as u8;
            $buf[$off - $step] = ((p2 + 2 * p1 + 2 * p0 + 2 * q0 + q1 + 4) >> 3) as u8;
        } else {
            $buf[$off - $step] = ((2 * p1 + p0 + q1 + 2) >> 2) as u8;
        }
        if a_q && (p0 - q0).abs() < (($alpha >> 2) + 2) {
            let q3 = i16::from($buf[$off + $step * 3]);
            $buf[$off] = ((p1 + 2 * p0 + 2 * q0 + 2 * q1 + q2 + 4) >> 3) as u8;
            $buf[$off + $step] = ((p0 + q0 + q1 + q2 + 2) >> 2) as u8;
            $buf[$off + $step * 2] = ((2 * q3 + 3 * q2 + q1 + q0 + p0 + 4) >> 3) as u8;
        } else {
            $buf[$off] = ((2 * q1 + q0 + p1 + 2) >> 2) as u8;
        }
    };
    (chromaedge; $buf: expr, $off: expr, $step: expr) => {
        let p1 = i16::from($buf[$off - $step * 2]);
        let p0 = i16::from($buf[$off - $step]);
        let q0 = i16::from($buf[$off]);
        let q1 = i16::from($buf[$off + $step]);
        $buf[$off - $step] = ((2 * p1 + p0 + q1 + 2) >> 2) as u8;
        $buf[$off] = ((2 * q1 + q0 + p1 + 2) >> 2) as u8;
    };
    (lumanormal; $buf: expr, $off: expr, $step: expr, $tc0: expr, $beta: expr) => {
        let p2 = i16::from($buf[$off - $step * 3]);
        let p1 = i16::from($buf[$off - $step * 2]);
        let p0 = i16::from($buf[$off - $step]);
        let q0 = i16::from($buf[$off]);
        let q1 = i16::from($buf[$off + $step]);
        let q2 = i16::from($buf[$off + $step * 2]);
        let a_p = (p2 - p0).abs() < $beta;
        let a_q = (q2 - q0).abs() < $beta;
        let tc = $tc0 + (a_p as i16) + (a_q as i16);
        let delta = (((q0 - p0) * 4 + (p1 - q1) + 4) >> 3).max(-tc).min(tc);
        if a_p && ($tc0 > 0) {
            $buf[$off - $step * 2] = clip8(p1 + ((p2 + ((p0 + q0 + 1) >> 1) - p1 * 2) >> 1).max(-$tc0).min($tc0));
        }
        $buf[$off - $step] = clip8(p0 + delta);
        $buf[$off] = clip8(q0 - delta);
        if a_q && ($tc0 > 0) {
            $buf[$off + $step] = clip8(q1 + ((q2 + ((p0 + q0 + 1) >> 1) - q1 * 2) >> 1).max(-$tc0).min($tc0));
        }
    };
    (chromanormal; $buf: expr, $off: expr, $step: expr, $tc0: expr) => {
        let p1 = i16::from($buf[$off - $step * 2]);
        let p0 = i16::from($buf[$off - $step]);
        let q0 = i16::from($buf[$off]);
        let q1 = i16::from($buf[$off + $step]);
        let tc = $tc0 + 1;
        let delta = (((q0 - p0) * 4 + (p1 - q1) + 4) >> 3).max(-tc).min(tc);
        $buf[$off - $step] = clip8(p0 + delta);
        $buf[$off] = clip8(q0 - delta);
    }
}

fn check_filter(buf: &[u8], off: usize, step: usize, alpha: i16, beta: i16) -> bool {
    let p1 = i16::from(buf[off - step * 2]);
    let p0 = i16::from(buf[off - step]);
    let q0 = i16::from(buf[off]);
    let q1 = i16::from(buf[off + step]);
    (p0 - q0).abs() < alpha && (p1 - p0).abs() < beta && (q1 - q0).abs() < beta
}

pub fn loop_filter_lumaedge_v(dst: &mut [u8], mut off: usize, stride: usize, alpha: i16, beta: i16) {
    for _ in 0..4 {
        if check_filter(dst, off, 1, alpha, beta) {
            loop_filter!(lumaedge; dst, off, 1, alpha, beta);
        }
        off += stride;
    }
}
pub fn loop_filter_lumaedge_h(dst: &mut [u8], off: usize, stride: usize, alpha: i16, beta: i16) {
    for x in 0..4 {
        if check_filter(dst, off + x, stride, alpha, beta) {
            loop_filter!(lumaedge; dst, off + x, stride, alpha, beta);
        }
    }
}
pub fn loop_filter_lumanormal_v(dst: &mut [u8], mut off: usize, stride: usize, alpha: i16, beta: i16, tc0: i16) {
    for _ in 0..4 {
        if check_filter(dst, off, 1, alpha, beta) {
            loop_filter!(lumanormal; dst, off, 1, tc0, beta);
        }
        off += stride;
    }
}
pub fn loop_filter_lumanormal_h(dst: &mut [u8], off: usize, stride: usize, alpha: i16, beta: i16, tc0: i16) {
    for x in 0..4 {
        if check_filter(dst, off + x, stride, alpha, beta) {
            loop_filter!(lumanormal; dst, off + x, stride, tc0, beta);
        }
    }
}
pub fn loop_filter_chromaedge_v(dst: &mut [u8], mut off: usize, stride: usize, alpha: i16, beta: i16) {
    for _ in 0..4 {
        if check_filter(dst, off, 1, alpha, beta) {
            loop_filter!(chromaedge; dst, off, 1);
        }
        off += stride;
    }
}
pub fn loop_filter_chromaedge_h(dst: &mut [u8], off: usize, stride: usize, alpha: i16, beta: i16) {
    for x in 0..4 {
        if check_filter(dst, off + x, stride, alpha, beta) {
            loop_filter!(chromaedge; dst, off + x, stride);
        }
    }
}
pub fn loop_filter_chromanormal_v(dst: &mut [u8], mut off: usize, stride: usize, alpha: i16, beta: i16, tc0: i16) {
    for _ in 0..4 {
        if check_filter(dst, off, 1, alpha, beta) {
            loop_filter!(chromanormal; dst, off, 1, tc0);
        }
        off += stride;
    }
}
pub fn loop_filter_chromanormal_h(dst: &mut [u8], off: usize, stride: usize, alpha: i16, beta: i16, tc0: i16) {
    for x in 0..4 {
        if check_filter(dst, off + x, stride, alpha, beta) {
            loop_filter!(chromanormal; dst, off + x, stride, tc0);
        }
    }
}
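
// A couple of minimal sanity checks, added here as an illustrative sketch
// rather than conformance tests; the expected values follow directly from the
// formulas above.
#[cfg(test)]
mod tests {
    use super::*;

    #[test]
    fn idct_dc_unquantised() {
        // With quant_dc == false the DC value is used as-is, so every output
        // sample becomes (64 + 32) >> 6 == 1.
        let mut blk = [0i16; 16];
        blk[0] = 64;
        idct_dc(&mut blk, 0, false);
        assert_eq!(blk, [1i16; 16]);
    }

    #[test]
    fn avg_rounds_to_nearest() {
        // (10 + 21 + 1) >> 1 == 16 for every sample of a 4x1 block.
        let mut dst = [10u8; 4];
        let src = [21u8; 4];
        avg(&mut dst, 4, &src, 4, 4, 1);
        assert_eq!(dst, [16u8; 4]);
    }
}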