// nihav-core/src/scale/mod.rs
use crate::frame::*;

mod kernel;

mod colorcvt;
mod repack;
mod scale;

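/// Description of a picture as the scaler sees it: pixel format plus width and height in pixels.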
#[derive(Clone,Copy,PartialEq)]
pub struct ScaleInfo {
    pub fmt:    NAPixelFormaton,
    pub width:  usize,
    pub height: usize,
}

impl std::fmt::Display for ScaleInfo {
    fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result {
        write!(f, "({}x{}, {})", self.width, self.height, self.fmt)
    }
}

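/// Errors that may be reported during scaling or format conversion.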
#[derive(Debug,Clone,Copy,PartialEq)]
#[allow(dead_code)]
pub enum ScaleError {
    NoFrame,
    AllocError,
    InvalidArgument,
    NotImplemented,
    Bug,
}

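/// A specialised `Result` type for scaling and conversion operations.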
pub type ScaleResult<T> = Result<T, ScaleError>;

/*trait Kernel {
    fn init(&mut self, in_fmt: &ScaleInfo, dest_fmt: &ScaleInfo) -> ScaleResult<NABufferType>;
    fn process(&mut self, pic_in: &NABufferType, pic_out: &mut NABufferType);
}*/

struct KernelDesc {
    name:   &'static str,
    create: fn () -> Box<dyn kernel::Kernel>,
}

impl KernelDesc {
    fn find(name: &str) -> ScaleResult<Box<dyn kernel::Kernel>> {
        for kern in KERNELS.iter() {
            if kern.name == name {
                return Ok((kern.create)());
            }
        }
        Err(ScaleError::InvalidArgument)
    }
}

const KERNELS: &[KernelDesc] = &[
    KernelDesc { name: "pack",       create: repack::create_pack },
    KernelDesc { name: "unpack",     create: repack::create_unpack },
    KernelDesc { name: "depal",      create: repack::create_depal },
    KernelDesc { name: "scale",      create: scale::create_scale },
    KernelDesc { name: "rgb_to_yuv", create: colorcvt::create_rgb2yuv },
    KernelDesc { name: "yuv_to_rgb", create: colorcvt::create_yuv2rgb },
];

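// One link of the conversion chain: the kernel doing the work, the format it outputs,
// a temporary buffer holding its intermediate result, and an optional next stage.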
struct Stage {
    fmt_out: ScaleInfo,
    tmp_pic: NABufferType,
    next:    Option<Box<Stage>>,
    worker:  Box<dyn kernel::Kernel>,
}

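/// Retrieves the `ScaleInfo` (pixel format and dimensions) describing the provided video buffer.
///
/// Panics if the buffer carries no video information.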
pub fn get_scale_fmt_from_pic(pic: &NABufferType) -> ScaleInfo {
    let info = pic.get_video_info().unwrap();
    ScaleInfo { fmt: info.get_format(), width: info.get_width(), height: info.get_height() }
}

impl Stage {
    fn new(name: &str, in_fmt: &ScaleInfo, dest_fmt: &ScaleInfo) -> ScaleResult<Self> {
        let mut worker = KernelDesc::find(name)?;
        let tmp_pic = worker.init(in_fmt, dest_fmt)?;
        let fmt_out = get_scale_fmt_from_pic(&tmp_pic);
        Ok(Self { fmt_out, tmp_pic, next: None, worker })
    }
    fn add(&mut self, new: Stage) {
        if let Some(ref mut next) = self.next {
            next.add(new);
        } else {
            self.next = Some(Box::new(new));
        }
    }
    fn process(&mut self, pic_in: &NABufferType, pic_out: &mut NABufferType) -> ScaleResult<()> {
        if let Some(ref mut nextstage) = self.next {
            self.worker.process(pic_in, &mut self.tmp_pic);
            nextstage.process(&self.tmp_pic, pic_out)?;
        } else {
            self.worker.process(pic_in, pic_out);
        }
        Ok(())
    }
    fn drop_last_tmp(&mut self) {
        if let Some(ref mut nextstage) = self.next {
            nextstage.drop_last_tmp();
        } else {
            self.tmp_pic = NABufferType::None;
        }
    }
}

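/// Image format converter and scaler.
///
/// `NAScale` builds a chain of conversion stages (unpack or depal, scale, colorspace
/// conversion, pack) for a given pair of input and output formats and applies it to
/// video buffers; when the two formats are identical the picture is simply copied.
///
/// A minimal usage sketch, mirroring the tests at the end of this file (it assumes the
/// buffer helpers and format constants from `crate::frame` are in scope):
///
/// ```ignore
/// let mut src = alloc_video_buffer(NAVideoInfo::new(4, 4, false, RGB24_FORMAT), 3).unwrap();
/// let mut dst = alloc_video_buffer(NAVideoInfo::new(4, 4, false, YUV420_FORMAT), 3).unwrap();
/// let ifmt = get_scale_fmt_from_pic(&src);
/// let ofmt = get_scale_fmt_from_pic(&dst);
/// let mut scaler = NAScale::new(ifmt, ofmt).unwrap();
/// scaler.convert(&src, &mut dst).unwrap();
/// ```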
pub struct NAScale {
    fmt_in:       ScaleInfo,
    fmt_out:      ScaleInfo,
    just_convert: bool,
    pipeline:     Option<Stage>,
}

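// Checks that a picture's pixel format matches the format the scaler was created for;
// when the scaler resizes (i.e. it is not a pure format conversion) the dimensions are
// checked as well.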
fn check_format(in_fmt: NAVideoInfo, ref_fmt: &ScaleInfo, just_convert: bool) -> ScaleResult<()> {
    if in_fmt.get_format() != ref_fmt.fmt { return Err(ScaleError::InvalidArgument); }
    if !just_convert && (in_fmt.get_width() != ref_fmt.width || in_fmt.get_height() != ref_fmt.height) {
        return Err(ScaleError::InvalidArgument);
    }
    Ok(())
}

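// Copies a picture verbatim when no conversion is needed: if both buffers share the same
// plane offsets and strides the whole data slice is copied at once, otherwise each plane
// is copied line by line using the smaller of the two strides.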
fn copy(pic_in: &NABufferType, pic_out: &mut NABufferType)
{
    if let (Some(ref sbuf), Some(ref mut dbuf)) = (pic_in.get_vbuf(), pic_out.get_vbuf()) {
        let mut same = true;
        let num_components = sbuf.get_info().get_format().get_num_comp();
        for i in 0..num_components {
            if sbuf.get_stride(i) != dbuf.get_stride(i) {
                same = false;
                break;
            }
            if sbuf.get_offset(i) != dbuf.get_offset(i) {
                same = false;
                break;
            }
        }
        if same {
            let sdata = sbuf.get_data();
            let ddata = dbuf.get_data_mut().unwrap();
            ddata.copy_from_slice(&sdata[0..]);
        } else {
            let sdata = sbuf.get_data();
            for comp in 0..num_components {
                let (_, h) = sbuf.get_dimensions(comp);
                let src = &sdata[sbuf.get_offset(comp)..];
                let sstride = sbuf.get_stride(comp);
                let doff = dbuf.get_offset(comp);
                let dstride = dbuf.get_stride(comp);
                let ddata = dbuf.get_data_mut().unwrap();
                let dst = &mut ddata[doff..];
                let copy_size = sstride.min(dstride);
                for (dline, sline) in dst.chunks_exact_mut(dstride).take(h).zip(src.chunks_exact(sstride)) {
                    (&mut dline[..copy_size]).copy_from_slice(&sline[..copy_size]);
                }
            }
        }
    } else {
        unimplemented!();
    }
}

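// Appends a stage to the pipeline being built: if the chain already has a head the new
// stage is linked to its tail, otherwise it becomes the head.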
macro_rules! add_stage {
    ($head:expr, $new:expr) => {
        if let Some(ref mut h) = $head {
            h.add($new);
        } else {
            $head = Some($new);
        }
    };
}
fn is_better_fmt(a: &ScaleInfo, b: &ScaleInfo) -> bool {
    if (a.width >= b.width) && (a.height >= b.height) {
        return true;
    }
    if a.fmt.get_max_depth() > b.fmt.get_max_depth() {
        return true;
    }
    if a.fmt.get_max_subsampling() < b.fmt.get_max_subsampling() {
        return true;
    }
    false
}
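// Builds the conversion pipeline for the given input and output formats. Depending on what
// the formats require, the chain is assembled from up to four kinds of stages in this order:
// unpack (or depal for paletted input), scale, colorspace conversion, pack. Scaling happens
// before the conversion when the input format is considered "better" (see is_better_fmt
// above) and the output format is not subsampled, otherwise after it. The last stage's
// temporary buffer is dropped since that stage writes straight into the output picture.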
fn build_pipeline(ifmt: &ScaleInfo, ofmt: &ScaleInfo, just_convert: bool) -> ScaleResult<Option<Stage>> {
    let inname  = ifmt.fmt.get_model().get_short_name();
    let outname = ofmt.fmt.get_model().get_short_name();

    println!("convert {} -> {}", ifmt, ofmt);
    let needs_scale = if (ofmt.fmt.get_max_subsampling() > 0) &&
        (ofmt.fmt.get_max_subsampling() != ifmt.fmt.get_max_subsampling()) {
            true
        } else {
            !just_convert
        };
    let needs_unpack = !ifmt.fmt.is_unpacked();
    let needs_pack = !ofmt.fmt.is_unpacked();
    let needs_convert = inname != outname;
    let scale_before_cvt = is_better_fmt(&ifmt, &ofmt) && needs_convert
                           && (ofmt.fmt.get_max_subsampling() == 0);
    //todo stages for model and gamma conversion

    let mut stages: Option<Stage> = None;
    let mut cur_fmt = *ifmt;

    if needs_unpack {
        println!("[adding unpack]");
        let new_stage = if !cur_fmt.fmt.is_paletted() {
                Stage::new("unpack", &cur_fmt, &ofmt)?
            } else {
                Stage::new("depal", &cur_fmt, &ofmt)?
            };
        cur_fmt = new_stage.fmt_out;
        add_stage!(stages, new_stage);
    }
    if needs_scale && scale_before_cvt {
        println!("[adding scale]");
        let new_stage = Stage::new("scale", &cur_fmt, &ofmt)?;
        cur_fmt = new_stage.fmt_out;
        add_stage!(stages, new_stage);
    }
    if needs_convert {
        println!("[adding convert]");
        let cvtname = format!("{}_to_{}", inname, outname);
        println!("[{}]", cvtname);
        let new_stage = Stage::new(&cvtname, &cur_fmt, &ofmt)?;
        //todo if fails try converting via RGB or YUV
        cur_fmt = new_stage.fmt_out;
        add_stage!(stages, new_stage);
        //todo alpha plane copy/add
    }
    if needs_scale && !scale_before_cvt {
        println!("[adding scale]");
        let new_stage = Stage::new("scale", &cur_fmt, &ofmt)?;
        cur_fmt = new_stage.fmt_out;
        add_stage!(stages, new_stage);
    }
    if needs_pack {
        println!("[adding pack]");
        let new_stage = Stage::new("pack", &cur_fmt, &ofmt)?;
        //cur_fmt = new_stage.fmt_out;
        add_stage!(stages, new_stage);
    }

    if let Some(ref mut head) = stages {
        head.drop_last_tmp();
    }

    Ok(stages)
}

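// Flips a single plane vertically by swapping its top and bottom lines and moving towards
// the middle; `line0` and `line1` are caller-provided scratch buffers one stride long.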
fn swap_plane<T:Copy>(data: &mut [T], stride: usize, h: usize, line0: &mut [T], line1: &mut [T]) {
    let mut doff0 = 0;
    let mut doff1 = stride * (h - 1);
    for _ in 0..h/2 {
        line0.copy_from_slice(&data[doff0..][..stride]);
        line1.copy_from_slice(&data[doff1..][..stride]);
        (&mut data[doff1..][..stride]).copy_from_slice(line0);
        (&mut data[doff0..][..stride]).copy_from_slice(line1);
        doff0 += stride;
        doff1 -= stride;
    }
}

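/// Flips a picture upside down in place.
///
/// Works on 8-, 16- and 32-bit as well as packed video buffers; returns
/// `ScaleError::InvalidArgument` for anything else.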
pub fn flip_picture(pic: &mut NABufferType) -> ScaleResult<()> {
    match pic {
        NABufferType::Video(ref mut vb) => {
            let ncomp = vb.get_num_components();
            for comp in 0..ncomp {
                let off    = vb.get_offset(comp);
                let stride = vb.get_stride(comp);
                let (_, h) = vb.get_dimensions(comp);
                let data = vb.get_data_mut().unwrap();
                let mut line0 = vec![0; stride];
                let mut line1 = vec![0; stride];
                swap_plane(&mut data[off..], stride, h, line0.as_mut_slice(), line1.as_mut_slice());
            }
        },
        NABufferType::Video16(ref mut vb) => {
            let ncomp = vb.get_num_components();
            for comp in 0..ncomp {
                let off    = vb.get_offset(comp);
                let stride = vb.get_stride(comp);
                let (_, h) = vb.get_dimensions(comp);
                let data = vb.get_data_mut().unwrap();
                let mut line0 = vec![0; stride];
                let mut line1 = vec![0; stride];
                swap_plane(&mut data[off..], stride, h, line0.as_mut_slice(), line1.as_mut_slice());
            }
        },
        NABufferType::Video32(ref mut vb) => {
            let ncomp = vb.get_num_components();
            for comp in 0..ncomp {
                let off    = vb.get_offset(comp);
                let stride = vb.get_stride(comp);
                let (_, h) = vb.get_dimensions(comp);
                let data = vb.get_data_mut().unwrap();
                let mut line0 = vec![0; stride];
                let mut line1 = vec![0; stride];
                swap_plane(&mut data[off..], stride, h, line0.as_mut_slice(), line1.as_mut_slice());
            }
        },
        NABufferType::VideoPacked(ref mut vb) => {
            let ncomp = vb.get_num_components();
            for comp in 0..ncomp {
                let off    = vb.get_offset(comp);
                let stride = vb.get_stride(comp);
                let (_, h) = vb.get_dimensions(comp);
                let data = vb.get_data_mut().unwrap();
                let mut line0 = vec![0; stride];
                let mut line1 = vec![0; stride];
                swap_plane(&mut data[off..], stride, h, line0.as_mut_slice(), line1.as_mut_slice());
            }
        },
        _ => { return Err(ScaleError::InvalidArgument); },
    };
    Ok(())
}

impl NAScale {
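    /// Creates a new scaler for the given input and output formats.
    ///
    /// If the formats are identical no conversion pipeline is built and `convert()` falls
    /// back to a plain copy.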
    pub fn new(fmt_in: ScaleInfo, fmt_out: ScaleInfo) -> ScaleResult<Self> {
        let pipeline;
        let just_convert = (fmt_in.width == fmt_out.width) && (fmt_in.height == fmt_out.height);
        if fmt_in != fmt_out {
            pipeline = build_pipeline(&fmt_in, &fmt_out, just_convert)?;
        } else {
            pipeline = None;
        }
        Ok(Self { fmt_in, fmt_out, just_convert, pipeline })
    }
    pub fn needs_processing(&self) -> bool { self.pipeline.is_some() }
    pub fn get_in_fmt(&self) -> ScaleInfo { self.fmt_in }
    pub fn get_out_fmt(&self) -> ScaleInfo { self.fmt_out }
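    /// Converts the input picture into the output picture.
    ///
    /// Both buffers must match the formats the scaler was created with. The picture is run
    /// through the conversion pipeline (or copied verbatim when no processing is needed) and
    /// flipped vertically at the end if the two buffers disagree on line orientation.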
    pub fn convert(&mut self, pic_in: &NABufferType, pic_out: &mut NABufferType) -> ScaleResult<()> {
        let in_info  = pic_in.get_video_info();
        let out_info = pic_out.get_video_info();
        if in_info.is_none() || out_info.is_none() { return Err(ScaleError::InvalidArgument); }
        let in_info  = in_info.unwrap();
        let out_info = out_info.unwrap();
        if self.just_convert &&
            (in_info.get_width() != out_info.get_width() || in_info.get_height() != out_info.get_height()) {
            return Err(ScaleError::InvalidArgument);
        }
        let needs_flip = in_info.is_flipped() ^ out_info.is_flipped();
        check_format(in_info,  &self.fmt_in,  self.just_convert)?;
        check_format(out_info, &self.fmt_out, self.just_convert)?;
        let ret = if let Some(ref mut pipe) = self.pipeline {
                pipe.process(pic_in, pic_out)
            } else {
                copy(pic_in, pic_out);
                Ok(())
            };
        if ret.is_ok() && needs_flip {
            flip_picture(pic_out)?;
        }
        ret
    }
}

#[cfg(test)]
mod test {
    use super::*;

    fn fill_pic(pic: &mut NABufferType, val: u8) {
        if let Some(ref mut buf) = pic.get_vbuf() {
            let data = buf.get_data_mut().unwrap();
            for el in data.iter_mut() { *el = val; }
        } else if let Some(ref mut buf) = pic.get_vbuf16() {
            let data = buf.get_data_mut().unwrap();
            for el in data.iter_mut() { *el = val as u16; }
        } else if let Some(ref mut buf) = pic.get_vbuf32() {
            let data = buf.get_data_mut().unwrap();
            for el in data.iter_mut() { *el = (val as u32) * 0x01010101; }
        }
    }
    #[test]
    fn test_convert() {
        let mut in_pic = alloc_video_buffer(NAVideoInfo::new(1, 1, false, RGB565_FORMAT), 3).unwrap();
        fill_pic(&mut in_pic, 42);
        let mut out_pic = alloc_video_buffer(NAVideoInfo::new(1, 1, false, RGB24_FORMAT), 3).unwrap();
        fill_pic(&mut out_pic, 0);
        let ifmt = get_scale_fmt_from_pic(&in_pic);
        let ofmt = get_scale_fmt_from_pic(&out_pic);
        let mut scaler = NAScale::new(ifmt, ofmt).unwrap();
        scaler.convert(&in_pic, &mut out_pic).unwrap();
        let obuf = out_pic.get_vbuf().unwrap();
        let odata = obuf.get_data();
        assert_eq!(odata[0], 0x0);
        assert_eq!(odata[1], 0x4);
        assert_eq!(odata[2], 0x52);

        let mut in_pic = alloc_video_buffer(NAVideoInfo::new(4, 4, false, RGB24_FORMAT), 3).unwrap();
        fill_pic(&mut in_pic, 42);
        let mut out_pic = alloc_video_buffer(NAVideoInfo::new(4, 4, false, YUV420_FORMAT), 3).unwrap();
        fill_pic(&mut out_pic, 0);
        let ifmt = get_scale_fmt_from_pic(&in_pic);
        let ofmt = get_scale_fmt_from_pic(&out_pic);
        let mut scaler = NAScale::new(ifmt, ofmt).unwrap();
        scaler.convert(&in_pic, &mut out_pic).unwrap();
        let obuf = out_pic.get_vbuf().unwrap();
        let yoff = obuf.get_offset(0);
        let uoff = obuf.get_offset(1);
        let voff = obuf.get_offset(2);
        let odata = obuf.get_data();
        assert_eq!(odata[yoff], 42);
        assert!(((odata[uoff] ^ 0x80) as i8).abs() <= 1);
        assert!(((odata[voff] ^ 0x80) as i8).abs() <= 1);
        let mut scaler = NAScale::new(ofmt, ifmt).unwrap();
        scaler.convert(&out_pic, &mut in_pic).unwrap();
        let obuf = in_pic.get_vbuf().unwrap();
        let odata = obuf.get_data();
        assert_eq!(odata[0], 42);
    }
    #[test]
    fn test_scale() {
        let mut in_pic = alloc_video_buffer(NAVideoInfo::new(2, 2, false, RGB565_FORMAT), 3).unwrap();
        fill_pic(&mut in_pic, 42);
        let mut out_pic = alloc_video_buffer(NAVideoInfo::new(3, 3, false, RGB565_FORMAT), 3).unwrap();
        fill_pic(&mut out_pic, 0);
        let ifmt = get_scale_fmt_from_pic(&in_pic);
        let ofmt = get_scale_fmt_from_pic(&out_pic);
        let mut scaler = NAScale::new(ifmt, ofmt).unwrap();
        scaler.convert(&in_pic, &mut out_pic).unwrap();
        let obuf = out_pic.get_vbuf16().unwrap();
        let odata = obuf.get_data();
        assert_eq!(odata[0], 42);
    }
    #[test]
    fn test_scale_and_convert() {
        let mut in_pic = alloc_video_buffer(NAVideoInfo::new(7, 3, false, RGB565_FORMAT), 3).unwrap();
        fill_pic(&mut in_pic, 42);
        let mut out_pic = alloc_video_buffer(NAVideoInfo::new(4, 4, false, YUV420_FORMAT), 3).unwrap();
        fill_pic(&mut out_pic, 0);
        let ifmt = get_scale_fmt_from_pic(&in_pic);
        let ofmt = get_scale_fmt_from_pic(&out_pic);
        let mut scaler = NAScale::new(ifmt, ofmt).unwrap();
        scaler.convert(&in_pic, &mut out_pic).unwrap();
        let obuf = out_pic.get_vbuf().unwrap();
        let yoff = obuf.get_offset(0);
        let uoff = obuf.get_offset(1);
        let voff = obuf.get_offset(2);
        let odata = obuf.get_data();
        assert_eq!(odata[yoff], 28);
        assert_eq!(odata[uoff], 154);
        assert_eq!(odata[voff], 103);
    }
}