#[derive(Clone,Copy,PartialEq)]
pub struct ScaleInfo {
    pub fmt:    NAPixelFormaton,
    pub width:  usize,
    pub height: usize,
}
impl std::fmt::Display for ScaleInfo {
    fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result {
        write!(f, "({}x{}, {})", self.width, self.height, self.fmt)
    }
}

#[derive(Debug,Clone,Copy,PartialEq)]
pub type ScaleResult<T> = Result<T, ScaleError>;
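// A conversion kernel follows the two-step contract below: `init` examines the
// input and destination formats and allocates the intermediate buffer this
// stage will write into, while `process` performs the actual conversion from
// `pic_in` into `pic_out`.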
    fn init(&mut self, in_fmt: &ScaleInfo, dest_fmt: &ScaleInfo) -> ScaleResult<NABufferType>;
    fn process(&mut self, pic_in: &NABufferType, pic_out: &mut NABufferType);
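// Registry entry tying a kernel name to its constructor; `find` walks KERNELS
// and instantiates the first entry whose name matches.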
struct KernelDesc {
    name:   &'static str,
    create: fn () -> Box<dyn kernel::Kernel>,
}

impl KernelDesc {
    fn find(name: &str) -> ScaleResult<Box<dyn kernel::Kernel>> {
        for kern in KERNELS.iter() {
            if kern.name == name {
                return Ok((kern.create)());
            }
        }
        Err(ScaleError::InvalidArgument)
    }
}

const KERNELS: &[KernelDesc] = &[
    KernelDesc { name: "pack",       create: repack::create_pack },
    KernelDesc { name: "unpack",     create: repack::create_unpack },
    KernelDesc { name: "depal",      create: repack::create_depal },
    KernelDesc { name: "scale",      create: scale::create_scale },
    KernelDesc { name: "rgb_to_yuv", create: colorcvt::create_rgb2yuv },
    KernelDesc { name: "yuv_to_rgb", create: colorcvt::create_yuv2rgb },
];
struct Stage {
    fmt_out: ScaleInfo,
    tmp_pic: NABufferType,
    next:    Option<Box<Stage>>,
    worker:  Box<dyn kernel::Kernel>,
}

pub fn get_scale_fmt_from_pic(pic: &NABufferType) -> ScaleInfo {
    let info = pic.get_video_info().unwrap();
    ScaleInfo { fmt: info.get_format(), width: info.get_width(), height: info.get_height() }
}
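// A stage is built by kernel name: the kernel is looked up in KERNELS, its
// `init` allocates the stage's intermediate output buffer, and that buffer's
// format is recorded as `fmt_out` so the next stage knows what it will receive.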
impl Stage {
    fn new(name: &str, in_fmt: &ScaleInfo, dest_fmt: &ScaleInfo) -> ScaleResult<Self> {
        let mut worker = KernelDesc::find(name)?;
        let tmp_pic = worker.init(in_fmt, dest_fmt)?;
        let fmt_out = get_scale_fmt_from_pic(&tmp_pic);
        Ok(Self { fmt_out, tmp_pic, next: None, worker })
    }
    fn add(&mut self, new: Stage) {
        if let Some(ref mut next) = self.next {
            next.add(new);
        } else {
            self.next = Some(Box::new(new));
        }
    }
    fn process(&mut self, pic_in: &NABufferType, pic_out: &mut NABufferType) -> ScaleResult<()> {
        if let Some(ref mut nextstage) = self.next {
            self.worker.process(pic_in, &mut self.tmp_pic);
            nextstage.process(&self.tmp_pic, pic_out)?;
        } else {
            self.worker.process(pic_in, pic_out);
        }
        Ok(())
    }
    fn drop_last_tmp(&mut self) {
        if let Some(ref mut nextstage) = self.next {
            nextstage.drop_last_tmp();
        } else {
            self.tmp_pic = NABufferType::None;
        }
    }
}
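// The scaler owns the negotiated input/output formats, a `just_convert` flag
// for same-size conversions and `pipeline`, the head of the stage chain
// (left as `None` when a plain copy between identical formats is enough).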
    pipeline: Option<Stage>,

fn check_format(in_fmt: NAVideoInfo, ref_fmt: &ScaleInfo, just_convert: bool) -> ScaleResult<()> {
    if in_fmt.get_format() != ref_fmt.fmt { return Err(ScaleError::InvalidArgument); }
    if !just_convert && (in_fmt.get_width() != ref_fmt.width || in_fmt.get_height() != ref_fmt.height) {
        return Err(ScaleError::InvalidArgument);
    }
    Ok(())
}
fn copy(pic_in: &NABufferType, pic_out: &mut NABufferType)
{
    if let (Some(ref sbuf), Some(ref mut dbuf)) = (pic_in.get_vbuf(), pic_out.get_vbuf()) {
        let mut same = true;
        let num_components = sbuf.get_info().get_format().get_num_comp();
        for i in 0..num_components {
            if sbuf.get_stride(i) != dbuf.get_stride(i) {
                same = false;
                break;
            }
            if sbuf.get_offset(i) != dbuf.get_offset(i) {
                same = false;
                break;
            }
        }
        if same {
            let sdata = sbuf.get_data();
            let ddata = dbuf.get_data_mut().unwrap();
            ddata.copy_from_slice(&sdata[0..]);
        } else {
            let sdata = sbuf.get_data();
            for comp in 0..num_components {
                let (_, h) = sbuf.get_dimensions(comp);
                let src = &sdata[sbuf.get_offset(comp)..];
                let sstride = sbuf.get_stride(comp);
                let doff = dbuf.get_offset(comp);
                let dstride = dbuf.get_stride(comp);
                let ddata = dbuf.get_data_mut().unwrap();
                let dst = &mut ddata[doff..];
                let copy_size = sstride.min(dstride);
                for (dline, sline) in dst.chunks_exact_mut(dstride).take(h).zip(src.chunks_exact(sstride)) {
                    dline[..copy_size].copy_from_slice(&sline[..copy_size]);
macro_rules! add_stage {
    ($head:expr, $new:expr) => {
        if let Some(ref mut h) = $head {
            h.add($new);
        } else {
            $head = Some($new);
        }
    };
}

fn is_better_fmt(a: &ScaleInfo, b: &ScaleInfo) -> bool {
    if (a.width >= b.width) && (a.height >= b.height) {
    if a.fmt.get_max_depth() > b.fmt.get_max_depth() {
    if a.fmt.get_max_subsampling() < b.fmt.get_max_subsampling() {
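// Pipeline construction: unpack (or depalettise) the input when it is packed,
// scale either before or after colourspace conversion depending on which end
// is the "better" format (larger, deeper or less subsampled), convert between
// colour models via a "<in>_to_<out>" kernel, and finally pack into the
// destination layout. `add_stage!` appends each new stage to the chain.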
fn build_pipeline(ifmt: &ScaleInfo, ofmt: &ScaleInfo, just_convert: bool) -> ScaleResult<Option<Stage>> {
    let inname  = ifmt.fmt.get_model().get_short_name();
    let outname = ofmt.fmt.get_model().get_short_name();

    println!("convert {} -> {}", ifmt, ofmt);
    let needs_scale = if (ofmt.fmt.get_max_subsampling() > 0) &&
        (ofmt.fmt.get_max_subsampling() != ifmt.fmt.get_max_subsampling()) {
            true
        } else {
            !just_convert
        };
    let needs_unpack  = !ifmt.fmt.is_unpacked();
    let needs_pack    = !ofmt.fmt.is_unpacked();
    let needs_convert = inname != outname;
    let scale_before_cvt = is_better_fmt(&ifmt, &ofmt) && needs_convert
                           && (ofmt.fmt.get_max_subsampling() == 0);
    // TODO: stages for model and gamma conversion

    let mut stages: Option<Stage> = None;
    let mut cur_fmt = *ifmt;

    if needs_unpack {
        println!("[adding unpack]");
        let new_stage = if !cur_fmt.fmt.is_paletted() {
                Stage::new("unpack", &cur_fmt, &ofmt)?
            } else {
                Stage::new("depal", &cur_fmt, &ofmt)?
            };
        cur_fmt = new_stage.fmt_out;
        add_stage!(stages, new_stage);
    }
    if needs_scale && scale_before_cvt {
        println!("[adding scale]");
        let new_stage = Stage::new("scale", &cur_fmt, &ofmt)?;
        cur_fmt = new_stage.fmt_out;
        add_stage!(stages, new_stage);
    }
    if needs_convert {
        println!("[adding convert]");
        let cvtname = format!("{}_to_{}", inname, outname);
        println!("[{}]", cvtname);
        let new_stage = Stage::new(&cvtname, &cur_fmt, &ofmt)?;
        // TODO: if this fails, try converting via RGB or YUV
        cur_fmt = new_stage.fmt_out;
        add_stage!(stages, new_stage);
        // TODO: alpha plane copy/add
    }
    if needs_scale && !scale_before_cvt {
        println!("[adding scale]");
        let new_stage = Stage::new("scale", &cur_fmt, &ofmt)?;
        cur_fmt = new_stage.fmt_out;
        add_stage!(stages, new_stage);
    }
    if needs_pack {
        println!("[adding pack]");
        let new_stage = Stage::new("pack", &cur_fmt, &ofmt)?;
        //cur_fmt = new_stage.fmt_out;
        add_stage!(stages, new_stage);
    }

    if let Some(ref mut head) = stages {
        head.drop_last_tmp();
    }

    Ok(stages)
}
fn swap_plane<T:Copy>(data: &mut [T], stride: usize, h: usize, line0: &mut [T], line1: &mut [T]) {
    let mut doff0 = 0;
    let mut doff1 = stride * (h - 1);
    for _ in 0..h / 2 {
        line0.copy_from_slice(&data[doff0..][..stride]);
        line1.copy_from_slice(&data[doff1..][..stride]);
        data[doff1..][..stride].copy_from_slice(line0);
        data[doff0..][..stride].copy_from_slice(line1);
        doff0 += stride;
        doff1 -= stride;
    }
}
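// Flips the picture vertically by swapping rows top-to-bottom in every plane;
// the match arms below repeat the same loop for 8-, 16- and 32-bit as well as
// packed buffers.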
pub fn flip_picture(pic: &mut NABufferType) -> ScaleResult<()> {
    match *pic {
        NABufferType::Video(ref mut vb) => {
            let ncomp = vb.get_num_components();
            for comp in 0..ncomp {
                let off    = vb.get_offset(comp);
                let stride = vb.get_stride(comp);
                let (_, h) = vb.get_dimensions(comp);
                let data = vb.get_data_mut().unwrap();
                let mut line0 = vec![0; stride];
                let mut line1 = vec![0; stride];
                swap_plane(&mut data[off..], stride, h, line0.as_mut_slice(), line1.as_mut_slice());
            }
        },
        NABufferType::Video16(ref mut vb) => {
            let ncomp = vb.get_num_components();
            for comp in 0..ncomp {
                let off    = vb.get_offset(comp);
                let stride = vb.get_stride(comp);
                let (_, h) = vb.get_dimensions(comp);
                let data = vb.get_data_mut().unwrap();
                let mut line0 = vec![0; stride];
                let mut line1 = vec![0; stride];
                swap_plane(&mut data[off..], stride, h, line0.as_mut_slice(), line1.as_mut_slice());
            }
        },
        NABufferType::Video32(ref mut vb) => {
            let ncomp = vb.get_num_components();
            for comp in 0..ncomp {
                let off    = vb.get_offset(comp);
                let stride = vb.get_stride(comp);
                let (_, h) = vb.get_dimensions(comp);
                let data = vb.get_data_mut().unwrap();
                let mut line0 = vec![0; stride];
                let mut line1 = vec![0; stride];
                swap_plane(&mut data[off..], stride, h, line0.as_mut_slice(), line1.as_mut_slice());
            }
        },
        NABufferType::VideoPacked(ref mut vb) => {
            let ncomp = vb.get_num_components();
            for comp in 0..ncomp {
                let off    = vb.get_offset(comp);
                let stride = vb.get_stride(comp);
                let (_, h) = vb.get_dimensions(comp);
                let data = vb.get_data_mut().unwrap();
                let mut line0 = vec![0; stride];
                let mut line1 = vec![0; stride];
                swap_plane(&mut data[off..], stride, h, line0.as_mut_slice(), line1.as_mut_slice());
            }
        },
        _ => { return Err(ScaleError::InvalidArgument); },
    };
    Ok(())
}
impl NAScale {
    pub fn new(fmt_in: ScaleInfo, fmt_out: ScaleInfo) -> ScaleResult<Self> {
        let mut pipeline = None;
        let just_convert = (fmt_in.width == fmt_out.width) && (fmt_in.height == fmt_out.height);
        if fmt_in != fmt_out {
            pipeline = build_pipeline(&fmt_in, &fmt_out, just_convert)?;
        }
        Ok(Self { fmt_in, fmt_out, just_convert, pipeline })
    }
    pub fn needs_processing(&self) -> bool { self.pipeline.is_some() }
    pub fn get_in_fmt(&self)  -> ScaleInfo { self.fmt_in }
    pub fn get_out_fmt(&self) -> ScaleInfo { self.fmt_out }
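    // Typical use, mirroring the tests at the end of this file (a sketch with
    // error handling elided; buffer allocation is up to the caller):
    //
    //     let ifmt = get_scale_fmt_from_pic(&in_pic);
    //     let ofmt = get_scale_fmt_from_pic(&out_pic);
    //     let mut scaler = NAScale::new(ifmt, ofmt)?;
    //     scaler.convert(&in_pic, &mut out_pic)?;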
    pub fn convert(&mut self, pic_in: &NABufferType, pic_out: &mut NABufferType) -> ScaleResult<()> {
        let in_info  = pic_in.get_video_info();
        let out_info = pic_out.get_video_info();
        if in_info.is_none() || out_info.is_none() { return Err(ScaleError::InvalidArgument); }
        let in_info  = in_info.unwrap();
        let out_info = out_info.unwrap();
        if self.just_convert &&
           (in_info.get_width() != out_info.get_width() || in_info.get_height() != out_info.get_height()) {
            return Err(ScaleError::InvalidArgument);
        }
        let needs_flip = in_info.is_flipped() ^ out_info.is_flipped();
        check_format(in_info, &self.fmt_in, self.just_convert)?;
        check_format(out_info, &self.fmt_out, self.just_convert)?;
        let ret = if let Some(ref mut pipe) = self.pipeline {
                pipe.process(pic_in, pic_out)
            } else {
                copy(pic_in, pic_out);
                Ok(())
            };
        if ret.is_ok() && needs_flip {
            flip_picture(pic_out)?;
        }
        ret
    }
}
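    // Test helper: fills every sample of a buffer with `val`, widened to the
    // buffer's sample type (the 32-bit case replicates the byte into all four
    // byte lanes).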
    fn fill_pic(pic: &mut NABufferType, val: u8) {
        if let Some(ref mut buf) = pic.get_vbuf() {
            let data = buf.get_data_mut().unwrap();
            for el in data.iter_mut() { *el = val; }
        } else if let Some(ref mut buf) = pic.get_vbuf16() {
            let data = buf.get_data_mut().unwrap();
            for el in data.iter_mut() { *el = val as u16; }
        } else if let Some(ref mut buf) = pic.get_vbuf32() {
            let data = buf.get_data_mut().unwrap();
            for el in data.iter_mut() { *el = (val as u32) * 0x01010101; }
        }
    }
        let mut in_pic = alloc_video_buffer(NAVideoInfo::new(1, 1, false, RGB565_FORMAT), 3).unwrap();
        fill_pic(&mut in_pic, 42);
        let mut out_pic = alloc_video_buffer(NAVideoInfo::new(1, 1, false, RGB24_FORMAT), 3).unwrap();
        fill_pic(&mut out_pic, 0);
        let ifmt = get_scale_fmt_from_pic(&in_pic);
        let ofmt = get_scale_fmt_from_pic(&out_pic);
        let mut scaler = NAScale::new(ifmt, ofmt).unwrap();
        scaler.convert(&in_pic, &mut out_pic).unwrap();
        let obuf = out_pic.get_vbuf().unwrap();
        let odata = obuf.get_data();
        assert_eq!(odata[0], 0x0);
        assert_eq!(odata[1], 0x4);
        assert_eq!(odata[2], 0x52);

        let mut in_pic = alloc_video_buffer(NAVideoInfo::new(4, 4, false, RGB24_FORMAT), 3).unwrap();
        fill_pic(&mut in_pic, 42);
        let mut out_pic = alloc_video_buffer(NAVideoInfo::new(4, 4, false, YUV420_FORMAT), 3).unwrap();
        fill_pic(&mut out_pic, 0);
        let ifmt = get_scale_fmt_from_pic(&in_pic);
        let ofmt = get_scale_fmt_from_pic(&out_pic);
        let mut scaler = NAScale::new(ifmt, ofmt).unwrap();
        scaler.convert(&in_pic, &mut out_pic).unwrap();
        let obuf = out_pic.get_vbuf().unwrap();
        let yoff = obuf.get_offset(0);
        let uoff = obuf.get_offset(1);
        let voff = obuf.get_offset(2);
        let odata = obuf.get_data();
        assert_eq!(odata[yoff], 42);
        assert!(((odata[uoff] ^ 0x80) as i8).abs() <= 1);
        assert!(((odata[voff] ^ 0x80) as i8).abs() <= 1);
        let mut scaler = NAScale::new(ofmt, ifmt).unwrap();
        scaler.convert(&out_pic, &mut in_pic).unwrap();
        let obuf = in_pic.get_vbuf().unwrap();
        let odata = obuf.get_data();
        assert_eq!(odata[0], 42);

        let mut in_pic = alloc_video_buffer(NAVideoInfo::new(2, 2, false, RGB565_FORMAT), 3).unwrap();
        fill_pic(&mut in_pic, 42);
        let mut out_pic = alloc_video_buffer(NAVideoInfo::new(3, 3, false, RGB565_FORMAT), 3).unwrap();
        fill_pic(&mut out_pic, 0);
        let ifmt = get_scale_fmt_from_pic(&in_pic);
        let ofmt = get_scale_fmt_from_pic(&out_pic);
        let mut scaler = NAScale::new(ifmt, ofmt).unwrap();
        scaler.convert(&in_pic, &mut out_pic).unwrap();
        let obuf = out_pic.get_vbuf16().unwrap();
        let odata = obuf.get_data();
        assert_eq!(odata[0], 42);
    }

    #[test]
    fn test_scale_and_convert() {
        let mut in_pic = alloc_video_buffer(NAVideoInfo::new(7, 3, false, RGB565_FORMAT), 3).unwrap();
        fill_pic(&mut in_pic, 42);
        let mut out_pic = alloc_video_buffer(NAVideoInfo::new(4, 4, false, YUV420_FORMAT), 3).unwrap();
        fill_pic(&mut out_pic, 0);
        let ifmt = get_scale_fmt_from_pic(&in_pic);
        let ofmt = get_scale_fmt_from_pic(&out_pic);
        let mut scaler = NAScale::new(ifmt, ofmt).unwrap();
        scaler.convert(&in_pic, &mut out_pic).unwrap();
        let obuf = out_pic.get_vbuf().unwrap();
        let yoff = obuf.get_offset(0);
        let uoff = obuf.get_offset(1);
        let voff = obuf.get_offset(2);
        let odata = obuf.get_data();
        assert_eq!(odata[yoff], 28);
        assert_eq!(odata[uoff], 154);
        assert_eq!(odata[voff], 103);