1 //! Image conversion functionality.
5 //! Converts the input image into a YUV one and scales it down by a factor of two.
7 //! use nihav_core::scale::*;
8 //! use nihav_core::formats::{RGB24_FORMAT, YUV420_FORMAT};
9 //! use nihav_core::frame::{alloc_video_buffer, NAVideoInfo};
11 //! let mut in_pic = alloc_video_buffer(NAVideoInfo::new(640, 480, false, RGB24_FORMAT), 4).unwrap();
12 //! let mut out_pic = alloc_video_buffer(NAVideoInfo::new(320, 240, false, YUV420_FORMAT), 4).unwrap();
13 //! let in_fmt = get_scale_fmt_from_pic(&in_pic);
14 //! let out_fmt = get_scale_fmt_from_pic(&out_pic);
15 //! let mut scaler = NAScale::new(in_fmt, out_fmt).unwrap();
16 //! scaler.convert(&in_pic, &mut out_pic).unwrap();
26 #[allow(clippy::module_inception)]
31 pub use crate::scale::palette::{palettise_frame, QuantisationMode, PaletteSearchMode};
33 /// Image format information used by the converter.
34 #[derive(Clone,Copy,PartialEq)]
35 pub struct ScaleInfo {
36 /// Pixel format description.
37 pub fmt: NAPixelFormaton,
44 impl std::fmt::Display for ScaleInfo {
45 fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result {
// Renders as "(WxH, format)", e.g. for the debug logging in build_pipeline().
46 write!(f, "({}x{}, {})", self.width, self.height, self.fmt)
50 /// A list specifying general image conversion errors.
51 #[derive(Debug,Clone,Copy,PartialEq)]
54 /// Input or output buffer contains no image data.
56 /// Allocation failed.
60 /// Feature is not implemented.
62 /// Internal implementation bug.
66 /// A specialised `Result` type for image conversion operations.
67 pub type ScaleResult<T> = Result<T, ScaleError>;
// Initialises the kernel for the given input/output formats and returns the
// intermediate buffer the stage will write into.
// NOTE(review): `Stage::new` below calls `init` with an extra `options`
// argument — confirm the trait signature actually matches that call site.
70 fn init(&mut self, in_fmt: &ScaleInfo, dest_fmt: &ScaleInfo) -> ScaleResult<NABufferType>;
// Performs this kernel's conversion step from `pic_in` into `pic_out`.
71 fn process(&mut self, pic_in: &NABufferType, pic_out: &mut NABufferType);
76 create: fn () -> Box<dyn kernel::Kernel>,
80 fn find(name: &str) -> ScaleResult<Box<dyn kernel::Kernel>> {
81 for kern in KERNELS.iter() {
82 if kern.name == name {
83 return Ok((kern.create)());
86 Err(ScaleError::InvalidArgument)
// Registry of every available conversion kernel; `KernelDesc::find` looks
// stages up here by name when building a pipeline.
90 const KERNELS: &[KernelDesc] = &[
91 KernelDesc { name: "pack", create: repack::create_pack },
92 KernelDesc { name: "unpack", create: repack::create_unpack },
93 KernelDesc { name: "depal", create: repack::create_depal },
94 KernelDesc { name: "palette", create: palette::create_palettise },
95 KernelDesc { name: "scale", create: scale::create_scale },
96 KernelDesc { name: "shallow", create: depth::create_shallow },
97 KernelDesc { name: "fill", create: fill::create_fill },
98 KernelDesc { name: "rgb_to_yuv", create: colorcvt::create_rgb2yuv },
99 KernelDesc { name: "yuv_to_rgb", create: colorcvt::create_yuv2rgb },
// Intermediate buffer this stage writes into when another stage follows.
104 tmp_pic: NABufferType,
// Next stage of the conversion pipeline, if any.
105 next: Option<Box<Stage>>,
// Kernel performing the actual work of this stage.
106 worker: Box<dyn kernel::Kernel>,
109 /// Converts input picture information into format used by scaler.
110 pub fn get_scale_fmt_from_pic(pic: &NABufferType) -> ScaleInfo {
111 let info = pic.get_video_info().unwrap();
112 ScaleInfo { fmt: info.get_format(), width: info.get_width(), height: info.get_height() }
116 fn new(name: &str, in_fmt: &ScaleInfo, dest_fmt: &ScaleInfo, options: &[(String, String)]) -> ScaleResult<Self> {
117 let mut worker = KernelDesc::find(name)?;
118 let tmp_pic = worker.init(in_fmt, dest_fmt, options)?;
119 let fmt_out = get_scale_fmt_from_pic(&tmp_pic);
120 Ok(Self { fmt_out, tmp_pic, next: None, worker })
122 fn add(&mut self, new: Stage) {
123 if let Some(ref mut next) = self.next {
126 self.next = Some(Box::new(new));
129 fn process(&mut self, pic_in: &NABufferType, pic_out: &mut NABufferType) -> ScaleResult<()> {
130 if let Some(ref mut nextstage) = self.next {
131 self.worker.process(pic_in, &mut self.tmp_pic);
132 nextstage.process(&self.tmp_pic, pic_out)?;
134 self.worker.process(pic_in, pic_out);
138 fn drop_last_tmp(&mut self) {
139 if let Some(ref mut nextstage) = self.next {
140 nextstage.drop_last_tmp();
142 self.tmp_pic = NABufferType::None;
147 /// Image format converter.
// Chain of conversion stages; `None` when a plain copy is sufficient.
152 pipeline: Option<Stage>,
155 fn check_format(in_fmt: NAVideoInfo, ref_fmt: &ScaleInfo, just_convert: bool) -> ScaleResult<()> {
156 if in_fmt.get_format() != ref_fmt.fmt { return Err(ScaleError::InvalidArgument); }
157 if !just_convert && (in_fmt.get_width() != ref_fmt.width || in_fmt.get_height() != ref_fmt.height) {
158 return Err(ScaleError::InvalidArgument);
// Copies picture contents between two buffers of the same format without any
// conversion, falling back to line-by-line copies when the layouts differ.
163 fn copy(pic_in: &NABufferType, pic_out: &mut NABufferType)
// 8-bit (byte-based) buffers.
165 if let (Some(ref sbuf), Some(ref mut dbuf)) = (pic_in.get_vbuf(), pic_out.get_vbuf()) {
// Paletted data: index plane and palette live in the same buffer.
166 if sbuf.get_info().get_format().is_paletted() {
// Layouts match when the index stride and the palette offset agree.
167 let same = sbuf.get_stride(0) == dbuf.get_stride(0) && sbuf.get_offset(1) == dbuf.get_offset(1);
// Matching layout: one bulk copy covers indices and palette at once.
169 let src = sbuf.get_data();
170 let dst = dbuf.get_data_mut().unwrap();
171 dst.copy_from_slice(src);
// Otherwise copy the index plane row by row...
173 let (_, h) = sbuf.get_dimensions(0);
174 let soff = sbuf.get_offset(0);
175 let spoff = sbuf.get_offset(1);
176 let sstride = sbuf.get_stride(0);
177 let src = sbuf.get_data();
178 let doff = dbuf.get_offset(0);
179 let dpoff = dbuf.get_offset(1);
180 let dstride = dbuf.get_stride(0);
181 let dst = dbuf.get_data_mut().unwrap();
182 let copy_size = sstride.min(dstride);
183 for (dline, sline) in dst[doff..].chunks_exact_mut(dstride).take(h).zip(src[soff..].chunks_exact(sstride)) {
184 dline[..copy_size].copy_from_slice(&sline[..copy_size]);
// ...followed by the palette as one chunk.
186 dst[dpoff..].copy_from_slice(&src[spoff..]);
// Non-paletted 8-bit: compare the per-plane layout of both buffers.
191 let src_components = sbuf.get_info().get_format().get_num_comp();
192 let dst_components = dbuf.get_info().get_format().get_num_comp();
193 for i in 0..src_components.max(dst_components) {
194 if sbuf.get_stride(i) != dbuf.get_stride(i) {
198 if sbuf.get_offset(i) != dbuf.get_offset(i) {
// Identical layout: a single bulk copy of the whole buffer.
204 let sdata = sbuf.get_data();
205 let ddata = dbuf.get_data_mut().unwrap();
206 ddata.copy_from_slice(&sdata[0..]);
// Differing layout: copy each common plane line by line.
208 let sdata = sbuf.get_data();
209 for comp in 0..src_components.min(dst_components) {
210 let (_, h) = sbuf.get_dimensions(comp);
211 let src = &sdata[sbuf.get_offset(comp)..];
212 let sstride = sbuf.get_stride(comp);
213 let doff = dbuf.get_offset(comp);
214 let dstride = dbuf.get_stride(comp);
215 let ddata = dbuf.get_data_mut().unwrap();
216 let dst = &mut ddata[doff..];
217 let copy_size = sstride.min(dstride);
218 for (dline, sline) in dst.chunks_exact_mut(dstride).take(h).zip(src.chunks_exact(sstride)) {
219 dline[..copy_size].copy_from_slice(&sline[..copy_size]);
// 16-bit buffers: same strategy as the 8-bit branch above.
223 } else if let (Some(ref sbuf), Some(ref mut dbuf)) = (pic_in.get_vbuf16(), pic_out.get_vbuf16()) {
225 let src_components = sbuf.get_info().get_format().get_num_comp();
226 let dst_components = dbuf.get_info().get_format().get_num_comp();
227 for i in 0..src_components.max(dst_components) {
228 if sbuf.get_stride(i) != dbuf.get_stride(i) {
232 if sbuf.get_offset(i) != dbuf.get_offset(i) {
// Bulk copy clamped to the shorter of the two buffers.
238 let sdata = sbuf.get_data();
239 let ddata = dbuf.get_data_mut().unwrap();
240 let copy_len = sdata.len().min(ddata.len());
241 ddata[..copy_len].copy_from_slice(&sdata[..copy_len]);
// Per-plane line-by-line copy for mismatched layouts.
243 let sdata = sbuf.get_data();
244 for comp in 0..src_components.min(dst_components) {
245 let (_, h) = sbuf.get_dimensions(comp);
246 let src = &sdata[sbuf.get_offset(comp)..];
247 let sstride = sbuf.get_stride(comp);
248 let doff = dbuf.get_offset(comp);
249 let dstride = dbuf.get_stride(comp);
250 let ddata = dbuf.get_data_mut().unwrap();
251 let dst = &mut ddata[doff..];
252 let copy_size = sstride.min(dstride);
253 for (dline, sline) in dst.chunks_exact_mut(dstride).take(h).zip(src.chunks_exact(sstride)) {
254 dline[..copy_size].copy_from_slice(&sline[..copy_size]);
// Appends stage `$new` to the pipeline head `$head`, starting the chain when
// it is still empty.
263 macro_rules! add_stage {
264 ($head:expr, $new:expr) => {
265 if let Some(ref mut h) = $head {
272 fn is_better_fmt(a: &ScaleInfo, b: &ScaleInfo) -> bool {
273 if (a.width >= b.width) && (a.height >= b.height) {
276 if a.fmt.get_max_depth() > b.fmt.get_max_depth() {
279 if a.fmt.get_max_subsampling() < b.fmt.get_max_subsampling() {
284 fn fmt_needs_scale(ifmt: &NAPixelFormaton, ofmt: &NAPixelFormaton) -> bool {
285 for (ichr, ochr) in ifmt.comp_info.iter().zip(ofmt.comp_info.iter()) {
286 if let (Some(ic), Some(oc)) = (ichr, ochr) {
287 if ic.h_ss != oc.h_ss || ic.v_ss != oc.v_ss {
// Builds the chain of conversion stages that turns `ifmt` input into `ofmt`
// output: unpack/depal -> (scale) -> colourspace convert -> (scale) ->
// depth reduction -> fill -> pack/palettise.
294 fn build_pipeline(ifmt: &ScaleInfo, ofmt: &ScaleInfo, just_convert: bool, options: &[(String, String)]) -> ScaleResult<Option<Stage>> {
// The "debug" option (empty or "true") enables stage-construction logging.
295 let mut debug = false;
296 for (name, value) in options.iter() {
297 if name == "debug" && (value.is_empty() || value == "true") {
// Short colour-model names ("rgb", "yuv", ...) select the convert kernel.
303 let inname = ifmt.fmt.get_model().get_short_name();
304 let outname = ofmt.fmt.get_model().get_short_name();
307 println!("convert {} -> {}", ifmt, ofmt);
// A subsampling mismatch forces a scale stage even for same-size pictures.
309 let needs_scale = if fmt_needs_scale(&ifmt.fmt, &ofmt.fmt) {
314 let needs_unpack = !ifmt.fmt.is_unpacked();
315 let needs_pack = !ofmt.fmt.is_unpacked();
316 let needs_convert = inname != outname;
// Scale before converting when the input is richer and the target is not
// subsampled, so the conversion runs on fewer/cheaper samples.
317 let scale_before_cvt = is_better_fmt(ifmt, ofmt) && needs_convert
318 && (ofmt.fmt.get_max_subsampling() == 0);
319 let needs_palettise = ofmt.fmt.palette;
320 //todo stages for model and gamma conversion
322 let mut stages: Option<Stage> = None;
// `cur_fmt` tracks the format produced by the stage added last.
323 let mut cur_fmt = *ifmt;
327 println!("[adding unpack]");
// Packed input is unpacked into planes; paletted input is depalettised.
329 let new_stage = if !cur_fmt.fmt.is_paletted() {
330 Stage::new("unpack", &cur_fmt, ofmt, options)?
332 Stage::new("depal", &cur_fmt, ofmt, options)?
334 cur_fmt = new_stage.fmt_out;
335 add_stage!(stages, new_stage);
337 if needs_scale && scale_before_cvt {
339 println!("[adding scale]");
341 let new_stage = Stage::new("scale", &cur_fmt, ofmt, options)?;
342 cur_fmt = new_stage.fmt_out;
343 add_stage!(stages, new_stage);
347 println!("[adding convert]");
// Conversion kernels are registered as "<inmodel>_to_<outmodel>".
349 let cvtname = format!("{}_to_{}", inname, outname);
351 println!("[{}]", cvtname);
353 let new_stage = Stage::new(&cvtname, &cur_fmt, ofmt, options)?;
354 //todo if fails try converting via RGB or YUV
355 cur_fmt = new_stage.fmt_out;
356 add_stage!(stages, new_stage);
357 //todo alpha plane copy/add
359 if needs_scale && !scale_before_cvt {
361 println!("[adding scale]");
363 let new_stage = Stage::new("scale", &cur_fmt, ofmt, options)?;
364 cur_fmt = new_stage.fmt_out;
365 add_stage!(stages, new_stage);
// Reduce high bit depth to 8 bits per component for 8-bit outputs.
367 let is_in_high_bd = cur_fmt.fmt.get_max_depth() > 8;
368 let is_out_high_bd = ofmt.fmt.get_max_depth() > 8;
369 if is_in_high_bd && !is_out_high_bd {
371 println!("[adding shallow]");
373 let new_stage = Stage::new("shallow", &cur_fmt, ofmt, options)?;
374 cur_fmt = new_stage.fmt_out;
375 add_stage!(stages, new_stage);
// Fill missing components (e.g. an alpha plane the output gains).
377 let icomponents = cur_fmt.fmt.components - if cur_fmt.fmt.alpha { 1 } else { 0 };
378 let ocomponents = ofmt.fmt.components - if ofmt.fmt.alpha { 1 } else { 0 };
379 if !needs_palettise && ((!cur_fmt.fmt.alpha && ofmt.fmt.alpha) || (icomponents < ocomponents)) {
381 println!("[adding fill]");
383 let new_stage = Stage::new("fill", &cur_fmt, ofmt, options)?;
384 cur_fmt = new_stage.fmt_out;
385 add_stage!(stages, new_stage);
// Repack planes for packed outputs, or palettise for paletted targets.
387 if needs_pack && !needs_palettise {
389 println!("[adding pack]");
391 let new_stage = Stage::new("pack", &cur_fmt, ofmt, options)?;
392 //cur_fmt = new_stage.fmt_out;
393 add_stage!(stages, new_stage);
397 println!("[adding palettise]");
399 let new_stage = Stage::new("palette", &cur_fmt, ofmt, options)?;
400 //cur_fmt = new_stage.fmt_out;
401 add_stage!(stages, new_stage);
// The final stage writes straight into the caller's output buffer, so its
// temporary buffer can be released.
404 if let Some(ref mut head) = stages {
405 head.drop_last_tmp();
/// Swaps the rows of a single plane top-for-bottom (vertical flip).
///
/// `data` starts at the first row of the plane, `stride` is the distance
/// between successive rows and `h` is the row count; `line0` and `line1`
/// are caller-provided scratch buffers of `stride` elements each.
fn swap_plane<T: Copy>(data: &mut [T], stride: usize, h: usize, line0: &mut [T], line1: &mut [T]) {
    // Offsets are computed per iteration instead of seeding a running
    // `stride * (h - 1)` cursor: the old form underflowed (panicking in
    // debug builds) for a zero-height plane, which is now simply a no-op.
    for row in 0..h / 2 {
        let top = row * stride;
        let bot = (h - 1 - row) * stride;
        line0.copy_from_slice(&data[top..][..stride]);
        line1.copy_from_slice(&data[bot..][..stride]);
        data[bot..][..stride].copy_from_slice(line0);
        data[top..][..stride].copy_from_slice(line1);
    }
}
424 /// Flips the picture contents.
/// Every plane has its rows swapped top-for-bottom via `swap_plane`;
/// returns `ScaleError::InvalidArgument` for non-video buffers.
425 pub fn flip_picture(pic: &mut NABufferType) -> ScaleResult<()> {
// 8-bit planar data.
427 NABufferType::Video(ref mut vb) => {
428 let ncomp = vb.get_num_components();
429 for comp in 0..ncomp {
430 let off = vb.get_offset(comp);
431 let stride = vb.get_stride(comp);
432 let (_, h) = vb.get_dimensions(comp);
433 let data = vb.get_data_mut().unwrap();
434 let mut line0 = vec![0; stride];
435 let mut line1 = vec![0; stride];
436 swap_plane(&mut data[off..], stride, h, line0.as_mut_slice(), line1.as_mut_slice());
// 16-bit data; at least one plane is assumed even if the format reports none.
439 NABufferType::Video16(ref mut vb) => {
440 let ncomp = vb.get_num_components().max(1);
441 for comp in 0..ncomp {
442 let off = vb.get_offset(comp);
443 let stride = vb.get_stride(comp);
444 let (_, h) = vb.get_dimensions(comp);
445 let data = vb.get_data_mut().unwrap();
446 let mut line0 = vec![0; stride];
447 let mut line1 = vec![0; stride];
448 swap_plane(&mut data[off..], stride, h, line0.as_mut_slice(), line1.as_mut_slice());
// 32-bit data.
451 NABufferType::Video32(ref mut vb) => {
452 let ncomp = vb.get_num_components().max(1);
453 for comp in 0..ncomp {
454 let off = vb.get_offset(comp);
455 let stride = vb.get_stride(comp);
456 let (_, h) = vb.get_dimensions(comp);
457 let data = vb.get_data_mut().unwrap();
458 let mut line0 = vec![0; stride];
459 let mut line1 = vec![0; stride];
460 swap_plane(&mut data[off..], stride, h, line0.as_mut_slice(), line1.as_mut_slice());
// Packed data: flip per component when components are reported...
463 NABufferType::VideoPacked(ref mut vb) => {
464 let ncomp = vb.get_num_components();
465 for comp in 0..ncomp {
466 let off = vb.get_offset(comp);
467 let stride = vb.get_stride(comp);
468 let (_, h) = vb.get_dimensions(comp);
469 let data = vb.get_data_mut().unwrap();
470 let mut line0 = vec![0; stride];
471 let mut line1 = vec![0; stride];
472 swap_plane(&mut data[off..], stride, h, line0.as_mut_slice(), line1.as_mut_slice());
// ...otherwise fall back to flipping the whole buffer as one plane.
474 if ncomp == 0 && vb.get_stride(0) != 0 {
475 let off = vb.get_offset(0);
476 let stride = vb.get_stride(0);
477 let (_, h) = vb.get_dimensions(0);
478 let data = vb.get_data_mut().unwrap();
479 let mut line0 = vec![0; stride];
480 let mut line1 = vec![0; stride];
481 swap_plane(&mut data[off..], stride, h, line0.as_mut_slice(), line1.as_mut_slice());
// Audio or empty buffers cannot be flipped.
484 _ => { return Err(ScaleError::InvalidArgument); },
490 /// Constructs a new `NAScale` instance.
491 pub fn new(fmt_in: ScaleInfo, fmt_out: ScaleInfo) -> ScaleResult<Self> {
492 let just_convert = (fmt_in.width == fmt_out.width) && (fmt_in.height == fmt_out.height);
493 let pipeline = if fmt_in != fmt_out {
494 build_pipeline(&fmt_in, &fmt_out, just_convert, &[])?
498 Ok(Self { fmt_in, fmt_out, just_convert, pipeline })
500 /// Constructs a new `NAScale` instance taking into account provided options.
501 pub fn new_with_options(fmt_in: ScaleInfo, fmt_out: ScaleInfo, options: &[(String, String)]) -> ScaleResult<Self> {
502 let just_convert = (fmt_in.width == fmt_out.width) && (fmt_in.height == fmt_out.height);
503 let pipeline = if fmt_in != fmt_out {
504 build_pipeline(&fmt_in, &fmt_out, just_convert, options)?
508 Ok(Self { fmt_in, fmt_out, just_convert, pipeline })
510 /// Checks whether requested conversion operation is needed at all.
/// (`true` when a conversion pipeline was built, i.e. the formats differ.)
511 pub fn needs_processing(&self) -> bool { self.pipeline.is_some() }
512 /// Returns the input image format.
513 pub fn get_in_fmt(&self) -> ScaleInfo { self.fmt_in }
514 /// Returns the output image format.
515 pub fn get_out_fmt(&self) -> ScaleInfo { self.fmt_out }
516 /// Performs the image format conversion.
517 pub fn convert(&mut self, pic_in: &NABufferType, pic_out: &mut NABufferType) -> ScaleResult<()> {
518 let in_info = pic_in.get_video_info();
519 let out_info = pic_out.get_video_info();
520 if in_info.is_none() || out_info.is_none() { return Err(ScaleError::InvalidArgument); }
521 let in_info = in_info.unwrap();
522 let out_info = out_info.unwrap();
523 if self.just_convert &&
524 (in_info.get_width() != out_info.get_width() || in_info.get_height() != out_info.get_height()) {
525 return Err(ScaleError::InvalidArgument);
527 let needs_flip = in_info.is_flipped() ^ out_info.is_flipped();
528 check_format(in_info, &self.fmt_in, self.just_convert)?;
529 check_format(out_info, &self.fmt_out, self.just_convert)?;
530 let ret = if let Some(ref mut pipe) = self.pipeline {
531 pipe.process(pic_in, pic_out)
533 copy(pic_in, pic_out);
536 if ret.is_ok() && needs_flip {
537 flip_picture(pic_out)?;
547 fn fill_pic(pic: &mut NABufferType, val: u8) {
548 if let Some(ref mut buf) = pic.get_vbuf() {
549 let data = buf.get_data_mut().unwrap();
550 for el in data.iter_mut() { *el = val; }
551 } else if let Some(ref mut buf) = pic.get_vbuf16() {
552 let data = buf.get_data_mut().unwrap();
553 for el in data.iter_mut() { *el = val as u16; }
554 } else if let Some(ref mut buf) = pic.get_vbuf32() {
555 let data = buf.get_data_mut().unwrap();
556 for el in data.iter_mut() { *el = (val as u32) * 0x01010101; }
// RGB565 -> RGB24 conversion of a single pixel.
561 let mut in_pic = alloc_video_buffer(NAVideoInfo::new(1, 1, false, RGB565_FORMAT), 3).unwrap();
562 fill_pic(&mut in_pic, 42);
563 let mut out_pic = alloc_video_buffer(NAVideoInfo::new(1, 1, false, RGB24_FORMAT), 3).unwrap();
564 fill_pic(&mut out_pic, 0);
565 let ifmt = get_scale_fmt_from_pic(&in_pic);
566 let ofmt = get_scale_fmt_from_pic(&out_pic);
567 let mut scaler = NAScale::new(ifmt, ofmt).unwrap();
568 scaler.convert(&in_pic, &mut out_pic).unwrap();
569 let obuf = out_pic.get_vbuf().unwrap();
570 let odata = obuf.get_data();
// Expected expansion of the 16-bit sample value 42 into R, G, B bytes.
571 assert_eq!(odata[0], 0x0);
572 assert_eq!(odata[1], 0x4);
573 assert_eq!(odata[2], 0x52);
// RGB24 -> YUV420: uniform grey input keeps its luma and yields
// near-neutral (0x80 +/- 1) chroma.
575 let mut in_pic = alloc_video_buffer(NAVideoInfo::new(4, 4, false, RGB24_FORMAT), 3).unwrap();
576 fill_pic(&mut in_pic, 42);
577 let mut out_pic = alloc_video_buffer(NAVideoInfo::new(4, 4, false, YUV420_FORMAT), 3).unwrap();
578 fill_pic(&mut out_pic, 0);
579 let ifmt = get_scale_fmt_from_pic(&in_pic);
580 let ofmt = get_scale_fmt_from_pic(&out_pic);
581 let mut scaler = NAScale::new(ifmt, ofmt).unwrap();
582 scaler.convert(&in_pic, &mut out_pic).unwrap();
583 let obuf = out_pic.get_vbuf().unwrap();
584 let yoff = obuf.get_offset(0);
585 let uoff = obuf.get_offset(1);
586 let voff = obuf.get_offset(2);
587 let odata = obuf.get_data();
588 assert_eq!(odata[yoff], 42);
589 assert!(((odata[uoff] ^ 0x80) as i8).abs() <= 1);
590 assert!(((odata[voff] ^ 0x80) as i8).abs() <= 1);
// ...and back to RGB24: the grey level must survive the round trip.
591 let mut scaler = NAScale::new(ofmt, ifmt).unwrap();
592 scaler.convert(&out_pic, &mut in_pic).unwrap();
593 let obuf = in_pic.get_vbuf().unwrap();
594 let odata = obuf.get_data();
595 assert_eq!(odata[0], 42);
// Same-format scaling 2x2 -> 3x3: the first sample of a uniform input must
// keep its value.
599 let mut in_pic = alloc_video_buffer(NAVideoInfo::new(2, 2, false, RGB565_FORMAT), 3).unwrap();
600 fill_pic(&mut in_pic, 42);
601 let mut out_pic = alloc_video_buffer(NAVideoInfo::new(3, 3, false, RGB565_FORMAT), 3).unwrap();
602 fill_pic(&mut out_pic, 0);
603 let ifmt = get_scale_fmt_from_pic(&in_pic);
604 let ofmt = get_scale_fmt_from_pic(&out_pic);
605 let mut scaler = NAScale::new(ifmt, ofmt).unwrap();
606 scaler.convert(&in_pic, &mut out_pic).unwrap();
607 let obuf = out_pic.get_vbuf16().unwrap();
608 let odata = obuf.get_data();
609 assert_eq!(odata[0], 42);
// Scaling and colourspace conversion in one pipeline: 7x3 RGB565 -> 4x4
// YUV420. NOTE(review): the expected plane values are presumably golden
// numbers captured from a known-good run — confirm before changing kernels.
612 fn test_scale_and_convert() {
613 let mut in_pic = alloc_video_buffer(NAVideoInfo::new(7, 3, false, RGB565_FORMAT), 3).unwrap();
614 fill_pic(&mut in_pic, 42);
615 let mut out_pic = alloc_video_buffer(NAVideoInfo::new(4, 4, false, YUV420_FORMAT), 3).unwrap();
616 fill_pic(&mut out_pic, 0);
617 let ifmt = get_scale_fmt_from_pic(&in_pic);
618 let ofmt = get_scale_fmt_from_pic(&out_pic);
619 let mut scaler = NAScale::new(ifmt, ofmt).unwrap();
620 scaler.convert(&in_pic, &mut out_pic).unwrap();
621 let obuf = out_pic.get_vbuf().unwrap();
622 let yoff = obuf.get_offset(0);
623 let uoff = obuf.get_offset(1);
624 let voff = obuf.get_offset(2);
625 let odata = obuf.get_data();
626 assert_eq!(odata[yoff], 11);
627 assert_eq!(odata[uoff], 162);
628 assert_eq!(odata[voff], 118);
// Scaling plus palettisation: 7x3 YUV420 -> 4x4 PAL8. The first palette
// entry is checked against expected RGB values; index plane starts at 0.
// NOTE(review): expected values are presumably golden numbers from a
// known-good run — confirm before changing the palettise kernel.
631 fn test_scale_and_convert_to_pal() {
632 let mut in_pic = alloc_video_buffer(NAVideoInfo::new(7, 3, false, YUV420_FORMAT), 3).unwrap();
633 fill_pic(&mut in_pic, 142);
634 let mut out_pic = alloc_video_buffer(NAVideoInfo::new(4, 4, false, PAL8_FORMAT), 0).unwrap();
635 fill_pic(&mut out_pic, 0);
636 let ifmt = get_scale_fmt_from_pic(&in_pic);
637 let ofmt = get_scale_fmt_from_pic(&out_pic);
638 let mut scaler = NAScale::new(ifmt, ofmt).unwrap();
639 scaler.convert(&in_pic, &mut out_pic).unwrap();
640 let obuf = out_pic.get_vbuf().unwrap();
641 let dataoff = obuf.get_offset(0);
642 let paloff = obuf.get_offset(1);
643 let odata = obuf.get_data();
644 assert_eq!(odata[dataoff], 0);
645 assert_eq!(odata[paloff], 157);
646 assert_eq!(odata[paloff + 1], 129);
647 assert_eq!(odata[paloff + 2], 170);
650 fn test_scale_modes() {
651 const IN_DATA: [[u8; 6]; 2] = [
652 [0xFF, 0xC0, 0x40, 0x00, 0x40, 0xC0],
653 [0x00, 0x40, 0xC0, 0xFF, 0xC0, 0x40]
655 const TEST_DATA: &[(&str, [[u8; 9]; 3])] = &[
657 [[0xFF, 0xC0, 0x40, 0xFF, 0xC0, 0x40, 0x00, 0x40, 0xC0],
658 [0xFF, 0xC0, 0x40, 0xFF, 0xC0, 0x40, 0x00, 0x40, 0xC0],
659 [0x00, 0x40, 0xC0, 0x00, 0x40, 0xC0, 0xFF, 0xC0, 0x40]]),
661 [[0xFF, 0xC0, 0x40, 0x55, 0x6A, 0x95, 0x00, 0x40, 0xC0],
662 [0x55, 0x6A, 0x95, 0x8D, 0x86, 0x78, 0xAA, 0x95, 0x6A],
663 [0x00, 0x40, 0xC0, 0xAA, 0x95, 0x6A, 0xFF, 0xC0, 0x40]]),
665 [[0xFF, 0xC0, 0x40, 0x4B, 0x65, 0x9A, 0x00, 0x36, 0xC9],
666 [0x4B, 0x65, 0x9A, 0x94, 0x8A, 0x74, 0xB3, 0x9D, 0x61],
667 [0x00, 0x36, 0xC9, 0xBA, 0x9D, 0x61, 0xFF, 0xD3, 0x2B]]),
669 [[0xFF, 0xC0, 0x40, 0x4C, 0x66, 0x98, 0x00, 0x31, 0xCD],
670 [0x4C, 0x66, 0x98, 0x91, 0x88, 0x74, 0xB1, 0x9D, 0x5F],
671 [0x00, 0x31, 0xCD, 0xBB, 0x9D, 0x5F, 0xFF, 0xDD, 0x1E]]),
673 [[0xFF, 0xC0, 0x40, 0x4F, 0x68, 0x9B, 0x00, 0x35, 0xCD],
674 [0x4F, 0x68, 0x9B, 0x96, 0x8D, 0x79, 0xB3, 0xA0, 0x64],
675 [0x00, 0x35, 0xCD, 0xBE, 0xA1, 0x65, 0xFF, 0xDC, 0x28]]),
678 let in_pic = alloc_video_buffer(NAVideoInfo::new(2, 2, false, RGB24_FORMAT), 3).unwrap();
679 if let Some(ref mut vbuf) = in_pic.get_vbuf() {
680 let stride = vbuf.get_stride(0);
681 let data = vbuf.get_data_mut().unwrap();
682 for (dline, rline) in data.chunks_mut(stride).zip(IN_DATA.iter()) {
683 dline[..6].copy_from_slice(rline);
686 panic!("wrong format");
688 let mut out_pic = alloc_video_buffer(NAVideoInfo::new(3, 3, false, RGB24_FORMAT), 3).unwrap();
689 let ifmt = get_scale_fmt_from_pic(&in_pic);
690 let ofmt = get_scale_fmt_from_pic(&out_pic);
691 for (method, ref_data) in TEST_DATA.iter() {
692 fill_pic(&mut out_pic, 0);
693 let mut scaler = NAScale::new_with_options(ifmt, ofmt, &[("scaler".to_string(), method.to_string())]).unwrap();
694 scaler.convert(&in_pic, &mut out_pic).unwrap();
695 let obuf = out_pic.get_vbuf().unwrap();
696 let ostride = obuf.get_stride(0);
697 let odata = obuf.get_data();
698 for (oline, rline) in odata.chunks(ostride).zip(ref_data.iter()) {
699 for (&a, &b) in oline.iter().zip(rline.iter()) {