}
impl Stage {
- fn new(name: &str, in_fmt: &ScaleInfo, dest_fmt: &ScaleInfo) -> ScaleResult<Self> {
+ /// Creates a pipeline stage running the named kernel, forwarding user options to it.
+ fn new(name: &str, in_fmt: &ScaleInfo, dest_fmt: &ScaleInfo, options: &[(String, String)]) -> ScaleResult<Self> {
let mut worker = KernelDesc::find(name)?;
- let tmp_pic = worker.init(in_fmt, dest_fmt)?;
+ let tmp_pic = worker.init(in_fmt, dest_fmt, options)?;
+ // The kernel's init() allocates the intermediate picture; derive this stage's
+ // actual output format from that picture rather than trusting dest_fmt.
let fmt_out = get_scale_fmt_from_pic(&tmp_pic);
Ok(Self { fmt_out, tmp_pic, next: None, worker })
}
fn copy(pic_in: &NABufferType, pic_out: &mut NABufferType)
{
if let (Some(ref sbuf), Some(ref mut dbuf)) = (pic_in.get_vbuf(), pic_out.get_vbuf()) {
+ if sbuf.get_info().get_format().is_paletted() {
+ let same = sbuf.get_stride(0) == dbuf.get_stride(0) && sbuf.get_offset(1) == dbuf.get_offset(1);
+ if same {
+ let src = sbuf.get_data();
+ let dst = dbuf.get_data_mut().unwrap();
+ dst.copy_from_slice(src);
+ } else {
+ let (_, h) = sbuf.get_dimensions(0);
+ let soff = sbuf.get_offset(0);
+ let spoff = sbuf.get_offset(1);
+ let sstride = sbuf.get_stride(0);
+ let src = sbuf.get_data();
+ let doff = dbuf.get_offset(0);
+ let dpoff = dbuf.get_offset(1);
+ let dstride = dbuf.get_stride(0);
+ let dst = dbuf.get_data_mut().unwrap();
+ let copy_size = sstride.min(dstride);
+ for (dline, sline) in dst[doff..].chunks_exact_mut(dstride).take(h).zip(src[soff..].chunks_exact(sstride)) {
+ dline[..copy_size].copy_from_slice(&sline[..copy_size]);
+ }
+ dst[dpoff..].copy_from_slice(&src[spoff..]);
+ }
+ return;
+ }
let mut same = true;
let num_components = sbuf.get_info().get_format().get_num_comp();
for i in 0..num_components {
let dst = &mut ddata[doff..];
let copy_size = sstride.min(dstride);
for (dline, sline) in dst.chunks_exact_mut(dstride).take(h).zip(src.chunks_exact(sstride)) {
- (&mut dline[..copy_size]).copy_from_slice(&sline[..copy_size]);
+ dline[..copy_size].copy_from_slice(&sline[..copy_size]);
}
}
}
let dst = &mut ddata[doff..];
let copy_size = sstride.min(dstride);
for (dline, sline) in dst.chunks_exact_mut(dstride).take(h).zip(src.chunks_exact(sstride)) {
- (&mut dline[..copy_size]).copy_from_slice(&sline[..copy_size]);
+ dline[..copy_size].copy_from_slice(&sline[..copy_size]);
}
}
}
}
false
}
-fn build_pipeline(ifmt: &ScaleInfo, ofmt: &ScaleInfo, just_convert: bool) -> ScaleResult<Option<Stage>> {
+fn fmt_needs_scale(ifmt: &NAPixelFormaton, ofmt: &NAPixelFormaton) -> bool {
+    // A dedicated scale stage is required whenever any pair of corresponding
+    // components disagrees on horizontal or vertical subsampling; components
+    // missing on either side are ignored.
+    ifmt.comp_info.iter().zip(ofmt.comp_info.iter())
+        .any(|(ichr, ochr)| matches!((ichr, ochr),
+            (Some(ic), Some(oc)) if ic.h_ss != oc.h_ss || ic.v_ss != oc.v_ss))
+}
+fn build_pipeline(ifmt: &ScaleInfo, ofmt: &ScaleInfo, just_convert: bool, options: &[(String, String)]) -> ScaleResult<Option<Stage>> {
+ let mut debug = false;
+ for (name, value) in options.iter() {
+ if name == "debug" && (value.is_empty() || value == "true") {
+ debug = true;
+ break;
+ }
+ }
+
let inname = ifmt.fmt.get_model().get_short_name();
let outname = ofmt.fmt.get_model().get_short_name();
-println!("convert {} -> {}", ifmt, ofmt);
- let needs_scale = if (ofmt.fmt.get_max_subsampling() > 0) &&
- (ofmt.fmt.get_max_subsampling() != ifmt.fmt.get_max_subsampling()) {
+ if debug {
+ println!("convert {} -> {}", ifmt, ofmt);
+ }
+ let needs_scale = if fmt_needs_scale(&ifmt.fmt, &ofmt.fmt) {
true
} else {
!just_convert
let needs_unpack = !ifmt.fmt.is_unpacked();
let needs_pack = !ofmt.fmt.is_unpacked();
let needs_convert = inname != outname;
- let scale_before_cvt = is_better_fmt(&ifmt, &ofmt) && needs_convert
+ let scale_before_cvt = is_better_fmt(ifmt, ofmt) && needs_convert
&& (ofmt.fmt.get_max_subsampling() == 0);
let needs_palettise = ofmt.fmt.palette;
//todo stages for model and gamma conversion
let mut cur_fmt = *ifmt;
if needs_unpack {
-println!("[adding unpack]");
+ if debug {
+ println!("[adding unpack]");
+ }
let new_stage = if !cur_fmt.fmt.is_paletted() {
- Stage::new("unpack", &cur_fmt, &ofmt)?
+ Stage::new("unpack", &cur_fmt, ofmt, options)?
} else {
- Stage::new("depal", &cur_fmt, &ofmt)?
+ Stage::new("depal", &cur_fmt, ofmt, options)?
};
cur_fmt = new_stage.fmt_out;
add_stage!(stages, new_stage);
}
if needs_scale && scale_before_cvt {
-println!("[adding scale]");
- let new_stage = Stage::new("scale", &cur_fmt, &ofmt)?;
+ if debug {
+ println!("[adding scale]");
+ }
+ let new_stage = Stage::new("scale", &cur_fmt, ofmt, options)?;
cur_fmt = new_stage.fmt_out;
add_stage!(stages, new_stage);
}
if needs_convert {
-println!("[adding convert]");
+ if debug {
+ println!("[adding convert]");
+ }
let cvtname = format!("{}_to_{}", inname, outname);
-println!("[{}]", cvtname);
- let new_stage = Stage::new(&cvtname, &cur_fmt, &ofmt)?;
+ if debug {
+ println!("[{}]", cvtname);
+ }
+ let new_stage = Stage::new(&cvtname, &cur_fmt, ofmt, options)?;
//todo if fails try converting via RGB or YUV
cur_fmt = new_stage.fmt_out;
add_stage!(stages, new_stage);
//todo alpha plane copy/add
}
if needs_scale && !scale_before_cvt {
-println!("[adding scale]");
- let new_stage = Stage::new("scale", &cur_fmt, &ofmt)?;
+ if debug {
+ println!("[adding scale]");
+ }
+ let new_stage = Stage::new("scale", &cur_fmt, ofmt, options)?;
cur_fmt = new_stage.fmt_out;
add_stage!(stages, new_stage);
}
if needs_pack && !needs_palettise {
-println!("[adding pack]");
- let new_stage = Stage::new("pack", &cur_fmt, &ofmt)?;
+ if debug {
+ println!("[adding pack]");
+ }
+ let new_stage = Stage::new("pack", &cur_fmt, ofmt, options)?;
//cur_fmt = new_stage.fmt_out;
add_stage!(stages, new_stage);
}
if needs_palettise {
-println!("[adding palettise]");
- let new_stage = Stage::new("palette", &cur_fmt, &ofmt)?;
+ if debug {
+ println!("[adding palettise]");
+ }
+ let new_stage = Stage::new("palette", &cur_fmt, ofmt, options)?;
//cur_fmt = new_stage.fmt_out;
add_stage!(stages, new_stage);
}
for _ in 0..h/2 {
line0.copy_from_slice(&data[doff0..][..stride]);
line1.copy_from_slice(&data[doff1..][..stride]);
- (&mut data[doff1..][..stride]).copy_from_slice(line0);
- (&mut data[doff0..][..stride]).copy_from_slice(line1);
+ data[doff1..][..stride].copy_from_slice(line0);
+ data[doff0..][..stride].copy_from_slice(line1);
doff0 += stride;
doff1 -= stride;
}
impl NAScale {
/// Constructs a new `NAScale` instance.
pub fn new(fmt_in: ScaleInfo, fmt_out: ScaleInfo) -> ScaleResult<Self> {
- let pipeline;
- let just_convert = (fmt_in.width == fmt_out.width) && (fmt_in.height == fmt_out.height);
- if fmt_in != fmt_out {
- pipeline = build_pipeline(&fmt_in, &fmt_out, just_convert)?;
- } else {
- pipeline = None;
- }
+ // Delegate to the options-aware constructor with no options set, so the
+ // pipeline-construction logic exists in exactly one place instead of being
+ // duplicated between new() and new_with_options().
+ Self::new_with_options(fmt_in, fmt_out, &[])
+ }
+ /// Constructs a new `NAScale` instance taking into account provided options.
+ ///
+ /// Options are free-form `(name, value)` string pairs handed to the pipeline
+ /// builder and to every stage kernel — e.g. `("debug", "true")` to trace
+ /// pipeline construction or `("scaler", "bilin")` to pick the scaling
+ /// algorithm. Kernels presumably ignore options they do not recognise —
+ /// TODO(review): confirm against the individual kernel implementations.
+ pub fn new_with_options(fmt_in: ScaleInfo, fmt_out: ScaleInfo, options: &[(String, String)]) -> ScaleResult<Self> {
+ // Identical dimensions mean stages only convert formats, never rescale.
+ let just_convert = (fmt_in.width == fmt_out.width) && (fmt_in.height == fmt_out.height);
+ let pipeline = if fmt_in != fmt_out {
+ build_pipeline(&fmt_in, &fmt_out, just_convert, options)?
+ } else {
+ // Identical formats need no conversion pipeline at all.
+ None
+ };
Ok(Self { fmt_in, fmt_out, just_convert, pipeline })
}
/// Checks whether requested conversion operation is needed at all.
assert_eq!(odata[paloff + 1], 129);
assert_eq!(odata[paloff + 2], 170);
}
+ #[test]
+ fn test_scale_modes() {
+ // Upscales a 2x2 RGB24 picture to 3x3 with each supported scaler kernel
+ // and compares the output against precomputed reference pixels.
+ // Two rows of the 2x2 input, each holding two RGB24 pixels (6 bytes).
+ const IN_DATA: [[u8; 6]; 2] = [
+ [0xFF, 0xC0, 0x40, 0x00, 0x40, 0xC0],
+ [0x00, 0x40, 0xC0, 0xFF, 0xC0, 0x40]
+ ];
+ // Pairs of (value for the "scaler" option, expected 3x3 RGB24 output rows).
+ const TEST_DATA: &[(&str, [[u8; 9]; 3])] = &[
+ ("nn",
+ [[0xFF, 0xC0, 0x40, 0xFF, 0xC0, 0x40, 0x00, 0x40, 0xC0],
+ [0xFF, 0xC0, 0x40, 0xFF, 0xC0, 0x40, 0x00, 0x40, 0xC0],
+ [0x00, 0x40, 0xC0, 0x00, 0x40, 0xC0, 0xFF, 0xC0, 0x40]]),
+ ("bilin",
+ [[0xFF, 0xC0, 0x40, 0x55, 0x6A, 0x95, 0x00, 0x40, 0xC0],
+ [0x55, 0x6A, 0x95, 0x8D, 0x86, 0x78, 0xAA, 0x95, 0x6A],
+ [0x00, 0x40, 0xC0, 0xAA, 0x95, 0x6A, 0xFF, 0xC0, 0x40]]),
+ ("bicubic",
+ [[0xFF, 0xC0, 0x40, 0x4B, 0x65, 0x9A, 0x00, 0x36, 0xC9],
+ [0x4B, 0x65, 0x9A, 0x94, 0x8A, 0x74, 0xB3, 0x9D, 0x61],
+ [0x00, 0x36, 0xC9, 0xBA, 0x9D, 0x61, 0xFF, 0xD3, 0x2B]]),
+ ("lanczos",
+ [[0xFF, 0xC0, 0x40, 0x4C, 0x66, 0x98, 0x00, 0x31, 0xCD],
+ [0x4C, 0x66, 0x98, 0x91, 0x88, 0x74, 0xB1, 0x9D, 0x5F],
+ [0x00, 0x31, 0xCD, 0xBB, 0x9D, 0x5F, 0xFF, 0xDD, 0x1E]]),
+ ("lanczos2",
+ [[0xFF, 0xC0, 0x40, 0x4F, 0x68, 0x9B, 0x00, 0x35, 0xCD],
+ [0x4F, 0x68, 0x9B, 0x96, 0x8D, 0x79, 0xB3, 0xA0, 0x64],
+ [0x00, 0x35, 0xCD, 0xBE, 0xA1, 0x65, 0xFF, 0xDC, 0x28]]),
+ ];
+
+ // Fill the source picture row by row with the test pattern.
+ let in_pic = alloc_video_buffer(NAVideoInfo::new(2, 2, false, RGB24_FORMAT), 3).unwrap();
+ if let Some(ref mut vbuf) = in_pic.get_vbuf() {
+ let stride = vbuf.get_stride(0);
+ let data = vbuf.get_data_mut().unwrap();
+ for (dline, rline) in data.chunks_mut(stride).zip(IN_DATA.iter()) {
+ dline[..6].copy_from_slice(rline);
+ }
+ } else {
+ panic!("wrong format");
+ }
+ let mut out_pic = alloc_video_buffer(NAVideoInfo::new(3, 3, false, RGB24_FORMAT), 3).unwrap();
+ let ifmt = get_scale_fmt_from_pic(&in_pic);
+ let ofmt = get_scale_fmt_from_pic(&out_pic);
+ for (method, ref_data) in TEST_DATA.iter() {
+ // Clear the destination so stale pixels from a previous method cannot mask errors.
+ fill_pic(&mut out_pic, 0);
+ let mut scaler = NAScale::new_with_options(ifmt, ofmt, &[("scaler".to_string(), method.to_string())]).unwrap();
+ scaler.convert(&in_pic, &mut out_pic).unwrap();
+ let obuf = out_pic.get_vbuf().unwrap();
+ let ostride = obuf.get_stride(0);
+ let odata = obuf.get_data();
+ // Compare every output byte against the reference rows for this method.
+ for (oline, rline) in odata.chunks(ostride).zip(ref_data.iter()) {
+ for (&a, &b) in oline.iter().zip(rline.iter()) {
+ assert_eq!(a, b);
+ }
+ }
+ }
+ }
}