//! Image conversion functionality.
//!
//! # Examples
//!
//! Convert an input image into a YUV one and scale it down two times.
//! ```no_run
//! use nihav_core::scale::*;
//! use nihav_core::formats::{RGB24_FORMAT, YUV420_FORMAT};
//! use nihav_core::frame::{alloc_video_buffer, NAVideoInfo};
//!
//! let mut in_pic = alloc_video_buffer(NAVideoInfo::new(640, 480, false, RGB24_FORMAT), 4).unwrap();
//! let mut out_pic = alloc_video_buffer(NAVideoInfo::new(320, 240, false, YUV420_FORMAT), 4).unwrap();
//! let in_fmt = get_scale_fmt_from_pic(&in_pic);
//! let out_fmt = get_scale_fmt_from_pic(&out_pic);
//! let mut scaler = NAScale::new(in_fmt, out_fmt).unwrap();
//! scaler.convert(&in_pic, &mut out_pic).unwrap();
//! ```
use crate::frame::*;

mod kernel;

mod colorcvt;
mod fill;
mod repack;
#[allow(clippy::module_inception)]
mod scale;

mod palette;

pub use crate::scale::palette::{palettise_frame, QuantisationMode, PaletteSearchMode};

/// Image format information used by the converter.
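///
/// # Examples
///
/// A minimal sketch of filling the description by hand (`YUV420_FORMAT` comes from
/// `nihav_core::formats`; in practice the structure is usually obtained via
/// `get_scale_fmt_from_pic()`):
/// ```no_run
/// use nihav_core::scale::ScaleInfo;
/// use nihav_core::formats::YUV420_FORMAT;
///
/// let info = ScaleInfo { fmt: YUV420_FORMAT, width: 320, height: 240 };
/// println!("{}", info); // the Display impl prints "(320x240, <pixel format>)"
/// ```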
#[derive(Clone,Copy,PartialEq)]
pub struct ScaleInfo {
    /// Pixel format description.
    pub fmt:    NAPixelFormaton,
    /// Image width.
    pub width:  usize,
    /// Image height.
    pub height: usize,
}

impl std::fmt::Display for ScaleInfo {
    fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result {
        write!(f, "({}x{}, {})", self.width, self.height, self.fmt)
    }
}

/// A list specifying general image conversion errors.
#[derive(Debug,Clone,Copy,PartialEq)]
#[allow(dead_code)]
pub enum ScaleError {
    /// Input or output buffer contains no image data.
    NoFrame,
    /// Allocation failed.
    AllocError,
    /// Invalid argument.
    InvalidArgument,
    /// Feature is not implemented.
    NotImplemented,
    /// Internal implementation bug.
    Bug,
}

/// A specialised `Result` type for image conversion operations.
pub type ScaleResult<T> = Result<T, ScaleError>;

/*trait Kernel {
    fn init(&mut self, in_fmt: &ScaleInfo, dest_fmt: &ScaleInfo) -> ScaleResult<NABufferType>;
    fn process(&mut self, pic_in: &NABufferType, pic_out: &mut NABufferType);
}*/

struct KernelDesc {
    name:   &'static str,
    create: fn () -> Box<dyn kernel::Kernel>,
}

impl KernelDesc {
    fn find(name: &str) -> ScaleResult<Box<dyn kernel::Kernel>> {
        for kern in KERNELS.iter() {
            if kern.name == name {
                return Ok((kern.create)());
            }
        }
        Err(ScaleError::InvalidArgument)
    }
}

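// Registry of all available conversion kernels; `KernelDesc::find()` looks them up
// by name while the pipeline stages are being built.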
const KERNELS: &[KernelDesc] = &[
    KernelDesc { name: "pack",       create: repack::create_pack },
    KernelDesc { name: "unpack",     create: repack::create_unpack },
    KernelDesc { name: "depal",      create: repack::create_depal },
    KernelDesc { name: "palette",    create: palette::create_palettise },
    KernelDesc { name: "scale",      create: scale::create_scale },
    KernelDesc { name: "fill",       create: fill::create_fill },
    KernelDesc { name: "rgb_to_yuv", create: colorcvt::create_rgb2yuv },
    KernelDesc { name: "yuv_to_rgb", create: colorcvt::create_yuv2rgb },
];

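// A single link in the conversion pipeline: the kernel doing the work, an
// intermediate buffer for its output and an optional pointer to the next stage.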
struct Stage {
    fmt_out: ScaleInfo,
    tmp_pic: NABufferType,
    next:    Option<Box<Stage>>,
    worker:  Box<dyn kernel::Kernel>,
}

/// Converts input picture information into the format used by the scaler.
pub fn get_scale_fmt_from_pic(pic: &NABufferType) -> ScaleInfo {
    let info = pic.get_video_info().unwrap();
    ScaleInfo { fmt: info.get_format(), width: info.get_width(), height: info.get_height() }
}

impl Stage {
    fn new(name: &str, in_fmt: &ScaleInfo, dest_fmt: &ScaleInfo, options: &[(String, String)]) -> ScaleResult<Self> {
        let mut worker = KernelDesc::find(name)?;
        let tmp_pic = worker.init(in_fmt, dest_fmt, options)?;
        let fmt_out = get_scale_fmt_from_pic(&tmp_pic);
        Ok(Self { fmt_out, tmp_pic, next: None, worker })
    }
    fn add(&mut self, new: Stage) {
        if let Some(ref mut next) = self.next {
            next.add(new);
        } else {
            self.next = Some(Box::new(new));
        }
    }
    fn process(&mut self, pic_in: &NABufferType, pic_out: &mut NABufferType) -> ScaleResult<()> {
        if let Some(ref mut nextstage) = self.next {
            self.worker.process(pic_in, &mut self.tmp_pic);
            nextstage.process(&self.tmp_pic, pic_out)?;
        } else {
            self.worker.process(pic_in, pic_out);
        }
        Ok(())
    }
    fn drop_last_tmp(&mut self) {
        if let Some(ref mut nextstage) = self.next {
            nextstage.drop_last_tmp();
        } else {
            self.tmp_pic = NABufferType::None;
        }
    }
}

/// Image format converter.
pub struct NAScale {
    fmt_in:       ScaleInfo,
    fmt_out:      ScaleInfo,
    just_convert: bool,
    pipeline:     Option<Stage>,
}

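// Verifies that a buffer's format matches what the converter was set up for and,
// when scaling is involved, that its dimensions match the configured ones too.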
fn check_format(in_fmt: NAVideoInfo, ref_fmt: &ScaleInfo, just_convert: bool) -> ScaleResult<()> {
    if in_fmt.get_format() != ref_fmt.fmt { return Err(ScaleError::InvalidArgument); }
    if !just_convert && (in_fmt.get_width() != ref_fmt.width || in_fmt.get_height() != ref_fmt.height) {
        return Err(ScaleError::InvalidArgument);
    }
    Ok(())
}

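// Copies picture contents without any conversion. When the source and destination
// layouts (strides and offsets) are identical the whole buffer is copied at once,
// otherwise each plane is copied line by line using the smaller of the two strides.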
fn copy(pic_in: &NABufferType, pic_out: &mut NABufferType) {
    if let (Some(ref sbuf), Some(ref mut dbuf)) = (pic_in.get_vbuf(), pic_out.get_vbuf()) {
        if sbuf.get_info().get_format().is_paletted() {
            let same = sbuf.get_stride(0) == dbuf.get_stride(0) && sbuf.get_offset(1) == dbuf.get_offset(1);
            if same {
                let src = sbuf.get_data();
                let dst = dbuf.get_data_mut().unwrap();
                dst.copy_from_slice(src);
            } else {
                let (_, h) = sbuf.get_dimensions(0);
                let soff = sbuf.get_offset(0);
                let spoff = sbuf.get_offset(1);
                let sstride = sbuf.get_stride(0);
                let src = sbuf.get_data();
                let doff = dbuf.get_offset(0);
                let dpoff = dbuf.get_offset(1);
                let dstride = dbuf.get_stride(0);
                let dst = dbuf.get_data_mut().unwrap();
                let copy_size = sstride.min(dstride);
                for (dline, sline) in dst[doff..].chunks_exact_mut(dstride).take(h).zip(src[soff..].chunks_exact(sstride)) {
                    dline[..copy_size].copy_from_slice(&sline[..copy_size]);
                }
                dst[dpoff..].copy_from_slice(&src[spoff..]);
            }
            return;
        }
        let mut same = true;
        let src_components = sbuf.get_info().get_format().get_num_comp();
        let dst_components = dbuf.get_info().get_format().get_num_comp();
        for i in 0..src_components.max(dst_components) {
            if sbuf.get_stride(i) != dbuf.get_stride(i) {
                same = false;
                break;
            }
            if sbuf.get_offset(i) != dbuf.get_offset(i) {
                same = false;
                break;
            }
        }
        if same {
            let sdata = sbuf.get_data();
            let ddata = dbuf.get_data_mut().unwrap();
            ddata.copy_from_slice(&sdata[0..]);
        } else {
            let sdata = sbuf.get_data();
            for comp in 0..src_components.min(dst_components) {
                let (_, h) = sbuf.get_dimensions(comp);
                let src = &sdata[sbuf.get_offset(comp)..];
                let sstride = sbuf.get_stride(comp);
                let doff = dbuf.get_offset(comp);
                let dstride = dbuf.get_stride(comp);
                let ddata = dbuf.get_data_mut().unwrap();
                let dst = &mut ddata[doff..];
                let copy_size = sstride.min(dstride);
                for (dline, sline) in dst.chunks_exact_mut(dstride).take(h).zip(src.chunks_exact(sstride)) {
                    dline[..copy_size].copy_from_slice(&sline[..copy_size]);
                }
            }
        }
    } else if let (Some(ref sbuf), Some(ref mut dbuf)) = (pic_in.get_vbuf16(), pic_out.get_vbuf16()) {
        let mut same = true;
        let src_components = sbuf.get_info().get_format().get_num_comp();
        let dst_components = dbuf.get_info().get_format().get_num_comp();
        for i in 0..src_components.max(dst_components) {
            if sbuf.get_stride(i) != dbuf.get_stride(i) {
                same = false;
                break;
            }
            if sbuf.get_offset(i) != dbuf.get_offset(i) {
                same = false;
                break;
            }
        }
        if same {
            let sdata = sbuf.get_data();
            let ddata = dbuf.get_data_mut().unwrap();
            ddata.copy_from_slice(&sdata[0..]);
        } else {
            let sdata = sbuf.get_data();
            for comp in 0..src_components.min(dst_components) {
                let (_, h) = sbuf.get_dimensions(comp);
                let src = &sdata[sbuf.get_offset(comp)..];
                let sstride = sbuf.get_stride(comp);
                let doff = dbuf.get_offset(comp);
                let dstride = dbuf.get_stride(comp);
                let ddata = dbuf.get_data_mut().unwrap();
                let dst = &mut ddata[doff..];
                let copy_size = sstride.min(dstride);
                for (dline, sline) in dst.chunks_exact_mut(dstride).take(h).zip(src.chunks_exact(sstride)) {
                    dline[..copy_size].copy_from_slice(&sline[..copy_size]);
                }
            }
        }
    } else {
        unimplemented!();
    }
}

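// Appends a stage to the pipeline, creating the head stage if the pipeline is still empty.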
macro_rules! add_stage {
    ($head:expr, $new:expr) => {
        if let Some(ref mut h) = $head {
            h.add($new);
        } else {
            $head = Some($new);
        }
    };
}
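// Heuristic telling whether format `a` is "better" than `b` (not smaller, deeper or
// less subsampled); build_pipeline() uses it to decide whether scaling should happen
// before or after the colourspace conversion.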
fn is_better_fmt(a: &ScaleInfo, b: &ScaleInfo) -> bool {
    if (a.width >= b.width) && (a.height >= b.height) {
        return true;
    }
    if a.fmt.get_max_depth() > b.fmt.get_max_depth() {
        return true;
    }
    if a.fmt.get_max_subsampling() < b.fmt.get_max_subsampling() {
        return true;
    }
    false
}
fn fmt_needs_scale(ifmt: &NAPixelFormaton, ofmt: &NAPixelFormaton) -> bool {
    for (ichr, ochr) in ifmt.comp_info.iter().zip(ofmt.comp_info.iter()) {
        if let (Some(ic), Some(oc)) = (ichr, ochr) {
            if ic.h_ss != oc.h_ss || ic.v_ss != oc.v_ss {
                return true;
            }
        }
    }
    false
}
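// Builds the conversion pipeline as a chain of stages. The order is fixed: unpack or
// depalettise the input, optionally scale before or after the colourspace conversion
// (depending on which format is "better"), fill missing components, then pack or
// palettise into the output format.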
fn build_pipeline(ifmt: &ScaleInfo, ofmt: &ScaleInfo, just_convert: bool, options: &[(String, String)]) -> ScaleResult<Option<Stage>> {
    let mut debug = false;
    for (name, value) in options.iter() {
        if name == "debug" && (value.is_empty() || value == "true") {
            debug = true;
            break;
        }
    }

    let inname  = ifmt.fmt.get_model().get_short_name();
    let outname = ofmt.fmt.get_model().get_short_name();

    if debug {
        println!("convert {} -> {}", ifmt, ofmt);
    }
    let needs_scale = if fmt_needs_scale(&ifmt.fmt, &ofmt.fmt) {
        true
    } else {
        !just_convert
    };
    let needs_unpack = !ifmt.fmt.is_unpacked();
    let needs_pack = !ofmt.fmt.is_unpacked();
    let needs_convert = inname != outname;
    let scale_before_cvt = is_better_fmt(ifmt, ofmt) && needs_convert
                           && (ofmt.fmt.get_max_subsampling() == 0);
    let needs_palettise = ofmt.fmt.palette;
    //todo stages for model and gamma conversion

    let mut stages: Option<Stage> = None;
    let mut cur_fmt = *ifmt;

    if needs_unpack {
        if debug {
            println!("[adding unpack]");
        }
        let new_stage = if !cur_fmt.fmt.is_paletted() {
            Stage::new("unpack", &cur_fmt, ofmt, options)?
        } else {
            Stage::new("depal", &cur_fmt, ofmt, options)?
        };
        cur_fmt = new_stage.fmt_out;
        add_stage!(stages, new_stage);
    }
    if needs_scale && scale_before_cvt {
        if debug {
            println!("[adding scale]");
        }
        let new_stage = Stage::new("scale", &cur_fmt, ofmt, options)?;
        cur_fmt = new_stage.fmt_out;
        add_stage!(stages, new_stage);
    }
    if needs_convert {
        if debug {
            println!("[adding convert]");
        }
        let cvtname = format!("{}_to_{}", inname, outname);
        if debug {
            println!("[{}]", cvtname);
        }
        let new_stage = Stage::new(&cvtname, &cur_fmt, ofmt, options)?;
        //todo if fails try converting via RGB or YUV
        cur_fmt = new_stage.fmt_out;
        add_stage!(stages, new_stage);
        //todo alpha plane copy/add
    }
    if needs_scale && !scale_before_cvt {
        if debug {
            println!("[adding scale]");
        }
        let new_stage = Stage::new("scale", &cur_fmt, ofmt, options)?;
        cur_fmt = new_stage.fmt_out;
        add_stage!(stages, new_stage);
    }
    let icomponents = cur_fmt.fmt.components - if cur_fmt.fmt.alpha { 1 } else { 0 };
    let ocomponents = ofmt.fmt.components    - if ofmt.fmt.alpha    { 1 } else { 0 };
    if !needs_palettise && ((!cur_fmt.fmt.alpha && ofmt.fmt.alpha) || (icomponents < ocomponents)) {
        if debug {
            println!("[adding fill]");
        }
        let new_stage = Stage::new("fill", &cur_fmt, ofmt, options)?;
        cur_fmt = new_stage.fmt_out;
        add_stage!(stages, new_stage);
    }
    if needs_pack && !needs_palettise {
        if debug {
            println!("[adding pack]");
        }
        let new_stage = Stage::new("pack", &cur_fmt, ofmt, options)?;
        //cur_fmt = new_stage.fmt_out;
        add_stage!(stages, new_stage);
    }
    if needs_palettise {
        if debug {
            println!("[adding palettise]");
        }
        let new_stage = Stage::new("palette", &cur_fmt, ofmt, options)?;
        //cur_fmt = new_stage.fmt_out;
        add_stage!(stages, new_stage);
    }

    if let Some(ref mut head) = stages {
        head.drop_last_tmp();
    }

    Ok(stages)
}

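// Swaps lines of a single plane top-to-bottom using two scratch line buffers;
// this is the building block for flip_picture() below.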
fn swap_plane<T:Copy>(data: &mut [T], stride: usize, h: usize, line0: &mut [T], line1: &mut [T]) {
    let mut doff0 = 0;
    let mut doff1 = stride * (h - 1);
    for _ in 0..h/2 {
        line0.copy_from_slice(&data[doff0..][..stride]);
        line1.copy_from_slice(&data[doff1..][..stride]);
        data[doff1..][..stride].copy_from_slice(line0);
        data[doff0..][..stride].copy_from_slice(line1);
        doff0 += stride;
        doff1 -= stride;
    }
}

/// Flips the picture contents vertically.
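///
/// # Examples
///
/// A minimal sketch flipping a freshly allocated buffer (allocation as in the module-level example):
/// ```no_run
/// use nihav_core::scale::flip_picture;
/// use nihav_core::formats::RGB24_FORMAT;
/// use nihav_core::frame::{alloc_video_buffer, NAVideoInfo};
///
/// let mut pic = alloc_video_buffer(NAVideoInfo::new(640, 480, false, RGB24_FORMAT), 4).unwrap();
/// flip_picture(&mut pic).unwrap();
/// ```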
pub fn flip_picture(pic: &mut NABufferType) -> ScaleResult<()> {
    match pic {
        NABufferType::Video(ref mut vb) => {
            let ncomp = vb.get_num_components();
            for comp in 0..ncomp {
                let off = vb.get_offset(comp);
                let stride = vb.get_stride(comp);
                let (_, h) = vb.get_dimensions(comp);
                let data = vb.get_data_mut().unwrap();
                let mut line0 = vec![0; stride];
                let mut line1 = vec![0; stride];
                swap_plane(&mut data[off..], stride, h, line0.as_mut_slice(), line1.as_mut_slice());
            }
        },
        NABufferType::Video16(ref mut vb) => {
            let ncomp = vb.get_num_components().max(1);
            for comp in 0..ncomp {
                let off = vb.get_offset(comp);
                let stride = vb.get_stride(comp);
                let (_, h) = vb.get_dimensions(comp);
                let data = vb.get_data_mut().unwrap();
                let mut line0 = vec![0; stride];
                let mut line1 = vec![0; stride];
                swap_plane(&mut data[off..], stride, h, line0.as_mut_slice(), line1.as_mut_slice());
            }
        },
        NABufferType::Video32(ref mut vb) => {
            let ncomp = vb.get_num_components().max(1);
            for comp in 0..ncomp {
                let off = vb.get_offset(comp);
                let stride = vb.get_stride(comp);
                let (_, h) = vb.get_dimensions(comp);
                let data = vb.get_data_mut().unwrap();
                let mut line0 = vec![0; stride];
                let mut line1 = vec![0; stride];
                swap_plane(&mut data[off..], stride, h, line0.as_mut_slice(), line1.as_mut_slice());
            }
        },
        NABufferType::VideoPacked(ref mut vb) => {
            let ncomp = vb.get_num_components();
            for comp in 0..ncomp {
                let off = vb.get_offset(comp);
                let stride = vb.get_stride(comp);
                let (_, h) = vb.get_dimensions(comp);
                let data = vb.get_data_mut().unwrap();
                let mut line0 = vec![0; stride];
                let mut line1 = vec![0; stride];
                swap_plane(&mut data[off..], stride, h, line0.as_mut_slice(), line1.as_mut_slice());
            }
            if ncomp == 0 && vb.get_stride(0) != 0 {
                let off = vb.get_offset(0);
                let stride = vb.get_stride(0);
                let (_, h) = vb.get_dimensions(0);
                let data = vb.get_data_mut().unwrap();
                let mut line0 = vec![0; stride];
                let mut line1 = vec![0; stride];
                swap_plane(&mut data[off..], stride, h, line0.as_mut_slice(), line1.as_mut_slice());
            }
        },
        _ => { return Err(ScaleError::InvalidArgument); },
    };
    Ok(())
}

impl NAScale {
    /// Constructs a new `NAScale` instance.
    pub fn new(fmt_in: ScaleInfo, fmt_out: ScaleInfo) -> ScaleResult<Self> {
        let just_convert = (fmt_in.width == fmt_out.width) && (fmt_in.height == fmt_out.height);
        let pipeline = if fmt_in != fmt_out {
            build_pipeline(&fmt_in, &fmt_out, just_convert, &[])?
        } else {
            None
        };
        Ok(Self { fmt_in, fmt_out, just_convert, pipeline })
    }
    /// Constructs a new `NAScale` instance, taking the provided options into account.
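    ///
    /// # Examples
    ///
    /// A minimal sketch; the option names shown here ("scaler", "debug") are the ones
    /// recognised elsewhere in this module, individual kernels may accept their own.
    /// ```no_run
    /// use nihav_core::scale::*;
    /// use nihav_core::formats::{RGB24_FORMAT, YUV420_FORMAT};
    /// use nihav_core::frame::{alloc_video_buffer, NAVideoInfo};
    ///
    /// let in_pic = alloc_video_buffer(NAVideoInfo::new(640, 480, false, RGB24_FORMAT), 4).unwrap();
    /// let mut out_pic = alloc_video_buffer(NAVideoInfo::new(320, 240, false, YUV420_FORMAT), 4).unwrap();
    /// let in_fmt = get_scale_fmt_from_pic(&in_pic);
    /// let out_fmt = get_scale_fmt_from_pic(&out_pic);
    /// let options = [("scaler".to_string(), "bilin".to_string()),
    ///                ("debug".to_string(), "true".to_string())];
    /// let mut scaler = NAScale::new_with_options(in_fmt, out_fmt, &options).unwrap();
    /// scaler.convert(&in_pic, &mut out_pic).unwrap();
    /// ```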
    pub fn new_with_options(fmt_in: ScaleInfo, fmt_out: ScaleInfo, options: &[(String, String)]) -> ScaleResult<Self> {
        let just_convert = (fmt_in.width == fmt_out.width) && (fmt_in.height == fmt_out.height);
        let pipeline = if fmt_in != fmt_out {
            build_pipeline(&fmt_in, &fmt_out, just_convert, options)?
        } else {
            None
        };
        Ok(Self { fmt_in, fmt_out, just_convert, pipeline })
    }
    /// Checks whether the requested conversion operation is needed at all.
    pub fn needs_processing(&self) -> bool { self.pipeline.is_some() }
    /// Returns the input image format.
    pub fn get_in_fmt(&self) -> ScaleInfo { self.fmt_in }
    /// Returns the output image format.
    pub fn get_out_fmt(&self) -> ScaleInfo { self.fmt_out }
    /// Performs the image format conversion.
    pub fn convert(&mut self, pic_in: &NABufferType, pic_out: &mut NABufferType) -> ScaleResult<()> {
        let in_info  = pic_in.get_video_info();
        let out_info = pic_out.get_video_info();
        if in_info.is_none() || out_info.is_none() { return Err(ScaleError::InvalidArgument); }
        let in_info  = in_info.unwrap();
        let out_info = out_info.unwrap();
        if self.just_convert &&
            (in_info.get_width() != out_info.get_width() || in_info.get_height() != out_info.get_height()) {
            return Err(ScaleError::InvalidArgument);
        }
        let needs_flip = in_info.is_flipped() ^ out_info.is_flipped();
        check_format(in_info,  &self.fmt_in,  self.just_convert)?;
        check_format(out_info, &self.fmt_out, self.just_convert)?;
        let ret = if let Some(ref mut pipe) = self.pipeline {
            pipe.process(pic_in, pic_out)
        } else {
            copy(pic_in, pic_out);
            Ok(())
        };
        if ret.is_ok() && needs_flip {
            flip_picture(pic_out)?;
        }
        ret
    }
}

#[cfg(test)]
mod test {
    use super::*;

    fn fill_pic(pic: &mut NABufferType, val: u8) {
        if let Some(ref mut buf) = pic.get_vbuf() {
            let data = buf.get_data_mut().unwrap();
            for el in data.iter_mut() { *el = val; }
        } else if let Some(ref mut buf) = pic.get_vbuf16() {
            let data = buf.get_data_mut().unwrap();
            for el in data.iter_mut() { *el = val as u16; }
        } else if let Some(ref mut buf) = pic.get_vbuf32() {
            let data = buf.get_data_mut().unwrap();
            for el in data.iter_mut() { *el = (val as u32) * 0x01010101; }
        }
    }
    #[test]
    fn test_convert() {
        let mut in_pic = alloc_video_buffer(NAVideoInfo::new(1, 1, false, RGB565_FORMAT), 3).unwrap();
        fill_pic(&mut in_pic, 42);
        let mut out_pic = alloc_video_buffer(NAVideoInfo::new(1, 1, false, RGB24_FORMAT), 3).unwrap();
        fill_pic(&mut out_pic, 0);
        let ifmt = get_scale_fmt_from_pic(&in_pic);
        let ofmt = get_scale_fmt_from_pic(&out_pic);
        let mut scaler = NAScale::new(ifmt, ofmt).unwrap();
        scaler.convert(&in_pic, &mut out_pic).unwrap();
        let obuf = out_pic.get_vbuf().unwrap();
        let odata = obuf.get_data();
        assert_eq!(odata[0], 0x0);
        assert_eq!(odata[1], 0x4);
        assert_eq!(odata[2], 0x52);

        let mut in_pic = alloc_video_buffer(NAVideoInfo::new(4, 4, false, RGB24_FORMAT), 3).unwrap();
        fill_pic(&mut in_pic, 42);
        let mut out_pic = alloc_video_buffer(NAVideoInfo::new(4, 4, false, YUV420_FORMAT), 3).unwrap();
        fill_pic(&mut out_pic, 0);
        let ifmt = get_scale_fmt_from_pic(&in_pic);
        let ofmt = get_scale_fmt_from_pic(&out_pic);
        let mut scaler = NAScale::new(ifmt, ofmt).unwrap();
        scaler.convert(&in_pic, &mut out_pic).unwrap();
        let obuf = out_pic.get_vbuf().unwrap();
        let yoff = obuf.get_offset(0);
        let uoff = obuf.get_offset(1);
        let voff = obuf.get_offset(2);
        let odata = obuf.get_data();
        assert_eq!(odata[yoff], 42);
        assert!(((odata[uoff] ^ 0x80) as i8).abs() <= 1);
        assert!(((odata[voff] ^ 0x80) as i8).abs() <= 1);
        let mut scaler = NAScale::new(ofmt, ifmt).unwrap();
        scaler.convert(&out_pic, &mut in_pic).unwrap();
        let obuf = in_pic.get_vbuf().unwrap();
        let odata = obuf.get_data();
        assert_eq!(odata[0], 42);
    }
    #[test]
    fn test_scale() {
        let mut in_pic = alloc_video_buffer(NAVideoInfo::new(2, 2, false, RGB565_FORMAT), 3).unwrap();
        fill_pic(&mut in_pic, 42);
        let mut out_pic = alloc_video_buffer(NAVideoInfo::new(3, 3, false, RGB565_FORMAT), 3).unwrap();
        fill_pic(&mut out_pic, 0);
        let ifmt = get_scale_fmt_from_pic(&in_pic);
        let ofmt = get_scale_fmt_from_pic(&out_pic);
        let mut scaler = NAScale::new(ifmt, ofmt).unwrap();
        scaler.convert(&in_pic, &mut out_pic).unwrap();
        let obuf = out_pic.get_vbuf16().unwrap();
        let odata = obuf.get_data();
        assert_eq!(odata[0], 42);
    }
    #[test]
    fn test_scale_and_convert() {
        let mut in_pic = alloc_video_buffer(NAVideoInfo::new(7, 3, false, RGB565_FORMAT), 3).unwrap();
        fill_pic(&mut in_pic, 42);
        let mut out_pic = alloc_video_buffer(NAVideoInfo::new(4, 4, false, YUV420_FORMAT), 3).unwrap();
        fill_pic(&mut out_pic, 0);
        let ifmt = get_scale_fmt_from_pic(&in_pic);
        let ofmt = get_scale_fmt_from_pic(&out_pic);
        let mut scaler = NAScale::new(ifmt, ofmt).unwrap();
        scaler.convert(&in_pic, &mut out_pic).unwrap();
        let obuf = out_pic.get_vbuf().unwrap();
        let yoff = obuf.get_offset(0);
        let uoff = obuf.get_offset(1);
        let voff = obuf.get_offset(2);
        let odata = obuf.get_data();
        assert_eq!(odata[yoff], 11);
        assert_eq!(odata[uoff], 162);
        assert_eq!(odata[voff], 118);
    }
    #[test]
    fn test_scale_and_convert_to_pal() {
        let mut in_pic = alloc_video_buffer(NAVideoInfo::new(7, 3, false, YUV420_FORMAT), 3).unwrap();
        fill_pic(&mut in_pic, 142);
        let mut out_pic = alloc_video_buffer(NAVideoInfo::new(4, 4, false, PAL8_FORMAT), 0).unwrap();
        fill_pic(&mut out_pic, 0);
        let ifmt = get_scale_fmt_from_pic(&in_pic);
        let ofmt = get_scale_fmt_from_pic(&out_pic);
        let mut scaler = NAScale::new(ifmt, ofmt).unwrap();
        scaler.convert(&in_pic, &mut out_pic).unwrap();
        let obuf = out_pic.get_vbuf().unwrap();
        let dataoff = obuf.get_offset(0);
        let paloff = obuf.get_offset(1);
        let odata = obuf.get_data();
        assert_eq!(odata[dataoff], 0);
        assert_eq!(odata[paloff], 157);
        assert_eq!(odata[paloff + 1], 129);
        assert_eq!(odata[paloff + 2], 170);
    }
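    // Checks every scaling kernel selectable through the "scaler" option ("nn", "bilin",
    // "bicubic", "lanczos", "lanczos2") against precomputed reference pixels.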
    #[test]
    fn test_scale_modes() {
        const IN_DATA: [[u8; 6]; 2] = [
            [0xFF, 0xC0, 0x40, 0x00, 0x40, 0xC0],
            [0x00, 0x40, 0xC0, 0xFF, 0xC0, 0x40]
        ];
        const TEST_DATA: &[(&str, [[u8; 9]; 3])] = &[
            ("nn",
             [[0xFF, 0xC0, 0x40, 0xFF, 0xC0, 0x40, 0x00, 0x40, 0xC0],
              [0xFF, 0xC0, 0x40, 0xFF, 0xC0, 0x40, 0x00, 0x40, 0xC0],
              [0x00, 0x40, 0xC0, 0x00, 0x40, 0xC0, 0xFF, 0xC0, 0x40]]),
            ("bilin",
             [[0xFF, 0xC0, 0x40, 0x55, 0x6A, 0x95, 0x00, 0x40, 0xC0],
              [0x55, 0x6A, 0x95, 0x8D, 0x86, 0x78, 0xAA, 0x95, 0x6A],
              [0x00, 0x40, 0xC0, 0xAA, 0x95, 0x6A, 0xFF, 0xC0, 0x40]]),
            ("bicubic",
             [[0xFF, 0xC0, 0x40, 0x4B, 0x65, 0x9A, 0x00, 0x36, 0xC9],
              [0x4B, 0x65, 0x9A, 0x94, 0x8A, 0x74, 0xB3, 0x9D, 0x61],
              [0x00, 0x36, 0xC9, 0xBA, 0x9D, 0x61, 0xFF, 0xD3, 0x2B]]),
            ("lanczos",
             [[0xFF, 0xC0, 0x40, 0x4C, 0x66, 0x98, 0x00, 0x31, 0xCD],
              [0x4C, 0x66, 0x98, 0x91, 0x88, 0x74, 0xB1, 0x9D, 0x5F],
              [0x00, 0x31, 0xCD, 0xBB, 0x9D, 0x5F, 0xFF, 0xDD, 0x1E]]),
            ("lanczos2",
             [[0xFF, 0xC0, 0x40, 0x4F, 0x68, 0x9B, 0x00, 0x35, 0xCD],
              [0x4F, 0x68, 0x9B, 0x96, 0x8D, 0x79, 0xB3, 0xA0, 0x64],
              [0x00, 0x35, 0xCD, 0xBE, 0xA1, 0x65, 0xFF, 0xDC, 0x28]]),
        ];

        let in_pic = alloc_video_buffer(NAVideoInfo::new(2, 2, false, RGB24_FORMAT), 3).unwrap();
        if let Some(ref mut vbuf) = in_pic.get_vbuf() {
            let stride = vbuf.get_stride(0);
            let data = vbuf.get_data_mut().unwrap();
            for (dline, rline) in data.chunks_mut(stride).zip(IN_DATA.iter()) {
                dline[..6].copy_from_slice(rline);
            }
        } else {
            panic!("wrong format");
        }
        let mut out_pic = alloc_video_buffer(NAVideoInfo::new(3, 3, false, RGB24_FORMAT), 3).unwrap();
        let ifmt = get_scale_fmt_from_pic(&in_pic);
        let ofmt = get_scale_fmt_from_pic(&out_pic);
        for (method, ref_data) in TEST_DATA.iter() {
            fill_pic(&mut out_pic, 0);
            let mut scaler = NAScale::new_with_options(ifmt, ofmt, &[("scaler".to_string(), method.to_string())]).unwrap();
            scaler.convert(&in_pic, &mut out_pic).unwrap();
            let obuf = out_pic.get_vbuf().unwrap();
            let ostride = obuf.get_stride(0);
            let odata = obuf.get_data();
            for (oline, rline) in odata.chunks(ostride).zip(ref_data.iter()) {
                for (&a, &b) in oline.iter().zip(rline.iter()) {
                    assert_eq!(a, b);
                }
            }
        }
    }
}