10db18de0d15ccf7be8a9d4dc50123e4a7f5c4fc
[nihav.git] / nihav-core / src / frame.rs
1 //! Packets and decoded frames functionality.
2 use std::cmp::max;
3 //use std::collections::HashMap;
4 use std::fmt;
5 pub use std::sync::Arc;
6 pub use crate::formats::*;
7 pub use crate::refs::*;
8 use std::str::FromStr;
9
/// Audio stream information.
#[allow(dead_code)]
#[derive(Clone,Copy,PartialEq)]
pub struct NAAudioInfo {
    /// Sample rate in Hertz.
    pub sample_rate: u32,
    /// Number of channels.
    pub channels: u8,
    /// Audio sample format.
    pub format: NASoniton,
    /// Length of one audio block in samples.
    // NOTE(review): not validated anywhere in this file — presumably set by
    // the codec to its frame size; confirm against callers.
    pub block_len: usize,
}
23
24 impl NAAudioInfo {
25 /// Constructs a new `NAAudioInfo` instance.
26 pub fn new(sr: u32, ch: u8, fmt: NASoniton, bl: usize) -> Self {
27 NAAudioInfo { sample_rate: sr, channels: ch, format: fmt, block_len: bl }
28 }
29 /// Returns audio sample rate.
30 pub fn get_sample_rate(&self) -> u32 { self.sample_rate }
31 /// Returns the number of channels.
32 pub fn get_channels(&self) -> u8 { self.channels }
33 /// Returns sample format.
34 pub fn get_format(&self) -> NASoniton { self.format }
35 /// Returns one audio block duration in samples.
36 pub fn get_block_len(&self) -> usize { self.block_len }
37 }
38
39 impl fmt::Display for NAAudioInfo {
40 fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
41 write!(f, "{} Hz, {} ch", self.sample_rate, self.channels)
42 }
43 }
44
/// Video stream information.
#[allow(dead_code)]
#[derive(Clone,Copy,PartialEq)]
pub struct NAVideoInfo {
    /// Picture width in pixels.
    pub width: usize,
    /// Picture height in pixels.
    pub height: usize,
    /// Picture is stored downside up.
    pub flipped: bool,
    /// Picture pixel format.
    pub format: NAPixelFormaton,
    /// Declared bits per sample (derived from `format` by the constructor).
    pub bits: u8,
}
60
61 impl NAVideoInfo {
62 /// Constructs a new `NAVideoInfo` instance.
63 pub fn new(w: usize, h: usize, flip: bool, fmt: NAPixelFormaton) -> Self {
64 let bits = fmt.get_total_depth();
65 NAVideoInfo { width: w, height: h, flipped: flip, format: fmt, bits }
66 }
67 /// Returns picture width.
68 pub fn get_width(&self) -> usize { self.width as usize }
69 /// Returns picture height.
70 pub fn get_height(&self) -> usize { self.height as usize }
71 /// Returns picture orientation.
72 pub fn is_flipped(&self) -> bool { self.flipped }
73 /// Returns picture pixel format.
74 pub fn get_format(&self) -> NAPixelFormaton { self.format }
75 /// Sets new picture width.
76 pub fn set_width(&mut self, w: usize) { self.width = w; }
77 /// Sets new picture height.
78 pub fn set_height(&mut self, h: usize) { self.height = h; }
79 }
80
81 impl fmt::Display for NAVideoInfo {
82 fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
83 write!(f, "{}x{}", self.width, self.height)
84 }
85 }
86
/// A list of possible stream information types.
#[derive(Clone,Copy,PartialEq)]
pub enum NACodecTypeInfo {
    /// No codec present.
    None,
    /// Audio codec information.
    Audio(NAAudioInfo),
    /// Video codec information.
    Video(NAVideoInfo),
}
97
98 impl NACodecTypeInfo {
99 /// Returns video stream information.
100 pub fn get_video_info(&self) -> Option<NAVideoInfo> {
101 match *self {
102 NACodecTypeInfo::Video(vinfo) => Some(vinfo),
103 _ => None,
104 }
105 }
106 /// Returns audio stream information.
107 pub fn get_audio_info(&self) -> Option<NAAudioInfo> {
108 match *self {
109 NACodecTypeInfo::Audio(ainfo) => Some(ainfo),
110 _ => None,
111 }
112 }
113 /// Reports whether the current stream is video stream.
114 pub fn is_video(&self) -> bool {
115 matches!(*self, NACodecTypeInfo::Video(_))
116 }
117 /// Reports whether the current stream is audio stream.
118 pub fn is_audio(&self) -> bool {
119 matches!(*self, NACodecTypeInfo::Audio(_))
120 }
121 }
122
123 impl fmt::Display for NACodecTypeInfo {
124 fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
125 let ret = match *self {
126 NACodecTypeInfo::None => "".to_string(),
127 NACodecTypeInfo::Audio(fmt) => format!("{}", fmt),
128 NACodecTypeInfo::Video(fmt) => format!("{}", fmt),
129 };
130 write!(f, "{}", ret)
131 }
132 }
133
/// Decoded video frame.
///
/// NihAV frames are stored in native type (8/16/32-bit elements) inside a single buffer.
/// In case of image with several components those components are stored sequentially and can be accessed in the buffer starting at corresponding component offset.
#[derive(Clone)]
pub struct NAVideoBuffer<T> {
    // Picture format and dimensions.
    info: NAVideoInfo,
    // Reference-counted storage shared by all components.
    data: NABufferRef<Vec<T>>,
    // Start of each component inside `data`.
    offs: Vec<usize>,
    // Distance between subsequent lines for each component.
    strides: Vec<usize>,
}
145
146 impl<T: Clone> NAVideoBuffer<T> {
147 /// Constructs video buffer from the provided components.
148 pub fn from_raw_parts(info: NAVideoInfo, data: NABufferRef<Vec<T>>, offs: Vec<usize>, strides: Vec<usize>) -> Self {
149 Self { info, data, offs, strides }
150 }
151 /// Returns the component offset (0 for all unavailable offsets).
152 pub fn get_offset(&self, idx: usize) -> usize {
153 if idx >= self.offs.len() { 0 }
154 else { self.offs[idx] }
155 }
156 /// Returns picture info.
157 pub fn get_info(&self) -> NAVideoInfo { self.info }
158 /// Returns an immutable reference to the data.
159 pub fn get_data(&self) -> &Vec<T> { self.data.as_ref() }
160 /// Returns a mutable reference to the data.
161 pub fn get_data_mut(&mut self) -> Option<&mut Vec<T>> { self.data.as_mut() }
162 /// Returns the number of components in picture format.
163 pub fn get_num_components(&self) -> usize { self.offs.len() }
164 /// Creates a copy of current `NAVideoBuffer`.
165 pub fn copy_buffer(&self) -> Self {
166 let mut data: Vec<T> = Vec::with_capacity(self.data.len());
167 data.clone_from(self.data.as_ref());
168 let mut offs: Vec<usize> = Vec::with_capacity(self.offs.len());
169 offs.clone_from(&self.offs);
170 let mut strides: Vec<usize> = Vec::with_capacity(self.strides.len());
171 strides.clone_from(&self.strides);
172 NAVideoBuffer { info: self.info, data: NABufferRef::new(data), offs, strides }
173 }
174 /// Returns stride (distance between subsequent lines) for the requested component.
175 pub fn get_stride(&self, idx: usize) -> usize {
176 if idx >= self.strides.len() { return 0; }
177 self.strides[idx]
178 }
179 /// Returns requested component dimensions.
180 pub fn get_dimensions(&self, idx: usize) -> (usize, usize) {
181 get_plane_size(&self.info, idx)
182 }
183 /// Converts current instance into buffer reference.
184 pub fn into_ref(self) -> NABufferRef<Self> {
185 NABufferRef::new(self)
186 }
187
188 fn print_contents(&self, datatype: &str) {
189 println!("{} video buffer size {}", datatype, self.data.len());
190 println!(" format {}", self.info);
191 print!(" offsets:");
192 for off in self.offs.iter() {
193 print!(" {}", *off);
194 }
195 println!();
196 print!(" strides:");
197 for stride in self.strides.iter() {
198 print!(" {}", *stride);
199 }
200 println!();
201 }
202 }
203
/// A specialised type for reference-counted `NAVideoBuffer`
/// (the form stored in `NAVideoBufferPool` and `NABufferType`).
pub type NAVideoBufferRef<T> = NABufferRef<NAVideoBuffer<T>>;
206
/// Decoded audio frame.
///
/// NihAV frames are stored in native type (8/16/32-bit elements) inside a single buffer.
/// In case of planar audio samples for each channel are stored sequentially and can be accessed in the buffer starting at corresponding channel offset.
#[derive(Clone)]
pub struct NAAudioBuffer<T> {
    // Sample rate, channel count and sample format.
    info: NAAudioInfo,
    // Reference-counted sample storage.
    data: NABufferRef<Vec<T>>,
    // Start of each channel's data inside `data`.
    offs: Vec<usize>,
    // Distance between the starts of two consecutive channels.
    stride: usize,
    // Distance between two samples of the same channel.
    step: usize,
    // Channel layout description.
    chmap: NAChannelMap,
    // Frame length in samples.
    len: usize,
}
221
222 impl<T: Clone> NAAudioBuffer<T> {
223 /// Returns the start position of requested channel data.
224 pub fn get_offset(&self, idx: usize) -> usize {
225 if idx >= self.offs.len() { 0 }
226 else { self.offs[idx] }
227 }
228 /// Returns the distance between the start of one channel and the next one.
229 pub fn get_stride(&self) -> usize { self.stride }
230 /// Returns the distance between the samples in one channel.
231 pub fn get_step(&self) -> usize { self.step }
232 /// Returns audio format information.
233 pub fn get_info(&self) -> NAAudioInfo { self.info }
234 /// Returns channel map.
235 pub fn get_chmap(&self) -> &NAChannelMap { &self.chmap }
236 /// Returns an immutable reference to the data.
237 pub fn get_data(&self) -> &Vec<T> { self.data.as_ref() }
238 /// Returns reference to the data.
239 pub fn get_data_ref(&self) -> NABufferRef<Vec<T>> { self.data.clone() }
240 /// Returns a mutable reference to the data.
241 pub fn get_data_mut(&mut self) -> Option<&mut Vec<T>> { self.data.as_mut() }
242 /// Clones current `NAAudioBuffer` into a new one.
243 pub fn copy_buffer(&mut self) -> Self {
244 let mut data: Vec<T> = Vec::with_capacity(self.data.len());
245 data.clone_from(self.data.as_ref());
246 let mut offs: Vec<usize> = Vec::with_capacity(self.offs.len());
247 offs.clone_from(&self.offs);
248 NAAudioBuffer { info: self.info, data: NABufferRef::new(data), offs, chmap: self.get_chmap().clone(), len: self.len, stride: self.stride, step: self.step }
249 }
250 /// Return the length of frame in samples.
251 pub fn get_length(&self) -> usize { self.len }
252 /// Truncates buffer length if possible.
253 ///
254 /// In case when new length is larger than old length nothing is done.
255 pub fn truncate(&mut self, new_len: usize) {
256 self.len = self.len.min(new_len);
257 }
258
259 fn print_contents(&self, datatype: &str) {
260 println!("Audio buffer with {} data, stride {}, step {}", datatype, self.stride, self.step);
261 println!(" format {}", self.info);
262 println!(" channel map {}", self.chmap);
263 print!(" offsets:");
264 for off in self.offs.iter() {
265 print!(" {}", *off);
266 }
267 println!();
268 }
269 }
270
271 impl NAAudioBuffer<u8> {
272 /// Constructs a new `NAAudioBuffer` instance.
273 pub fn new_from_buf(info: NAAudioInfo, data: NABufferRef<Vec<u8>>, chmap: NAChannelMap) -> Self {
274 let len = data.len() * 8 / chmap.num_channels() / (info.format.bits as usize);
275
276 NAAudioBuffer { info, data, chmap, offs: Vec::new(), len, stride: 0, step: 0 }
277 }
278 }
279
/// A list of possible decoded frame types.
///
/// This is the common type returned by the allocators in this module.
#[derive(Clone)]
pub enum NABufferType {
    /// 8-bit video buffer.
    Video      (NAVideoBufferRef<u8>),
    /// 16-bit video buffer (i.e. every component or packed pixel fits into 16 bits).
    Video16    (NAVideoBufferRef<u16>),
    /// 32-bit video buffer (i.e. every component or packed pixel fits into 32 bits).
    Video32    (NAVideoBufferRef<u32>),
    /// Packed video buffer.
    VideoPacked(NAVideoBufferRef<u8>),
    /// Audio buffer with 8-bit unsigned integer audio.
    AudioU8    (NAAudioBuffer<u8>),
    /// Audio buffer with 16-bit signed integer audio.
    AudioI16   (NAAudioBuffer<i16>),
    /// Audio buffer with 32-bit signed integer audio.
    AudioI32   (NAAudioBuffer<i32>),
    /// Audio buffer with 32-bit floating point audio.
    AudioF32   (NAAudioBuffer<f32>),
    /// Packed audio buffer.
    AudioPacked(NAAudioBuffer<u8>),
    /// Buffer with generic data (e.g. subtitles).
    Data       (NABufferRef<Vec<u8>>),
    /// No data present.
    None,
}
306
impl NABufferType {
    /// Returns the offset to the requested component or channel.
    ///
    /// Returns 0 for `Data`/`None` buffers and for indices without an offset.
    pub fn get_offset(&self, idx: usize) -> usize {
        match *self {
            NABufferType::Video(ref vb)       => vb.get_offset(idx),
            NABufferType::Video16(ref vb)     => vb.get_offset(idx),
            NABufferType::Video32(ref vb)     => vb.get_offset(idx),
            NABufferType::VideoPacked(ref vb) => vb.get_offset(idx),
            NABufferType::AudioU8(ref ab)     => ab.get_offset(idx),
            NABufferType::AudioI16(ref ab)    => ab.get_offset(idx),
            NABufferType::AudioI32(ref ab)    => ab.get_offset(idx),
            NABufferType::AudioF32(ref ab)    => ab.get_offset(idx),
            NABufferType::AudioPacked(ref ab) => ab.get_offset(idx),
            _ => 0,
        }
    }
    /// Returns information for video frames (`None` for non-video buffers).
    pub fn get_video_info(&self) -> Option<NAVideoInfo> {
        match *self {
            NABufferType::Video(ref vb)       => Some(vb.get_info()),
            NABufferType::Video16(ref vb)     => Some(vb.get_info()),
            NABufferType::Video32(ref vb)     => Some(vb.get_info()),
            NABufferType::VideoPacked(ref vb) => Some(vb.get_info()),
            _ => None,
        }
    }
    /// Returns reference to 8-bit (or packed) video buffer.
    pub fn get_vbuf(&self) -> Option<NAVideoBufferRef<u8>> {
        match *self {
            NABufferType::Video(ref vb)       => Some(vb.clone()),
            NABufferType::VideoPacked(ref vb) => Some(vb.clone()),
            _ => None,
        }
    }
    /// Returns reference to 16-bit video buffer.
    pub fn get_vbuf16(&self) -> Option<NAVideoBufferRef<u16>> {
        match *self {
            NABufferType::Video16(ref vb) => Some(vb.clone()),
            _ => None,
        }
    }
    /// Returns reference to 32-bit video buffer.
    pub fn get_vbuf32(&self) -> Option<NAVideoBufferRef<u32>> {
        match *self {
            NABufferType::Video32(ref vb) => Some(vb.clone()),
            _ => None,
        }
    }
    /// Returns information for audio frames (`None` for non-audio buffers).
    pub fn get_audio_info(&self) -> Option<NAAudioInfo> {
        match *self {
            NABufferType::AudioU8(ref ab)     => Some(ab.get_info()),
            NABufferType::AudioI16(ref ab)    => Some(ab.get_info()),
            NABufferType::AudioI32(ref ab)    => Some(ab.get_info()),
            NABufferType::AudioF32(ref ab)    => Some(ab.get_info()),
            NABufferType::AudioPacked(ref ab) => Some(ab.get_info()),
            _ => None,
        }
    }
    /// Returns audio channel map (`None` for non-audio buffers).
    pub fn get_chmap(&self) -> Option<&NAChannelMap> {
        match *self {
            NABufferType::AudioU8(ref ab)     => Some(ab.get_chmap()),
            NABufferType::AudioI16(ref ab)    => Some(ab.get_chmap()),
            NABufferType::AudioI32(ref ab)    => Some(ab.get_chmap()),
            NABufferType::AudioF32(ref ab)    => Some(ab.get_chmap()),
            NABufferType::AudioPacked(ref ab) => Some(ab.get_chmap()),
            _ => None,
        }
    }
    /// Returns audio frame duration in samples (0 for non-audio buffers).
    pub fn get_audio_length(&self) -> usize {
        match *self {
            NABufferType::AudioU8(ref ab)     => ab.get_length(),
            NABufferType::AudioI16(ref ab)    => ab.get_length(),
            NABufferType::AudioI32(ref ab)    => ab.get_length(),
            NABufferType::AudioF32(ref ab)    => ab.get_length(),
            NABufferType::AudioPacked(ref ab) => ab.get_length(),
            _ => 0,
        }
    }
    /// Truncates audio frame duration if possible (no-op for non-audio buffers).
    pub fn truncate_audio(&mut self, len: usize) {
        match *self {
            NABufferType::AudioU8(ref mut ab)     => ab.truncate(len),
            NABufferType::AudioI16(ref mut ab)    => ab.truncate(len),
            NABufferType::AudioI32(ref mut ab)    => ab.truncate(len),
            NABufferType::AudioF32(ref mut ab)    => ab.truncate(len),
            NABufferType::AudioPacked(ref mut ab) => ab.truncate(len),
            _ => {},
        };
    }
    /// Returns the distance between starts of two channels (0 for non-audio buffers).
    pub fn get_audio_stride(&self) -> usize {
        match *self {
            NABufferType::AudioU8(ref ab)     => ab.get_stride(),
            NABufferType::AudioI16(ref ab)    => ab.get_stride(),
            NABufferType::AudioI32(ref ab)    => ab.get_stride(),
            NABufferType::AudioF32(ref ab)    => ab.get_stride(),
            NABufferType::AudioPacked(ref ab) => ab.get_stride(),
            _ => 0,
        }
    }
    /// Returns the distance between two samples in one channel (0 for non-audio buffers).
    pub fn get_audio_step(&self) -> usize {
        match *self {
            NABufferType::AudioU8(ref ab)     => ab.get_step(),
            NABufferType::AudioI16(ref ab)    => ab.get_step(),
            NABufferType::AudioI32(ref ab)    => ab.get_step(),
            NABufferType::AudioF32(ref ab)    => ab.get_step(),
            NABufferType::AudioPacked(ref ab) => ab.get_step(),
            _ => 0,
        }
    }
    /// Returns reference to 8-bit (or packed) audio buffer.
    pub fn get_abuf_u8(&self) -> Option<NAAudioBuffer<u8>> {
        match *self {
            NABufferType::AudioU8(ref ab)     => Some(ab.clone()),
            NABufferType::AudioPacked(ref ab) => Some(ab.clone()),
            _ => None,
        }
    }
    /// Returns reference to 16-bit audio buffer.
    pub fn get_abuf_i16(&self) -> Option<NAAudioBuffer<i16>> {
        match *self {
            NABufferType::AudioI16(ref ab) => Some(ab.clone()),
            _ => None,
        }
    }
    /// Returns reference to 32-bit integer audio buffer.
    pub fn get_abuf_i32(&self) -> Option<NAAudioBuffer<i32>> {
        match *self {
            NABufferType::AudioI32(ref ab) => Some(ab.clone()),
            _ => None,
        }
    }
    /// Returns reference to 32-bit floating point audio buffer.
    pub fn get_abuf_f32(&self) -> Option<NAAudioBuffer<f32>> {
        match *self {
            NABufferType::AudioF32(ref ab) => Some(ab.clone()),
            _ => None,
        }
    }
    /// Prints internal buffer layout.
    pub fn print_buffer_metadata(&self) {
        match *self {
            NABufferType::Video(ref buf)       => buf.print_contents("8-bit"),
            NABufferType::Video16(ref buf)     => buf.print_contents("16-bit"),
            NABufferType::Video32(ref buf)     => buf.print_contents("32-bit"),
            NABufferType::VideoPacked(ref buf) => buf.print_contents("packed"),
            NABufferType::AudioU8(ref buf)     => buf.print_contents("8-bit unsigned integer"),
            NABufferType::AudioI16(ref buf)    => buf.print_contents("16-bit integer"),
            NABufferType::AudioI32(ref buf)    => buf.print_contents("32-bit integer"),
            NABufferType::AudioF32(ref buf)    => buf.print_contents("32-bit float"),
            NABufferType::AudioPacked(ref buf) => buf.print_contents("packed"),
            NABufferType::Data(ref buf) => { println!("Data buffer, len = {}", buf.len()); },
            NABufferType::None => { println!("No buffer"); },
        };
    }
}
467
// Maximum number of picture components a simple frame view can describe.
const NA_SIMPLE_VFRAME_COMPONENTS: usize = 4;
/// Simplified decoded frame data.
pub struct NASimpleVideoFrame<'a, T: Copy> {
    /// Widths of each picture component.
    pub width: [usize; NA_SIMPLE_VFRAME_COMPONENTS],
    /// Heights of each picture component.
    pub height: [usize; NA_SIMPLE_VFRAME_COMPONENTS],
    /// Orientation (upside-down or downside-up) flag.
    pub flip: bool,
    /// Strides for each component.
    pub stride: [usize; NA_SIMPLE_VFRAME_COMPONENTS],
    /// Start of each component.
    pub offset: [usize; NA_SIMPLE_VFRAME_COMPONENTS],
    /// Number of components.
    pub components: usize,
    /// Pointer to the picture pixel data.
    pub data: &'a mut [T],
}
486
487 impl<'a, T:Copy> NASimpleVideoFrame<'a, T> {
488 /// Constructs a new instance of `NASimpleVideoFrame` from `NAVideoBuffer`.
489 pub fn from_video_buf(vbuf: &'a mut NAVideoBuffer<T>) -> Option<Self> {
490 let vinfo = vbuf.get_info();
491 let components = vinfo.format.components as usize;
492 if components > NA_SIMPLE_VFRAME_COMPONENTS {
493 return None;
494 }
495 let mut w: [usize; NA_SIMPLE_VFRAME_COMPONENTS] = [0; NA_SIMPLE_VFRAME_COMPONENTS];
496 let mut h: [usize; NA_SIMPLE_VFRAME_COMPONENTS] = [0; NA_SIMPLE_VFRAME_COMPONENTS];
497 let mut s: [usize; NA_SIMPLE_VFRAME_COMPONENTS] = [0; NA_SIMPLE_VFRAME_COMPONENTS];
498 let mut o: [usize; NA_SIMPLE_VFRAME_COMPONENTS] = [0; NA_SIMPLE_VFRAME_COMPONENTS];
499 for comp in 0..components {
500 let (width, height) = vbuf.get_dimensions(comp);
501 w[comp] = width;
502 h[comp] = height;
503 s[comp] = vbuf.get_stride(comp);
504 o[comp] = vbuf.get_offset(comp);
505 }
506 let flip = vinfo.flipped;
507 Some(NASimpleVideoFrame {
508 width: w,
509 height: h,
510 flip,
511 stride: s,
512 offset: o,
513 components,
514 data: vbuf.data.as_mut_slice(),
515 })
516 }
517 }
518
/// A list of possible frame allocator errors.
#[derive(Debug,Clone,Copy,PartialEq)]
pub enum AllocatorError {
    /// Requested picture dimensions are too large (size computation overflowed).
    TooLargeDimensions,
    /// Invalid input format.
    FormatError,
}
527
528 /// Constructs a new video buffer with requested format.
529 ///
530 /// `align` is power of two alignment for image. E.g. the value of 5 means that frame dimensions will be padded to be multiple of 32.
531 pub fn alloc_video_buffer(vinfo: NAVideoInfo, align: u8) -> Result<NABufferType, AllocatorError> {
532 let fmt = &vinfo.format;
533 let mut new_size: usize = 0;
534 let mut offs: Vec<usize> = Vec::new();
535 let mut strides: Vec<usize> = Vec::new();
536
537 for i in 0..fmt.get_num_comp() {
538 if fmt.get_chromaton(i) == None { return Err(AllocatorError::FormatError); }
539 }
540
541 let align_mod = ((1 << align) as usize) - 1;
542 let width = ((vinfo.width as usize) + align_mod) & !align_mod;
543 let height = ((vinfo.height as usize) + align_mod) & !align_mod;
544 let mut max_depth = 0;
545 let mut all_packed = true;
546 let mut all_bytealigned = true;
547 for i in 0..fmt.get_num_comp() {
548 let ochr = fmt.get_chromaton(i);
549 if ochr.is_none() { continue; }
550 let chr = ochr.unwrap();
551 if !chr.is_packed() {
552 all_packed = false;
553 } else if ((chr.get_shift() + chr.get_depth()) & 7) != 0 {
554 all_bytealigned = false;
555 }
556 max_depth = max(max_depth, chr.get_depth());
557 }
558 let unfit_elem_size = !matches!(fmt.get_elem_size(), 2 | 4);
559
560 //todo semi-packed like NV12
561 if fmt.is_paletted() {
562 //todo various-sized palettes?
563 let stride = vinfo.get_format().get_chromaton(0).unwrap().get_linesize(width);
564 let pic_sz = stride.checked_mul(height);
565 if pic_sz == None { return Err(AllocatorError::TooLargeDimensions); }
566 let pal_size = 256 * (fmt.get_elem_size() as usize);
567 let new_size = pic_sz.unwrap().checked_add(pal_size);
568 if new_size == None { return Err(AllocatorError::TooLargeDimensions); }
569 offs.push(0);
570 offs.push(stride * height);
571 strides.push(stride);
572 let data: Vec<u8> = vec![0; new_size.unwrap()];
573 let buf: NAVideoBuffer<u8> = NAVideoBuffer { data: NABufferRef::new(data), info: vinfo, offs, strides };
574 Ok(NABufferType::Video(buf.into_ref()))
575 } else if !all_packed {
576 for i in 0..fmt.get_num_comp() {
577 let ochr = fmt.get_chromaton(i);
578 if ochr.is_none() { continue; }
579 let chr = ochr.unwrap();
580 offs.push(new_size as usize);
581 let stride = chr.get_linesize(width);
582 let cur_h = chr.get_height(height);
583 let cur_sz = stride.checked_mul(cur_h);
584 if cur_sz == None { return Err(AllocatorError::TooLargeDimensions); }
585 let new_sz = new_size.checked_add(cur_sz.unwrap());
586 if new_sz == None { return Err(AllocatorError::TooLargeDimensions); }
587 new_size = new_sz.unwrap();
588 strides.push(stride);
589 }
590 if max_depth <= 8 {
591 let data: Vec<u8> = vec![0; new_size];
592 let buf: NAVideoBuffer<u8> = NAVideoBuffer { data: NABufferRef::new(data), info: vinfo, offs, strides };
593 Ok(NABufferType::Video(buf.into_ref()))
594 } else if max_depth <= 16 {
595 let data: Vec<u16> = vec![0; new_size];
596 let buf: NAVideoBuffer<u16> = NAVideoBuffer { data: NABufferRef::new(data), info: vinfo, offs, strides };
597 Ok(NABufferType::Video16(buf.into_ref()))
598 } else {
599 let data: Vec<u32> = vec![0; new_size];
600 let buf: NAVideoBuffer<u32> = NAVideoBuffer { data: NABufferRef::new(data), info: vinfo, offs, strides };
601 Ok(NABufferType::Video32(buf.into_ref()))
602 }
603 } else if all_bytealigned || unfit_elem_size {
604 let elem_sz = fmt.get_elem_size();
605 let line_sz = width.checked_mul(elem_sz as usize);
606 if line_sz == None { return Err(AllocatorError::TooLargeDimensions); }
607 let new_sz = line_sz.unwrap().checked_mul(height);
608 if new_sz == None { return Err(AllocatorError::TooLargeDimensions); }
609 new_size = new_sz.unwrap();
610 let data: Vec<u8> = vec![0; new_size];
611 strides.push(line_sz.unwrap());
612 let buf: NAVideoBuffer<u8> = NAVideoBuffer { data: NABufferRef::new(data), info: vinfo, offs, strides };
613 Ok(NABufferType::VideoPacked(buf.into_ref()))
614 } else {
615 let elem_sz = fmt.get_elem_size();
616 let new_sz = width.checked_mul(height);
617 if new_sz == None { return Err(AllocatorError::TooLargeDimensions); }
618 new_size = new_sz.unwrap();
619 match elem_sz {
620 2 => {
621 let data: Vec<u16> = vec![0; new_size];
622 strides.push(width);
623 let buf: NAVideoBuffer<u16> = NAVideoBuffer { data: NABufferRef::new(data), info: vinfo, offs, strides };
624 Ok(NABufferType::Video16(buf.into_ref()))
625 },
626 4 => {
627 let data: Vec<u32> = vec![0; new_size];
628 strides.push(width);
629 let buf: NAVideoBuffer<u32> = NAVideoBuffer { data: NABufferRef::new(data), info: vinfo, offs, strides };
630 Ok(NABufferType::Video32(buf.into_ref()))
631 },
632 _ => unreachable!(),
633 }
634 }
635 }
636
637 /// Constructs a new audio buffer for the requested format and length.
638 #[allow(clippy::collapsible_if)]
639 pub fn alloc_audio_buffer(ainfo: NAAudioInfo, nsamples: usize, chmap: NAChannelMap) -> Result<NABufferType, AllocatorError> {
640 let mut offs: Vec<usize> = Vec::new();
641 if ainfo.format.is_planar() || ((ainfo.format.get_bits() % 8) == 0) {
642 let len = nsamples.checked_mul(ainfo.channels as usize);
643 if len == None { return Err(AllocatorError::TooLargeDimensions); }
644 let length = len.unwrap();
645 let stride;
646 let step;
647 if ainfo.format.is_planar() {
648 stride = nsamples;
649 step = 1;
650 for i in 0..ainfo.channels {
651 offs.push((i as usize) * stride);
652 }
653 } else {
654 stride = 1;
655 step = ainfo.channels as usize;
656 for i in 0..ainfo.channels {
657 offs.push(i as usize);
658 }
659 }
660 if ainfo.format.is_float() {
661 if ainfo.format.get_bits() == 32 {
662 let data: Vec<f32> = vec![0.0; length];
663 let buf: NAAudioBuffer<f32> = NAAudioBuffer { data: NABufferRef::new(data), info: ainfo, offs, chmap, len: nsamples, stride, step };
664 Ok(NABufferType::AudioF32(buf))
665 } else {
666 Err(AllocatorError::TooLargeDimensions)
667 }
668 } else {
669 if ainfo.format.get_bits() == 8 && !ainfo.format.is_signed() {
670 let data: Vec<u8> = vec![0; length];
671 let buf: NAAudioBuffer<u8> = NAAudioBuffer { data: NABufferRef::new(data), info: ainfo, offs, chmap, len: nsamples, stride, step };
672 Ok(NABufferType::AudioU8(buf))
673 } else if ainfo.format.get_bits() == 16 && ainfo.format.is_signed() {
674 let data: Vec<i16> = vec![0; length];
675 let buf: NAAudioBuffer<i16> = NAAudioBuffer { data: NABufferRef::new(data), info: ainfo, offs, chmap, len: nsamples, stride, step };
676 Ok(NABufferType::AudioI16(buf))
677 } else if ainfo.format.get_bits() == 32 && ainfo.format.is_signed() {
678 let data: Vec<i32> = vec![0; length];
679 let buf: NAAudioBuffer<i32> = NAAudioBuffer { data: NABufferRef::new(data), info: ainfo, offs, chmap, len: nsamples, stride, step };
680 Ok(NABufferType::AudioI32(buf))
681 } else {
682 Err(AllocatorError::TooLargeDimensions)
683 }
684 }
685 } else {
686 let len = nsamples.checked_mul(ainfo.channels as usize);
687 if len == None { return Err(AllocatorError::TooLargeDimensions); }
688 let length = ainfo.format.get_audio_size(len.unwrap() as u64);
689 let data: Vec<u8> = vec![0; length];
690 let buf: NAAudioBuffer<u8> = NAAudioBuffer { data: NABufferRef::new(data), info: ainfo, offs, chmap, len: nsamples, stride: 0, step: 0 };
691 Ok(NABufferType::AudioPacked(buf))
692 }
693 }
694
695 /// Constructs a new buffer for generic data.
696 pub fn alloc_data_buffer(size: usize) -> Result<NABufferType, AllocatorError> {
697 let data: Vec<u8> = vec![0; size];
698 let buf: NABufferRef<Vec<u8>> = NABufferRef::new(data);
699 Ok(NABufferType::Data(buf))
700 }
701
702 /// Creates a clone of current buffer.
703 pub fn copy_buffer(buf: &NABufferType) -> NABufferType {
704 buf.clone()
705 }
706
/// Video frame pool.
///
/// This structure allows codec to effectively reuse old frames instead of allocating and de-allocating frames every time.
/// Caller can also reserve some frames for its own purposes e.g. display queue.
pub struct NAVideoBufferPool<T:Copy> {
    // Allocated frames; a frame is free when the pool holds the only reference.
    pool: Vec<NAVideoBufferRef<T>>,
    // Number of frames the codec itself needs.
    max_len: usize,
    // Extra frames reserved for the user (see `set_dec_bufs`).
    add_len: usize,
}
716
717 impl<T:Copy> NAVideoBufferPool<T> {
718 /// Constructs a new `NAVideoBufferPool` instance.
719 pub fn new(max_len: usize) -> Self {
720 Self {
721 pool: Vec::with_capacity(max_len),
722 max_len,
723 add_len: 0,
724 }
725 }
726 /// Sets the number of buffers reserved for the user.
727 pub fn set_dec_bufs(&mut self, add_len: usize) {
728 self.add_len = add_len;
729 }
730 /// Returns an unused buffer from the pool.
731 pub fn get_free(&mut self) -> Option<NAVideoBufferRef<T>> {
732 for e in self.pool.iter() {
733 if e.get_num_refs() == 1 {
734 return Some(e.clone());
735 }
736 }
737 None
738 }
739 /// Clones provided frame data into a free pool frame.
740 pub fn get_copy(&mut self, rbuf: &NAVideoBufferRef<T>) -> Option<NAVideoBufferRef<T>> {
741 let mut dbuf = self.get_free()?;
742 dbuf.data.copy_from_slice(&rbuf.data);
743 Some(dbuf)
744 }
745 /// Clears the pool from all frames.
746 pub fn reset(&mut self) {
747 self.pool.clear();
748 }
749 /// Returns the number of frames currently in use.
750 pub fn get_num_used(&self) -> usize {
751 self.pool.iter().filter(|el| el.get_num_refs() != 1).count()
752 }
753 /// Adds a manually allocated frame to the pool.
754 pub fn add_frame(&mut self, buf: NAVideoBufferRef<T>) {
755 self.pool.push(buf);
756 }
757 /// Returns current video format (if available).
758 pub fn get_info(&self) -> Option<NAVideoInfo> {
759 if !self.pool.is_empty() {
760 Some(self.pool[0].get_info())
761 } else {
762 None
763 }
764 }
765 }
766
767 impl NAVideoBufferPool<u8> {
768 /// Allocates the target amount of video frames using [`alloc_video_buffer`].
769 ///
770 /// [`alloc_video_buffer`]: ./fn.alloc_video_buffer.html
771 pub fn prealloc_video(&mut self, vinfo: NAVideoInfo, align: u8) -> Result<(), AllocatorError> {
772 let nbufs = self.max_len + self.add_len - self.pool.len();
773 for _ in 0..nbufs {
774 let vbuf = alloc_video_buffer(vinfo, align)?;
775 if let NABufferType::Video(buf) = vbuf {
776 self.pool.push(buf);
777 } else if let NABufferType::VideoPacked(buf) = vbuf {
778 self.pool.push(buf);
779 } else {
780 return Err(AllocatorError::FormatError);
781 }
782 }
783 Ok(())
784 }
785 }
786
787 impl NAVideoBufferPool<u16> {
788 /// Allocates the target amount of video frames using [`alloc_video_buffer`].
789 ///
790 /// [`alloc_video_buffer`]: ./fn.alloc_video_buffer.html
791 pub fn prealloc_video(&mut self, vinfo: NAVideoInfo, align: u8) -> Result<(), AllocatorError> {
792 let nbufs = self.max_len + self.add_len - self.pool.len();
793 for _ in 0..nbufs {
794 let vbuf = alloc_video_buffer(vinfo, align)?;
795 if let NABufferType::Video16(buf) = vbuf {
796 self.pool.push(buf);
797 } else {
798 return Err(AllocatorError::FormatError);
799 }
800 }
801 Ok(())
802 }
803 }
804
805 impl NAVideoBufferPool<u32> {
806 /// Allocates the target amount of video frames using [`alloc_video_buffer`].
807 ///
808 /// [`alloc_video_buffer`]: ./fn.alloc_video_buffer.html
809 pub fn prealloc_video(&mut self, vinfo: NAVideoInfo, align: u8) -> Result<(), AllocatorError> {
810 let nbufs = self.max_len + self.add_len - self.pool.len();
811 for _ in 0..nbufs {
812 let vbuf = alloc_video_buffer(vinfo, align)?;
813 if let NABufferType::Video32(buf) = vbuf {
814 self.pool.push(buf);
815 } else {
816 return Err(AllocatorError::FormatError);
817 }
818 }
819 Ok(())
820 }
821 }
822
/// Information about codec contained in a stream.
#[allow(dead_code)]
#[derive(Clone)]
pub struct NACodecInfo {
    // Static codec identifier string.
    name: &'static str,
    // Stream type and its format parameters.
    properties: NACodecTypeInfo,
    // Optional codec-specific initialisation data, shared by reference.
    extradata: Option<Arc<Vec<u8>>>,
}
831
/// A specialised type for reference-counted `NACodecInfo`
/// (produced by `NACodecInfo::into_ref`).
pub type NACodecInfoRef = Arc<NACodecInfo>;
834
835 impl NACodecInfo {
836 /// Constructs a new instance of `NACodecInfo`.
837 pub fn new(name: &'static str, p: NACodecTypeInfo, edata: Option<Vec<u8>>) -> Self {
838 NACodecInfo { name, properties: p, extradata: edata.map(Arc::new) }
839 }
840 /// Constructs a new reference-counted instance of `NACodecInfo`.
841 pub fn new_ref(name: &'static str, p: NACodecTypeInfo, edata: Option<Arc<Vec<u8>>>) -> Self {
842 NACodecInfo { name, properties: p, extradata: edata }
843 }
844 /// Converts current instance into a reference-counted one.
845 pub fn into_ref(self) -> NACodecInfoRef { Arc::new(self) }
846 /// Returns codec information.
847 pub fn get_properties(&self) -> NACodecTypeInfo { self.properties }
848 /// Returns additional initialisation data required by the codec.
849 pub fn get_extradata(&self) -> Option<Arc<Vec<u8>>> {
850 if let Some(ref vec) = self.extradata { return Some(vec.clone()); }
851 None
852 }
853 /// Returns codec name.
854 pub fn get_name(&self) -> &'static str { self.name }
855 /// Reports whether it is a video codec.
856 pub fn is_video(&self) -> bool {
857 if let NACodecTypeInfo::Video(_) = self.properties { return true; }
858 false
859 }
860 /// Reports whether it is an audio codec.
861 pub fn is_audio(&self) -> bool {
862 if let NACodecTypeInfo::Audio(_) = self.properties { return true; }
863 false
864 }
865 /// Constructs a new empty reference-counted instance of `NACodecInfo`.
866 pub fn new_dummy() -> Arc<Self> {
867 Arc::new(DUMMY_CODEC_INFO)
868 }
869 /// Updates codec infomation.
870 pub fn replace_info(&self, p: NACodecTypeInfo) -> Arc<Self> {
871 Arc::new(NACodecInfo { name: self.name, properties: p, extradata: self.extradata.clone() })
872 }
873 }
874
impl Default for NACodecInfo {
    /// Returns [`DUMMY_CODEC_INFO`], i.e. no codec name, type or extradata.
    fn default() -> Self { DUMMY_CODEC_INFO }
}
878
879 impl fmt::Display for NACodecInfo {
880 fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
881 let edata = match self.extradata.clone() {
882 None => "no extradata".to_string(),
883 Some(v) => format!("{} byte(s) of extradata", v.len()),
884 };
885 write!(f, "{}: {} {}", self.name, self.properties, edata)
886 }
887 }
888
/// Default empty codec information.
///
/// Serves as a placeholder when no real codec information is available
/// (see [`NACodecInfo::new_dummy`] and the `Default` implementation).
pub const DUMMY_CODEC_INFO: NACodecInfo = NACodecInfo {
    name: "none",
    properties: NACodecTypeInfo::None,
    extradata: None };
894
/// A list of recognized frame types.
#[derive(Debug,Clone,Copy,PartialEq)]
#[allow(dead_code)]
pub enum FrameType {
    /// Intra frame type (coded without referencing other frames).
    I,
    /// Inter frame type (predicted from previously decoded frames).
    P,
    /// Bidirectionally predicted frame.
    B,
    /// Skip frame.
    ///
    /// When such frame is encountered then last frame should be used again if it is needed.
    Skip,
    /// Some other frame type.
    Other,
}
912
913 impl fmt::Display for FrameType {
914 fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
915 match *self {
916 FrameType::I => write!(f, "I"),
917 FrameType::P => write!(f, "P"),
918 FrameType::B => write!(f, "B"),
919 FrameType::Skip => write!(f, "skip"),
920 FrameType::Other => write!(f, "x"),
921 }
922 }
923 }
924
/// Timestamp information.
#[derive(Debug,Clone,Copy)]
pub struct NATimeInfo {
    /// Presentation timestamp (in timebase units).
    pub pts: Option<u64>,
    /// Decode timestamp (in timebase units).
    pub dts: Option<u64>,
    /// Duration (in timebase units).
    pub duration: Option<u64>,
    /// Timebase numerator.
    pub tb_num: u32,
    /// Timebase denominator.
    pub tb_den: u32,
}
939
940 impl NATimeInfo {
941 /// Constructs a new `NATimeInfo` instance.
942 pub fn new(pts: Option<u64>, dts: Option<u64>, duration: Option<u64>, tb_num: u32, tb_den: u32) -> Self {
943 NATimeInfo { pts, dts, duration, tb_num, tb_den }
944 }
945 /// Returns presentation timestamp.
946 pub fn get_pts(&self) -> Option<u64> { self.pts }
947 /// Returns decoding timestamp.
948 pub fn get_dts(&self) -> Option<u64> { self.dts }
949 /// Returns duration.
950 pub fn get_duration(&self) -> Option<u64> { self.duration }
951 /// Sets new presentation timestamp.
952 pub fn set_pts(&mut self, pts: Option<u64>) { self.pts = pts; }
953 /// Sets new decoding timestamp.
954 pub fn set_dts(&mut self, dts: Option<u64>) { self.dts = dts; }
955 /// Sets new duration.
956 pub fn set_duration(&mut self, dur: Option<u64>) { self.duration = dur; }
957
958 /// Converts time in given scale into timestamp in given base.
959 #[allow(clippy::collapsible_if)]
960 pub fn time_to_ts(time: u64, base: u64, tb_num: u32, tb_den: u32) -> u64 {
961 let tb_num = u64::from(tb_num);
962 let tb_den = u64::from(tb_den);
963 let tmp = time.checked_mul(tb_den);
964 if let Some(tmp) = tmp {
965 tmp / base / tb_num
966 } else {
967 if tb_num < base {
968 let coarse = time / tb_num;
969 if let Some(tmp) = coarse.checked_mul(tb_den) {
970 tmp / base
971 } else {
972 (coarse / base) * tb_den
973 }
974 } else {
975 let coarse = time / base;
976 if let Some(tmp) = coarse.checked_mul(tb_den) {
977 tmp / tb_num
978 } else {
979 (coarse / tb_num) * tb_den
980 }
981 }
982 }
983 }
984 /// Converts timestamp in given base into time in given scale.
985 pub fn ts_to_time(ts: u64, base: u64, tb_num: u32, tb_den: u32) -> u64 {
986 let tb_num = u64::from(tb_num);
987 let tb_den = u64::from(tb_den);
988 let tmp = ts.checked_mul(base);
989 if let Some(tmp) = tmp {
990 let tmp2 = tmp.checked_mul(tb_num);
991 if let Some(tmp2) = tmp2 {
992 tmp2 / tb_den
993 } else {
994 (tmp / tb_den) * tb_num
995 }
996 } else {
997 let tmp = ts.checked_mul(tb_num);
998 if let Some(tmp) = tmp {
999 (tmp / tb_den) * base
1000 } else {
1001 (ts / tb_den) * base * tb_num
1002 }
1003 }
1004 }
1005 fn get_cur_ts(&self) -> u64 { self.pts.unwrap_or_else(|| self.dts.unwrap_or(0)) }
1006 fn get_cur_millis(&self) -> u64 {
1007 let ts = self.get_cur_ts();
1008 Self::ts_to_time(ts, 1000, self.tb_num, self.tb_den)
1009 }
1010 /// Checks whether the current time information is earler than provided reference time.
1011 pub fn less_than(&self, time: NATimePoint) -> bool {
1012 if self.pts.is_none() && self.dts.is_none() {
1013 return true;
1014 }
1015 match time {
1016 NATimePoint::PTS(rpts) => self.get_cur_ts() < rpts,
1017 NATimePoint::Milliseconds(ms) => self.get_cur_millis() < ms,
1018 NATimePoint::None => false,
1019 }
1020 }
1021 /// Checks whether the current time information is the same as provided reference time.
1022 pub fn equal(&self, time: NATimePoint) -> bool {
1023 if self.pts.is_none() && self.dts.is_none() {
1024 return time == NATimePoint::None;
1025 }
1026 match time {
1027 NATimePoint::PTS(rpts) => self.get_cur_ts() == rpts,
1028 NATimePoint::Milliseconds(ms) => self.get_cur_millis() == ms,
1029 NATimePoint::None => false,
1030 }
1031 }
1032 }
1033
/// Time information for specifying durations or seek positions.
#[derive(Clone,Copy,Debug,PartialEq)]
pub enum NATimePoint {
    /// Time in milliseconds.
    Milliseconds(u64),
    /// Stream timestamp (in the timebase units of the corresponding stream).
    PTS(u64),
    /// No time information present.
    None,
}
1044
1045 impl Default for NATimePoint {
1046 fn default() -> Self {
1047 NATimePoint::None
1048 }
1049 }
1050
1051 impl fmt::Display for NATimePoint {
1052 fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
1053 match *self {
1054 NATimePoint::Milliseconds(millis) => {
1055 let tot_s = millis / 1000;
1056 let ms = millis % 1000;
1057 if tot_s < 60 {
1058 if ms != 0 {
1059 return write!(f, "{}.{:03}", tot_s, ms);
1060 } else {
1061 return write!(f, "{}", tot_s);
1062 }
1063 }
1064 let tot_m = tot_s / 60;
1065 let s = tot_s % 60;
1066 if tot_m < 60 {
1067 if ms != 0 {
1068 return write!(f, "{}:{:02}.{:03}", tot_m, s, ms);
1069 } else {
1070 return write!(f, "{}:{:02}", tot_m, s);
1071 }
1072 }
1073 let h = tot_m / 60;
1074 let m = tot_m % 60;
1075 if ms != 0 {
1076 write!(f, "{}:{:02}:{:02}.{:03}", h, m, s, ms)
1077 } else {
1078 write!(f, "{}:{:02}:{:02}", h, m, s)
1079 }
1080 },
1081 NATimePoint::PTS(pts) => {
1082 write!(f, "{}pts", pts)
1083 },
1084 NATimePoint::None => {
1085 write!(f, "none")
1086 },
1087 }
1088 }
1089 }
1090
1091 impl FromStr for NATimePoint {
1092 type Err = FormatParseError;
1093
1094 /// Parses the string into time information.
1095 ///
1096 /// Accepted formats are `<u64>pts`, `<u64>ms` or `[hh:][mm:]ss[.ms]`.
1097 fn from_str(s: &str) -> Result<Self, Self::Err> {
1098 if s.is_empty() {
1099 return Err(FormatParseError {});
1100 }
1101 if !s.ends_with("pts") {
1102 if s.ends_with("ms") {
1103 let str_b = s.as_bytes();
1104 let num = std::str::from_utf8(&str_b[..str_b.len() - 2]).unwrap();
1105 let ret = num.parse::<u64>();
1106 if let Ok(val) = ret {
1107 return Ok(NATimePoint::Milliseconds(val));
1108 } else {
1109 return Err(FormatParseError {});
1110 }
1111 }
1112 let mut parts = s.split(':');
1113 let mut hrs = None;
1114 let mut mins = None;
1115 let mut secs = parts.next();
1116 if let Some(part) = parts.next() {
1117 std::mem::swap(&mut mins, &mut secs);
1118 secs = Some(part);
1119 }
1120 if let Some(part) = parts.next() {
1121 std::mem::swap(&mut hrs, &mut mins);
1122 std::mem::swap(&mut mins, &mut secs);
1123 secs = Some(part);
1124 }
1125 if parts.next().is_some() {
1126 return Err(FormatParseError {});
1127 }
1128 let hours = if let Some(val) = hrs {
1129 let ret = val.parse::<u64>();
1130 if ret.is_err() { return Err(FormatParseError {}); }
1131 let val = ret.unwrap();
1132 if val > 1000 { return Err(FormatParseError {}); }
1133 val
1134 } else { 0 };
1135 let minutes = if let Some(val) = mins {
1136 let ret = val.parse::<u64>();
1137 if ret.is_err() { return Err(FormatParseError {}); }
1138 let val = ret.unwrap();
1139 if val >= 60 { return Err(FormatParseError {}); }
1140 val
1141 } else { 0 };
1142 let (seconds, millis) = if let Some(val) = secs {
1143 let mut parts = val.split('.');
1144 let ret = parts.next().unwrap().parse::<u64>();
1145 if ret.is_err() { return Err(FormatParseError {}); }
1146 let seconds = ret.unwrap();
1147 if mins.is_some() && seconds >= 60 { return Err(FormatParseError {}); }
1148 let millis = if let Some(val) = parts.next() {
1149 let mut mval = 0;
1150 let mut base = 0;
1151 for ch in val.chars() {
1152 if ('0'..='9').contains(&ch) {
1153 mval = mval * 10 + u64::from((ch as u8) - b'0');
1154 base += 1;
1155 if base > 3 { break; }
1156 } else {
1157 return Err(FormatParseError {});
1158 }
1159 }
1160 while base < 3 {
1161 mval *= 10;
1162 base += 1;
1163 }
1164 mval
1165 } else { 0 };
1166 (seconds, millis)
1167 } else { unreachable!(); };
1168 let tot_secs = hours * 60 * 60 + minutes * 60 + seconds;
1169 Ok(NATimePoint::Milliseconds(tot_secs * 1000 + millis))
1170 } else {
1171 let str_b = s.as_bytes();
1172 let num = std::str::from_utf8(&str_b[..str_b.len() - 3]).unwrap();
1173 let ret = num.parse::<u64>();
1174 if let Ok(val) = ret {
1175 Ok(NATimePoint::PTS(val))
1176 } else {
1177 Err(FormatParseError {})
1178 }
1179 }
1180 }
1181 }
1182
/// Decoded frame information.
#[allow(dead_code)]
#[derive(Clone)]
pub struct NAFrame {
    /// Frame timestamp.
    pub ts: NATimeInfo,
    /// Frame ID.
    pub id: i64,
    /// Decoded frame data.
    buffer: NABufferType,
    /// Codec information for the stream this frame belongs to.
    info: NACodecInfoRef,
    /// Frame type.
    pub frame_type: FrameType,
    /// Keyframe flag.
    pub key: bool,
//    options: HashMap<String, NAValue>,
}
1199
/// A specialised type for reference-counted `NAFrame`.
///
/// Use [`NAFrame::into_ref`] to obtain it from a plain `NAFrame`.
pub type NAFrameRef = Arc<NAFrame>;
1202
1203 fn get_plane_size(info: &NAVideoInfo, idx: usize) -> (usize, usize) {
1204 let chromaton = info.get_format().get_chromaton(idx);
1205 if chromaton.is_none() { return (0, 0); }
1206 let (hs, vs) = chromaton.unwrap().get_subsampling();
1207 let w = (info.get_width() + ((1 << hs) - 1)) >> hs;
1208 let h = (info.get_height() + ((1 << vs) - 1)) >> vs;
1209 (w, h)
1210 }
1211
1212 impl NAFrame {
1213 /// Constructs a new `NAFrame` instance.
1214 pub fn new(ts: NATimeInfo,
1215 ftype: FrameType,
1216 keyframe: bool,
1217 info: NACodecInfoRef,
1218 /*options: HashMap<String, NAValue>,*/
1219 buffer: NABufferType) -> Self {
1220 NAFrame { ts, id: 0, buffer, info, frame_type: ftype, key: keyframe/*, options*/ }
1221 }
1222 /// Returns frame format information.
1223 pub fn get_info(&self) -> NACodecInfoRef { self.info.clone() }
1224 /// Returns frame type.
1225 pub fn get_frame_type(&self) -> FrameType { self.frame_type }
1226 /// Reports whether the frame is a keyframe.
1227 pub fn is_keyframe(&self) -> bool { self.key }
1228 /// Sets new frame type.
1229 pub fn set_frame_type(&mut self, ftype: FrameType) { self.frame_type = ftype; }
1230 /// Sets keyframe flag.
1231 pub fn set_keyframe(&mut self, key: bool) { self.key = key; }
1232 /// Returns frame timestamp.
1233 pub fn get_time_information(&self) -> NATimeInfo { self.ts }
1234 /// Returns frame presentation time.
1235 pub fn get_pts(&self) -> Option<u64> { self.ts.get_pts() }
1236 /// Returns frame decoding time.
1237 pub fn get_dts(&self) -> Option<u64> { self.ts.get_dts() }
1238 /// Returns picture ID.
1239 pub fn get_id(&self) -> i64 { self.id }
1240 /// Returns frame display duration.
1241 pub fn get_duration(&self) -> Option<u64> { self.ts.get_duration() }
1242 /// Sets new presentation timestamp.
1243 pub fn set_pts(&mut self, pts: Option<u64>) { self.ts.set_pts(pts); }
1244 /// Sets new decoding timestamp.
1245 pub fn set_dts(&mut self, dts: Option<u64>) { self.ts.set_dts(dts); }
1246 /// Sets new picture ID.
1247 pub fn set_id(&mut self, id: i64) { self.id = id; }
1248 /// Sets new duration.
1249 pub fn set_duration(&mut self, dur: Option<u64>) { self.ts.set_duration(dur); }
1250
1251 /// Returns a reference to the frame data.
1252 pub fn get_buffer(&self) -> NABufferType { self.buffer.clone() }
1253
1254 /// Converts current instance into a reference-counted one.
1255 pub fn into_ref(self) -> NAFrameRef { Arc::new(self) }
1256
1257 /// Creates new frame with metadata from `NAPacket`.
1258 pub fn new_from_pkt(pkt: &NAPacket, info: NACodecInfoRef, buf: NABufferType) -> NAFrame {
1259 NAFrame::new(pkt.ts, FrameType::Other, pkt.keyframe, info, /*HashMap::new(),*/ buf)
1260 }
1261 }
1262
1263 impl fmt::Display for NAFrame {
1264 fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
1265 let mut ostr = format!("frame type {}", self.frame_type);
1266 if let Some(pts) = self.ts.pts { ostr = format!("{} pts {}", ostr, pts); }
1267 if let Some(dts) = self.ts.dts { ostr = format!("{} dts {}", ostr, dts); }
1268 if let Some(dur) = self.ts.duration { ostr = format!("{} duration {}", ostr, dur); }
1269 if self.key { ostr = format!("{} kf", ostr); }
1270 write!(f, "[{}]", ostr)
1271 }
1272 }
1273
/// A list of possible stream types.
#[derive(Debug,Clone,Copy,PartialEq)]
#[allow(dead_code)]
pub enum StreamType {
    /// Video stream.
    Video,
    /// Audio stream.
    Audio,
    /// Subtitle stream.
    Subtitles,
    /// Any data stream (or might be an unrecognized audio/video stream).
    Data,
    /// Nonexistent stream.
    None,
}
1289
1290 impl fmt::Display for StreamType {
1291 fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
1292 match *self {
1293 StreamType::Video => write!(f, "Video"),
1294 StreamType::Audio => write!(f, "Audio"),
1295 StreamType::Subtitles => write!(f, "Subtitles"),
1296 StreamType::Data => write!(f, "Data"),
1297 StreamType::None => write!(f, "-"),
1298 }
1299 }
1300 }
1301
/// Stream data.
#[allow(dead_code)]
#[derive(Clone)]
pub struct NAStream {
    /// Stream type (video, audio, etc.).
    media_type: StreamType,
    /// Stream ID.
    pub id: u32,
    /// Stream number assigned by the demuxer.
    num: usize,
    /// Codec information for the stream.
    info: NACodecInfoRef,
    /// Timebase numerator.
    pub tb_num: u32,
    /// Timebase denominator.
    pub tb_den: u32,
    /// Duration in timebase units (zero if not available).
    pub duration: u64,
}
1318
/// A specialised reference-counted `NAStream` type.
///
/// Use [`NAStream::into_ref`] to obtain it from a plain `NAStream`.
pub type NAStreamRef = Arc<NAStream>;
1321
/// Downscales the timebase by its greatest common denominator.
///
/// E.g. a timebase of 6/4 is reduced to 3/2 and 5/10 to 1/2; a zero
/// numerator is returned unchanged.
pub fn reduce_timebase(tb_num: u32, tb_den: u32) -> (u32, u32) {
    if tb_num == 0 { return (tb_num, tb_den); }
    // Fast path: denominator is an exact multiple of the numerator.
    if (tb_den % tb_num) == 0 { return (1, tb_den / tb_num); }

    // Euclidean algorithm: O(log min(a, b)) steps, unlike the
    // subtraction-based variant which is linear in the quotient.
    let mut a = tb_num;
    let mut b = tb_den;
    while b != 0 {
        let r = a % b;
        a = b;
        b = r;
    }

    (tb_num / a, tb_den / a)
}
1338
1339 impl NAStream {
1340 /// Constructs a new `NAStream` instance.
1341 pub fn new(mt: StreamType, id: u32, info: NACodecInfo, tb_num: u32, tb_den: u32, duration: u64) -> Self {
1342 let (n, d) = reduce_timebase(tb_num, tb_den);
1343 NAStream { media_type: mt, id, num: 0, info: info.into_ref(), tb_num: n, tb_den: d, duration }
1344 }
1345 /// Returns stream id.
1346 pub fn get_id(&self) -> u32 { self.id }
1347 /// Returns stream type.
1348 pub fn get_media_type(&self) -> StreamType { self.media_type }
1349 /// Returns stream number assigned by demuxer.
1350 pub fn get_num(&self) -> usize { self.num }
1351 /// Sets stream number.
1352 pub fn set_num(&mut self, num: usize) { self.num = num; }
1353 /// Returns codec information.
1354 pub fn get_info(&self) -> NACodecInfoRef { self.info.clone() }
1355 /// Returns stream timebase.
1356 pub fn get_timebase(&self) -> (u32, u32) { (self.tb_num, self.tb_den) }
1357 /// Sets new stream timebase.
1358 pub fn set_timebase(&mut self, tb_num: u32, tb_den: u32) {
1359 let (n, d) = reduce_timebase(tb_num, tb_den);
1360 self.tb_num = n;
1361 self.tb_den = d;
1362 }
1363 /// Returns stream duration.
1364 pub fn get_duration(&self) -> u64 { self.duration }
1365 /// Constructs a new timestamp.
1366 pub fn make_ts(&self, pts: Option<u64>, dts: Option<u64>, duration: Option<u64>) -> NATimeInfo {
1367 NATimeInfo::new(pts, dts, duration, self.tb_num, self.tb_den)
1368 }
1369 /// Converts current instance into a reference-counted one.
1370 pub fn into_ref(self) -> NAStreamRef { Arc::new(self) }
1371 }
1372
impl fmt::Display for NAStream {
    // Prints the stream as `(<type>#<id> @ <tb_num>/<tb_den> - <codec properties>)`.
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        write!(f, "({}#{} @ {}/{} - {})", self.media_type, self.id, self.tb_num, self.tb_den, self.info.get_properties())
    }
}
1378
/// Side data that may accompany demuxed data.
#[derive(Clone)]
pub enum NASideData {
    /// Palette information.
    ///
    /// This side data contains a flag signalling that palette has changed since previous time and a reference to the current palette.
    /// Palette is stored in 8-bit RGBA format (256 entries, 4 bytes each).
    Palette(bool, Arc<[u8; 1024]>),
    /// Generic user data stored as an opaque byte buffer.
    UserData(Arc<Vec<u8>>),
}
1390
/// Packet with compressed data.
#[allow(dead_code)]
pub struct NAPacket {
    /// Stream this packet belongs to.
    stream: NAStreamRef,
    /// Packet timestamp.
    pub ts: NATimeInfo,
    /// Packet payload.
    buffer: NABufferRef<Vec<u8>>,
    /// Keyframe flag.
    pub keyframe: bool,
//    options: HashMap<String, NAValue<'a>>,
    /// Packet side data (e.g. palette for paletted formats).
    pub side_data: Vec<NASideData>,
}
1404
1405 impl NAPacket {
1406 /// Constructs a new `NAPacket` instance.
1407 pub fn new(stream: NAStreamRef, ts: NATimeInfo, kf: bool, vec: Vec<u8>) -> Self {
1408 // let mut vec: Vec<u8> = Vec::new();
1409 // vec.resize(size, 0);
1410 NAPacket { stream, ts, keyframe: kf, buffer: NABufferRef::new(vec), side_data: Vec::new() }
1411 }
1412 /// Constructs a new `NAPacket` instance reusing a buffer reference.
1413 pub fn new_from_refbuf(stream: NAStreamRef, ts: NATimeInfo, kf: bool, buffer: NABufferRef<Vec<u8>>) -> Self {
1414 NAPacket { stream, ts, keyframe: kf, buffer, side_data: Vec::new() }
1415 }
1416 /// Returns information about the stream packet belongs to.
1417 pub fn get_stream(&self) -> NAStreamRef { self.stream.clone() }
1418 /// Returns packet timestamp.
1419 pub fn get_time_information(&self) -> NATimeInfo { self.ts }
1420 /// Returns packet presentation timestamp.
1421 pub fn get_pts(&self) -> Option<u64> { self.ts.get_pts() }
1422 /// Returns packet decoding timestamp.
1423 pub fn get_dts(&self) -> Option<u64> { self.ts.get_dts() }
1424 /// Returns packet duration.
1425 pub fn get_duration(&self) -> Option<u64> { self.ts.get_duration() }
1426 /// Reports whether this is a keyframe packet.
1427 pub fn is_keyframe(&self) -> bool { self.keyframe }
1428 /// Returns a reference to packet data.
1429 pub fn get_buffer(&self) -> NABufferRef<Vec<u8>> { self.buffer.clone() }
1430 /// Adds side data for a packet.
1431 pub fn add_side_data(&mut self, side_data: NASideData) { self.side_data.push(side_data); }
1432 /// Assigns packet to a new stream.
1433 pub fn reassign(&mut self, stream: NAStreamRef, ts: NATimeInfo) {
1434 self.stream = stream;
1435 self.ts = ts;
1436 }
1437 }
1438
// Explicit no-op destructor.
// NOTE(review): this impl does nothing; presumably it is a leftover or a hook
// for future cleanup — confirm before removing, since a manual Drop impl also
// prevents moving fields out of `NAPacket`.
impl Drop for NAPacket {
    fn drop(&mut self) {}
}
1442
1443 impl fmt::Display for NAPacket {
1444 fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
1445 let mut ostr = format!("[pkt for {} size {}", self.stream, self.buffer.len());
1446 if let Some(pts) = self.ts.pts { ostr = format!("{} pts {}", ostr, pts); }
1447 if let Some(dts) = self.ts.dts { ostr = format!("{} dts {}", ostr, dts); }
1448 if let Some(dur) = self.ts.duration { ostr = format!("{} duration {}", ostr, dur); }
1449 if self.keyframe { ostr = format!("{} kf", ostr); }
1450 ostr += "]";
1451 write!(f, "{}", ostr)
1452 }
1453 }
1454
/// Packet with a piece of data for a raw stream.
pub struct NARawData {
    /// Stream this data belongs to.
    stream: NAStreamRef,
    /// Raw data payload.
    buffer: NABufferRef<Vec<u8>>,
}
1460
1461 impl NARawData {
1462 /// Constructs a new `NARawData` instance.
1463 pub fn new(stream: NAStreamRef, vec: Vec<u8>) -> Self {
1464 Self { stream, buffer: NABufferRef::new(vec) }
1465 }
1466 /// Constructs a new `NARawData` instance reusing a buffer reference.
1467 pub fn new_from_refbuf(stream: NAStreamRef, buffer: NABufferRef<Vec<u8>>) -> Self {
1468 Self { stream, buffer }
1469 }
1470 /// Returns information about the stream this data belongs to.
1471 pub fn get_stream(&self) -> NAStreamRef { self.stream.clone() }
1472 /// Returns a reference to packet data.
1473 pub fn get_buffer(&self) -> NABufferRef<Vec<u8>> { self.buffer.clone() }
1474 /// Assigns raw data to a new stream.
1475 pub fn reassign(&mut self, stream: NAStreamRef) {
1476 self.stream = stream;
1477 }
1478 }
1479
impl fmt::Display for NARawData {
    // Prints the data as `[raw data for <stream> size <len>]`.
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        write!(f, "[raw data for {} size {}]", self.stream, self.buffer.len())
    }
}
1485
#[cfg(test)]
mod test {
    use super::*;

    #[test]
    fn test_time_parse() {
        // Display formatting.
        assert_eq!(NATimePoint::PTS(42).to_string(), "42pts");
        assert_eq!(NATimePoint::Milliseconds(4242000).to_string(), "1:10:42");
        assert_eq!(NATimePoint::Milliseconds(42424242).to_string(), "11:47:04.242");
        // Parsing of the `<u64>pts` and `<u64>ms` forms.
        let ret = NATimePoint::from_str("42pts");
        assert_eq!(ret.unwrap(), NATimePoint::PTS(42));
        let ret = NATimePoint::from_str("42ms");
        assert_eq!(ret.unwrap(), NATimePoint::Milliseconds(42));
        // Parsing of the `[hh:][mm:]ss[.ms]` form.
        let ret = NATimePoint::from_str("1:2:3");
        assert_eq!(ret.unwrap(), NATimePoint::Milliseconds(3723000));
        let ret = NATimePoint::from_str("1:2:3.42");
        assert_eq!(ret.unwrap(), NATimePoint::Milliseconds(3723420));
        let ret = NATimePoint::from_str("1:05");
        assert_eq!(ret.unwrap(), NATimePoint::Milliseconds(65000));
        // Invalid inputs must be rejected.
        assert!(NATimePoint::from_str("").is_err());
        assert!(NATimePoint::from_str("1:60:00").is_err());
    }
}
1502 }