fbccfbee0a5d3106f0e14f323110548e669d202c
[nihav.git] / nihav-core / src / frame.rs
1 //! Packets and decoded frames functionality.
2 use std::cmp::max;
3 //use std::collections::HashMap;
4 use std::fmt;
5 pub use std::sync::Arc;
6 pub use crate::formats::*;
7 pub use crate::refs::*;
8 use std::str::FromStr;
9
/// Audio stream information.
///
/// Holds the essential parameters of an audio stream: sample rate,
/// channel count, sample format and the nominal block length.
#[allow(dead_code)]
#[derive(Clone,Copy,PartialEq)]
pub struct NAAudioInfo {
    /// Sample rate.
    pub sample_rate: u32,
    /// Number of channels.
    pub channels: u8,
    /// Audio sample format.
    pub format: NASoniton,
    /// Length of one audio block in samples.
    pub block_len: usize,
}
23
24 impl NAAudioInfo {
25 /// Constructs a new `NAAudioInfo` instance.
26 pub fn new(sr: u32, ch: u8, fmt: NASoniton, bl: usize) -> Self {
27 NAAudioInfo { sample_rate: sr, channels: ch, format: fmt, block_len: bl }
28 }
29 /// Returns audio sample rate.
30 pub fn get_sample_rate(&self) -> u32 { self.sample_rate }
31 /// Returns the number of channels.
32 pub fn get_channels(&self) -> u8 { self.channels }
33 /// Returns sample format.
34 pub fn get_format(&self) -> NASoniton { self.format }
35 /// Returns one audio block duration in samples.
36 pub fn get_block_len(&self) -> usize { self.block_len }
37 }
38
39 impl fmt::Display for NAAudioInfo {
40 fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
41 write!(f, "{} Hz, {} ch", self.sample_rate, self.channels)
42 }
43 }
44
/// Video stream information.
#[allow(dead_code)]
#[derive(Clone,Copy,PartialEq)]
pub struct NAVideoInfo {
    /// Picture width.
    pub width: usize,
    /// Picture height.
    pub height: usize,
    /// Picture is stored downside up (bottom line first).
    pub flipped: bool,
    /// Picture pixel format.
    pub format: NAPixelFormaton,
    /// Declared bits per sample (taken from the pixel format's total depth).
    pub bits: u8,
}
60
61 impl NAVideoInfo {
62 /// Constructs a new `NAVideoInfo` instance.
63 pub fn new(w: usize, h: usize, flip: bool, fmt: NAPixelFormaton) -> Self {
64 let bits = fmt.get_total_depth();
65 NAVideoInfo { width: w, height: h, flipped: flip, format: fmt, bits }
66 }
67 /// Returns picture width.
68 pub fn get_width(&self) -> usize { self.width as usize }
69 /// Returns picture height.
70 pub fn get_height(&self) -> usize { self.height as usize }
71 /// Returns picture orientation.
72 pub fn is_flipped(&self) -> bool { self.flipped }
73 /// Returns picture pixel format.
74 pub fn get_format(&self) -> NAPixelFormaton { self.format }
75 /// Sets new picture width.
76 pub fn set_width(&mut self, w: usize) { self.width = w; }
77 /// Sets new picture height.
78 pub fn set_height(&mut self, h: usize) { self.height = h; }
79 }
80
81 impl fmt::Display for NAVideoInfo {
82 fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
83 write!(f, "{}x{}", self.width, self.height)
84 }
85 }
86
/// A list of possible stream information types.
#[derive(Clone,Copy,PartialEq)]
pub enum NACodecTypeInfo {
    /// No codec present.
    None,
    /// Audio codec information.
    Audio(NAAudioInfo),
    /// Video codec information.
    Video(NAVideoInfo),
}
97
98 impl NACodecTypeInfo {
99 /// Returns video stream information.
100 pub fn get_video_info(&self) -> Option<NAVideoInfo> {
101 match *self {
102 NACodecTypeInfo::Video(vinfo) => Some(vinfo),
103 _ => None,
104 }
105 }
106 /// Returns audio stream information.
107 pub fn get_audio_info(&self) -> Option<NAAudioInfo> {
108 match *self {
109 NACodecTypeInfo::Audio(ainfo) => Some(ainfo),
110 _ => None,
111 }
112 }
113 /// Reports whether the current stream is video stream.
114 pub fn is_video(&self) -> bool {
115 match *self {
116 NACodecTypeInfo::Video(_) => true,
117 _ => false,
118 }
119 }
120 /// Reports whether the current stream is audio stream.
121 pub fn is_audio(&self) -> bool {
122 match *self {
123 NACodecTypeInfo::Audio(_) => true,
124 _ => false,
125 }
126 }
127 }
128
129 impl fmt::Display for NACodecTypeInfo {
130 fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
131 let ret = match *self {
132 NACodecTypeInfo::None => "".to_string(),
133 NACodecTypeInfo::Audio(fmt) => format!("{}", fmt),
134 NACodecTypeInfo::Video(fmt) => format!("{}", fmt),
135 };
136 write!(f, "{}", ret)
137 }
138 }
139
/// Decoded video frame.
///
/// NihAV frames are stored in native type (8/16/32-bit elements) inside a single buffer.
/// In case of image with several components those components are stored sequentially and can be accessed in the buffer starting at corresponding component offset.
#[derive(Clone)]
pub struct NAVideoBuffer<T> {
    /// Picture parameters (dimensions, format, orientation).
    info: NAVideoInfo,
    /// Reference-counted pixel storage shared between buffer clones.
    data: NABufferRef<Vec<T>>,
    /// Start of each component inside `data`.
    offs: Vec<usize>,
    /// Line stride for each component.
    strides: Vec<usize>,
}
151
152 impl<T: Clone> NAVideoBuffer<T> {
153 /// Returns the component offset (0 for all unavailable offsets).
154 pub fn get_offset(&self, idx: usize) -> usize {
155 if idx >= self.offs.len() { 0 }
156 else { self.offs[idx] }
157 }
158 /// Returns picture info.
159 pub fn get_info(&self) -> NAVideoInfo { self.info }
160 /// Returns an immutable reference to the data.
161 pub fn get_data(&self) -> &Vec<T> { self.data.as_ref() }
162 /// Returns a mutable reference to the data.
163 pub fn get_data_mut(&mut self) -> Option<&mut Vec<T>> { self.data.as_mut() }
164 /// Returns the number of components in picture format.
165 pub fn get_num_components(&self) -> usize { self.offs.len() }
166 /// Creates a copy of current `NAVideoBuffer`.
167 pub fn copy_buffer(&mut self) -> Self {
168 let mut data: Vec<T> = Vec::with_capacity(self.data.len());
169 data.clone_from(self.data.as_ref());
170 let mut offs: Vec<usize> = Vec::with_capacity(self.offs.len());
171 offs.clone_from(&self.offs);
172 let mut strides: Vec<usize> = Vec::with_capacity(self.strides.len());
173 strides.clone_from(&self.strides);
174 NAVideoBuffer { info: self.info, data: NABufferRef::new(data), offs, strides }
175 }
176 /// Returns stride (distance between subsequent lines) for the requested component.
177 pub fn get_stride(&self, idx: usize) -> usize {
178 if idx >= self.strides.len() { return 0; }
179 self.strides[idx]
180 }
181 /// Returns requested component dimensions.
182 pub fn get_dimensions(&self, idx: usize) -> (usize, usize) {
183 get_plane_size(&self.info, idx)
184 }
185 /// Converts current instance into buffer reference.
186 pub fn into_ref(self) -> NABufferRef<Self> {
187 NABufferRef::new(self)
188 }
189
190 fn print_contents(&self, datatype: &str) {
191 println!("{} video buffer size {}", datatype, self.data.len());
192 println!(" format {}", self.info);
193 print!(" offsets:");
194 for off in self.offs.iter() {
195 print!(" {}", *off);
196 }
197 println!();
198 print!(" strides:");
199 for stride in self.strides.iter() {
200 print!(" {}", *stride);
201 }
202 println!();
203 }
204 }
205
/// A specialised type for reference-counted `NAVideoBuffer`.
pub type NAVideoBufferRef<T> = NABufferRef<NAVideoBuffer<T>>;
208
/// Decoded audio frame.
///
/// NihAV frames are stored in native type (8/16/32-bit elements) inside a single buffer.
/// In case of planar audio samples for each channel are stored sequentially and can be accessed in the buffer starting at corresponding channel offset.
#[derive(Clone)]
pub struct NAAudioBuffer<T> {
    /// Audio stream parameters.
    info: NAAudioInfo,
    /// Reference-counted sample storage shared between buffer clones.
    data: NABufferRef<Vec<T>>,
    /// Start of each channel inside `data` (used by planar layouts).
    offs: Vec<usize>,
    /// Distance between the starts of two channels.
    stride: usize,
    /// Distance between two samples of the same channel.
    step: usize,
    /// Channel layout description.
    chmap: NAChannelMap,
    /// Frame length in samples.
    len: usize,
}
223
224 impl<T: Clone> NAAudioBuffer<T> {
225 /// Returns the start position of requested channel data.
226 pub fn get_offset(&self, idx: usize) -> usize {
227 if idx >= self.offs.len() { 0 }
228 else { self.offs[idx] }
229 }
230 /// Returns the distance between the start of one channel and the next one.
231 pub fn get_stride(&self) -> usize { self.stride }
232 /// Returns the distance between the samples in one channel.
233 pub fn get_step(&self) -> usize { self.step }
234 /// Returns audio format information.
235 pub fn get_info(&self) -> NAAudioInfo { self.info }
236 /// Returns channel map.
237 pub fn get_chmap(&self) -> &NAChannelMap { &self.chmap }
238 /// Returns an immutable reference to the data.
239 pub fn get_data(&self) -> &Vec<T> { self.data.as_ref() }
240 /// Returns reference to the data.
241 pub fn get_data_ref(&self) -> NABufferRef<Vec<T>> { self.data.clone() }
242 /// Returns a mutable reference to the data.
243 pub fn get_data_mut(&mut self) -> Option<&mut Vec<T>> { self.data.as_mut() }
244 /// Clones current `NAAudioBuffer` into a new one.
245 pub fn copy_buffer(&mut self) -> Self {
246 let mut data: Vec<T> = Vec::with_capacity(self.data.len());
247 data.clone_from(self.data.as_ref());
248 let mut offs: Vec<usize> = Vec::with_capacity(self.offs.len());
249 offs.clone_from(&self.offs);
250 NAAudioBuffer { info: self.info, data: NABufferRef::new(data), offs, chmap: self.get_chmap().clone(), len: self.len, stride: self.stride, step: self.step }
251 }
252 /// Return the length of frame in samples.
253 pub fn get_length(&self) -> usize { self.len }
254 /// Truncates buffer length if possible.
255 ///
256 /// In case when new length is larger than old length nothing is done.
257 pub fn truncate(&mut self, new_len: usize) {
258 self.len = self.len.min(new_len);
259 }
260
261 fn print_contents(&self, datatype: &str) {
262 println!("Audio buffer with {} data, stride {}, step {}", datatype, self.stride, self.step);
263 println!(" format {}", self.info);
264 println!(" channel map {}", self.chmap);
265 print!(" offsets:");
266 for off in self.offs.iter() {
267 print!(" {}", *off);
268 }
269 println!();
270 }
271 }
272
273 impl NAAudioBuffer<u8> {
274 /// Constructs a new `NAAudioBuffer` instance.
275 pub fn new_from_buf(info: NAAudioInfo, data: NABufferRef<Vec<u8>>, chmap: NAChannelMap) -> Self {
276 let len = data.len();
277 NAAudioBuffer { info, data, chmap, offs: Vec::new(), len, stride: 0, step: 0 }
278 }
279 }
280
/// A list of possible decoded frame types.
#[derive(Clone)]
pub enum NABufferType {
    /// 8-bit video buffer.
    Video      (NAVideoBufferRef<u8>),
    /// 16-bit video buffer (i.e. every component or packed pixel fits into 16 bits).
    Video16    (NAVideoBufferRef<u16>),
    /// 32-bit video buffer (i.e. every component or packed pixel fits into 32 bits).
    Video32    (NAVideoBufferRef<u32>),
    /// Packed video buffer.
    VideoPacked(NAVideoBufferRef<u8>),
    /// Audio buffer with 8-bit unsigned integer audio.
    AudioU8    (NAAudioBuffer<u8>),
    /// Audio buffer with 16-bit signed integer audio.
    AudioI16   (NAAudioBuffer<i16>),
    /// Audio buffer with 32-bit signed integer audio.
    AudioI32   (NAAudioBuffer<i32>),
    /// Audio buffer with 32-bit floating point audio.
    AudioF32   (NAAudioBuffer<f32>),
    /// Packed audio buffer.
    AudioPacked(NAAudioBuffer<u8>),
    /// Buffer with generic data (e.g. subtitles).
    Data       (NABufferRef<Vec<u8>>),
    /// No data present.
    None,
}
307
308 impl NABufferType {
309 /// Returns the offset to the requested component or channel.
310 pub fn get_offset(&self, idx: usize) -> usize {
311 match *self {
312 NABufferType::Video(ref vb) => vb.get_offset(idx),
313 NABufferType::Video16(ref vb) => vb.get_offset(idx),
314 NABufferType::Video32(ref vb) => vb.get_offset(idx),
315 NABufferType::VideoPacked(ref vb) => vb.get_offset(idx),
316 NABufferType::AudioU8(ref ab) => ab.get_offset(idx),
317 NABufferType::AudioI16(ref ab) => ab.get_offset(idx),
318 NABufferType::AudioI32(ref ab) => ab.get_offset(idx),
319 NABufferType::AudioF32(ref ab) => ab.get_offset(idx),
320 NABufferType::AudioPacked(ref ab) => ab.get_offset(idx),
321 _ => 0,
322 }
323 }
324 /// Returns information for video frames.
325 pub fn get_video_info(&self) -> Option<NAVideoInfo> {
326 match *self {
327 NABufferType::Video(ref vb) => Some(vb.get_info()),
328 NABufferType::Video16(ref vb) => Some(vb.get_info()),
329 NABufferType::Video32(ref vb) => Some(vb.get_info()),
330 NABufferType::VideoPacked(ref vb) => Some(vb.get_info()),
331 _ => None,
332 }
333 }
334 /// Returns reference to 8-bit (or packed) video buffer.
335 pub fn get_vbuf(&self) -> Option<NAVideoBufferRef<u8>> {
336 match *self {
337 NABufferType::Video(ref vb) => Some(vb.clone()),
338 NABufferType::VideoPacked(ref vb) => Some(vb.clone()),
339 _ => None,
340 }
341 }
342 /// Returns reference to 16-bit video buffer.
343 pub fn get_vbuf16(&self) -> Option<NAVideoBufferRef<u16>> {
344 match *self {
345 NABufferType::Video16(ref vb) => Some(vb.clone()),
346 _ => None,
347 }
348 }
349 /// Returns reference to 32-bit video buffer.
350 pub fn get_vbuf32(&self) -> Option<NAVideoBufferRef<u32>> {
351 match *self {
352 NABufferType::Video32(ref vb) => Some(vb.clone()),
353 _ => None,
354 }
355 }
356 /// Returns information for audio frames.
357 pub fn get_audio_info(&self) -> Option<NAAudioInfo> {
358 match *self {
359 NABufferType::AudioU8(ref ab) => Some(ab.get_info()),
360 NABufferType::AudioI16(ref ab) => Some(ab.get_info()),
361 NABufferType::AudioI32(ref ab) => Some(ab.get_info()),
362 NABufferType::AudioF32(ref ab) => Some(ab.get_info()),
363 NABufferType::AudioPacked(ref ab) => Some(ab.get_info()),
364 _ => None,
365 }
366 }
367 /// Returns audio channel map.
368 pub fn get_chmap(&self) -> Option<&NAChannelMap> {
369 match *self {
370 NABufferType::AudioU8(ref ab) => Some(ab.get_chmap()),
371 NABufferType::AudioI16(ref ab) => Some(ab.get_chmap()),
372 NABufferType::AudioI32(ref ab) => Some(ab.get_chmap()),
373 NABufferType::AudioF32(ref ab) => Some(ab.get_chmap()),
374 NABufferType::AudioPacked(ref ab) => Some(ab.get_chmap()),
375 _ => None,
376 }
377 }
378 /// Returns audio frame duration in samples.
379 pub fn get_audio_length(&self) -> usize {
380 match *self {
381 NABufferType::AudioU8(ref ab) => ab.get_length(),
382 NABufferType::AudioI16(ref ab) => ab.get_length(),
383 NABufferType::AudioI32(ref ab) => ab.get_length(),
384 NABufferType::AudioF32(ref ab) => ab.get_length(),
385 NABufferType::AudioPacked(ref ab) => ab.get_length(),
386 _ => 0,
387 }
388 }
389 /// Returns the distance between starts of two channels.
390 pub fn get_audio_stride(&self) -> usize {
391 match *self {
392 NABufferType::AudioU8(ref ab) => ab.get_stride(),
393 NABufferType::AudioI16(ref ab) => ab.get_stride(),
394 NABufferType::AudioI32(ref ab) => ab.get_stride(),
395 NABufferType::AudioF32(ref ab) => ab.get_stride(),
396 NABufferType::AudioPacked(ref ab) => ab.get_stride(),
397 _ => 0,
398 }
399 }
400 /// Returns the distance between two samples in one channel.
401 pub fn get_audio_step(&self) -> usize {
402 match *self {
403 NABufferType::AudioU8(ref ab) => ab.get_step(),
404 NABufferType::AudioI16(ref ab) => ab.get_step(),
405 NABufferType::AudioI32(ref ab) => ab.get_step(),
406 NABufferType::AudioF32(ref ab) => ab.get_step(),
407 NABufferType::AudioPacked(ref ab) => ab.get_step(),
408 _ => 0,
409 }
410 }
411 /// Returns reference to 8-bit (or packed) audio buffer.
412 pub fn get_abuf_u8(&self) -> Option<NAAudioBuffer<u8>> {
413 match *self {
414 NABufferType::AudioU8(ref ab) => Some(ab.clone()),
415 NABufferType::AudioPacked(ref ab) => Some(ab.clone()),
416 _ => None,
417 }
418 }
419 /// Returns reference to 16-bit audio buffer.
420 pub fn get_abuf_i16(&self) -> Option<NAAudioBuffer<i16>> {
421 match *self {
422 NABufferType::AudioI16(ref ab) => Some(ab.clone()),
423 _ => None,
424 }
425 }
426 /// Returns reference to 32-bit integer audio buffer.
427 pub fn get_abuf_i32(&self) -> Option<NAAudioBuffer<i32>> {
428 match *self {
429 NABufferType::AudioI32(ref ab) => Some(ab.clone()),
430 _ => None,
431 }
432 }
433 /// Returns reference to 32-bit floating point audio buffer.
434 pub fn get_abuf_f32(&self) -> Option<NAAudioBuffer<f32>> {
435 match *self {
436 NABufferType::AudioF32(ref ab) => Some(ab.clone()),
437 _ => None,
438 }
439 }
440 /// Prints internal buffer layout.
441 pub fn print_buffer_metadata(&self) {
442 match *self {
443 NABufferType::Video(ref buf) => buf.print_contents("8-bit"),
444 NABufferType::Video16(ref buf) => buf.print_contents("16-bit"),
445 NABufferType::Video32(ref buf) => buf.print_contents("32-bit"),
446 NABufferType::VideoPacked(ref buf) => buf.print_contents("packed"),
447 NABufferType::AudioU8(ref buf) => buf.print_contents("8-bit unsigned integer"),
448 NABufferType::AudioI16(ref buf) => buf.print_contents("16-bit integer"),
449 NABufferType::AudioI32(ref buf) => buf.print_contents("32-bit integer"),
450 NABufferType::AudioF32(ref buf) => buf.print_contents("32-bit float"),
451 NABufferType::AudioPacked(ref buf) => buf.print_contents("packed"),
452 NABufferType::Data(ref buf) => { println!("Data buffer, len = {}", buf.len()); },
453 NABufferType::None => { println!("No buffer"); },
454 };
455 }
456 }
457
/// Maximum number of picture components `NASimpleVideoFrame` can describe.
const NA_SIMPLE_VFRAME_COMPONENTS: usize = 4;
/// Simplified decoded frame data.
pub struct NASimpleVideoFrame<'a, T: Copy> {
    /// Widths of each picture component.
    pub width: [usize; NA_SIMPLE_VFRAME_COMPONENTS],
    /// Heights of each picture component.
    pub height: [usize; NA_SIMPLE_VFRAME_COMPONENTS],
    /// Orientation (upside-down or downside-up) flag.
    pub flip: bool,
    /// Strides for each component.
    pub stride: [usize; NA_SIMPLE_VFRAME_COMPONENTS],
    /// Start of each component.
    pub offset: [usize; NA_SIMPLE_VFRAME_COMPONENTS],
    /// Number of components.
    pub components: usize,
    /// Pointer to the picture pixel data.
    pub data: &'a mut [T],
}
476
477 impl<'a, T:Copy> NASimpleVideoFrame<'a, T> {
478 /// Constructs a new instance of `NASimpleVideoFrame` from `NAVideoBuffer`.
479 pub fn from_video_buf(vbuf: &'a mut NAVideoBuffer<T>) -> Option<Self> {
480 let vinfo = vbuf.get_info();
481 let components = vinfo.format.components as usize;
482 if components > NA_SIMPLE_VFRAME_COMPONENTS {
483 return None;
484 }
485 let mut w: [usize; NA_SIMPLE_VFRAME_COMPONENTS] = [0; NA_SIMPLE_VFRAME_COMPONENTS];
486 let mut h: [usize; NA_SIMPLE_VFRAME_COMPONENTS] = [0; NA_SIMPLE_VFRAME_COMPONENTS];
487 let mut s: [usize; NA_SIMPLE_VFRAME_COMPONENTS] = [0; NA_SIMPLE_VFRAME_COMPONENTS];
488 let mut o: [usize; NA_SIMPLE_VFRAME_COMPONENTS] = [0; NA_SIMPLE_VFRAME_COMPONENTS];
489 for comp in 0..components {
490 let (width, height) = vbuf.get_dimensions(comp);
491 w[comp] = width;
492 h[comp] = height;
493 s[comp] = vbuf.get_stride(comp);
494 o[comp] = vbuf.get_offset(comp);
495 }
496 let flip = vinfo.flipped;
497 Some(NASimpleVideoFrame {
498 width: w,
499 height: h,
500 flip,
501 stride: s,
502 offset: o,
503 components,
504 data: vbuf.data.as_mut_slice(),
505 })
506 }
507 }
508
/// A list of possible frame allocator errors.
#[derive(Debug,Clone,Copy,PartialEq)]
pub enum AllocatorError {
    /// Requested picture dimensions are too large.
    TooLargeDimensions,
    /// Invalid input format.
    FormatError,
}
517
518 /// Constructs a new video buffer with requested format.
519 ///
520 /// `align` is power of two alignment for image. E.g. the value of 5 means that frame dimensions will be padded to be multiple of 32.
521 pub fn alloc_video_buffer(vinfo: NAVideoInfo, align: u8) -> Result<NABufferType, AllocatorError> {
522 let fmt = &vinfo.format;
523 let mut new_size: usize = 0;
524 let mut offs: Vec<usize> = Vec::new();
525 let mut strides: Vec<usize> = Vec::new();
526
527 for i in 0..fmt.get_num_comp() {
528 if fmt.get_chromaton(i) == None { return Err(AllocatorError::FormatError); }
529 }
530
531 let align_mod = ((1 << align) as usize) - 1;
532 let width = ((vinfo.width as usize) + align_mod) & !align_mod;
533 let height = ((vinfo.height as usize) + align_mod) & !align_mod;
534 let mut max_depth = 0;
535 let mut all_packed = true;
536 let mut all_bytealigned = true;
537 for i in 0..fmt.get_num_comp() {
538 let ochr = fmt.get_chromaton(i);
539 if ochr.is_none() { continue; }
540 let chr = ochr.unwrap();
541 if !chr.is_packed() {
542 all_packed = false;
543 } else if ((chr.get_shift() + chr.get_depth()) & 7) != 0 {
544 all_bytealigned = false;
545 }
546 max_depth = max(max_depth, chr.get_depth());
547 }
548 let unfit_elem_size = match fmt.get_elem_size() {
549 2 | 4 => false,
550 _ => true,
551 };
552
553 //todo semi-packed like NV12
554 if fmt.is_paletted() {
555 //todo various-sized palettes?
556 let stride = vinfo.get_format().get_chromaton(0).unwrap().get_linesize(width);
557 let pic_sz = stride.checked_mul(height);
558 if pic_sz == None { return Err(AllocatorError::TooLargeDimensions); }
559 let pal_size = 256 * (fmt.get_elem_size() as usize);
560 let new_size = pic_sz.unwrap().checked_add(pal_size);
561 if new_size == None { return Err(AllocatorError::TooLargeDimensions); }
562 offs.push(0);
563 offs.push(stride * height);
564 strides.push(stride);
565 let data: Vec<u8> = vec![0; new_size.unwrap()];
566 let buf: NAVideoBuffer<u8> = NAVideoBuffer { data: NABufferRef::new(data), info: vinfo, offs, strides };
567 Ok(NABufferType::Video(buf.into_ref()))
568 } else if !all_packed {
569 for i in 0..fmt.get_num_comp() {
570 let ochr = fmt.get_chromaton(i);
571 if ochr.is_none() { continue; }
572 let chr = ochr.unwrap();
573 offs.push(new_size as usize);
574 let stride = chr.get_linesize(width);
575 let cur_h = chr.get_height(height);
576 let cur_sz = stride.checked_mul(cur_h);
577 if cur_sz == None { return Err(AllocatorError::TooLargeDimensions); }
578 let new_sz = new_size.checked_add(cur_sz.unwrap());
579 if new_sz == None { return Err(AllocatorError::TooLargeDimensions); }
580 new_size = new_sz.unwrap();
581 strides.push(stride);
582 }
583 if max_depth <= 8 {
584 let data: Vec<u8> = vec![0; new_size];
585 let buf: NAVideoBuffer<u8> = NAVideoBuffer { data: NABufferRef::new(data), info: vinfo, offs, strides };
586 Ok(NABufferType::Video(buf.into_ref()))
587 } else if max_depth <= 16 {
588 let data: Vec<u16> = vec![0; new_size];
589 let buf: NAVideoBuffer<u16> = NAVideoBuffer { data: NABufferRef::new(data), info: vinfo, offs, strides };
590 Ok(NABufferType::Video16(buf.into_ref()))
591 } else {
592 let data: Vec<u32> = vec![0; new_size];
593 let buf: NAVideoBuffer<u32> = NAVideoBuffer { data: NABufferRef::new(data), info: vinfo, offs, strides };
594 Ok(NABufferType::Video32(buf.into_ref()))
595 }
596 } else if all_bytealigned || unfit_elem_size {
597 let elem_sz = fmt.get_elem_size();
598 let line_sz = width.checked_mul(elem_sz as usize);
599 if line_sz == None { return Err(AllocatorError::TooLargeDimensions); }
600 let new_sz = line_sz.unwrap().checked_mul(height);
601 if new_sz == None { return Err(AllocatorError::TooLargeDimensions); }
602 new_size = new_sz.unwrap();
603 let data: Vec<u8> = vec![0; new_size];
604 strides.push(line_sz.unwrap());
605 let buf: NAVideoBuffer<u8> = NAVideoBuffer { data: NABufferRef::new(data), info: vinfo, offs, strides };
606 Ok(NABufferType::VideoPacked(buf.into_ref()))
607 } else {
608 let elem_sz = fmt.get_elem_size();
609 let new_sz = width.checked_mul(height);
610 if new_sz == None { return Err(AllocatorError::TooLargeDimensions); }
611 new_size = new_sz.unwrap();
612 match elem_sz {
613 2 => {
614 let data: Vec<u16> = vec![0; new_size];
615 strides.push(width);
616 let buf: NAVideoBuffer<u16> = NAVideoBuffer { data: NABufferRef::new(data), info: vinfo, offs, strides };
617 Ok(NABufferType::Video16(buf.into_ref()))
618 },
619 4 => {
620 let data: Vec<u32> = vec![0; new_size];
621 strides.push(width);
622 let buf: NAVideoBuffer<u32> = NAVideoBuffer { data: NABufferRef::new(data), info: vinfo, offs, strides };
623 Ok(NABufferType::Video32(buf.into_ref()))
624 },
625 _ => unreachable!(),
626 }
627 }
628 }
629
630 /// Constructs a new audio buffer for the requested format and length.
631 #[allow(clippy::collapsible_if)]
632 pub fn alloc_audio_buffer(ainfo: NAAudioInfo, nsamples: usize, chmap: NAChannelMap) -> Result<NABufferType, AllocatorError> {
633 let mut offs: Vec<usize> = Vec::new();
634 if ainfo.format.is_planar() || ((ainfo.format.get_bits() % 8) == 0) {
635 let len = nsamples.checked_mul(ainfo.channels as usize);
636 if len == None { return Err(AllocatorError::TooLargeDimensions); }
637 let length = len.unwrap();
638 let stride;
639 let step;
640 if ainfo.format.is_planar() {
641 stride = nsamples;
642 step = 1;
643 for i in 0..ainfo.channels {
644 offs.push((i as usize) * stride);
645 }
646 } else {
647 stride = 1;
648 step = ainfo.channels as usize;
649 for i in 0..ainfo.channels {
650 offs.push(i as usize);
651 }
652 }
653 if ainfo.format.is_float() {
654 if ainfo.format.get_bits() == 32 {
655 let data: Vec<f32> = vec![0.0; length];
656 let buf: NAAudioBuffer<f32> = NAAudioBuffer { data: NABufferRef::new(data), info: ainfo, offs, chmap, len: nsamples, stride, step };
657 Ok(NABufferType::AudioF32(buf))
658 } else {
659 Err(AllocatorError::TooLargeDimensions)
660 }
661 } else {
662 if ainfo.format.get_bits() == 8 && !ainfo.format.is_signed() {
663 let data: Vec<u8> = vec![0; length];
664 let buf: NAAudioBuffer<u8> = NAAudioBuffer { data: NABufferRef::new(data), info: ainfo, offs, chmap, len: nsamples, stride, step };
665 Ok(NABufferType::AudioU8(buf))
666 } else if ainfo.format.get_bits() == 16 && ainfo.format.is_signed() {
667 let data: Vec<i16> = vec![0; length];
668 let buf: NAAudioBuffer<i16> = NAAudioBuffer { data: NABufferRef::new(data), info: ainfo, offs, chmap, len: nsamples, stride, step };
669 Ok(NABufferType::AudioI16(buf))
670 } else if ainfo.format.get_bits() == 32 && ainfo.format.is_signed() {
671 let data: Vec<i32> = vec![0; length];
672 let buf: NAAudioBuffer<i32> = NAAudioBuffer { data: NABufferRef::new(data), info: ainfo, offs, chmap, len: nsamples, stride, step };
673 Ok(NABufferType::AudioI32(buf))
674 } else {
675 Err(AllocatorError::TooLargeDimensions)
676 }
677 }
678 } else {
679 let len = nsamples.checked_mul(ainfo.channels as usize);
680 if len == None { return Err(AllocatorError::TooLargeDimensions); }
681 let length = ainfo.format.get_audio_size(len.unwrap() as u64);
682 let data: Vec<u8> = vec![0; length];
683 let buf: NAAudioBuffer<u8> = NAAudioBuffer { data: NABufferRef::new(data), info: ainfo, offs, chmap, len: nsamples, stride: 0, step: 0 };
684 Ok(NABufferType::AudioPacked(buf))
685 }
686 }
687
688 /// Constructs a new buffer for generic data.
689 pub fn alloc_data_buffer(size: usize) -> Result<NABufferType, AllocatorError> {
690 let data: Vec<u8> = vec![0; size];
691 let buf: NABufferRef<Vec<u8>> = NABufferRef::new(data);
692 Ok(NABufferType::Data(buf))
693 }
694
695 /// Creates a clone of current buffer.
696 pub fn copy_buffer(buf: NABufferType) -> NABufferType {
697 buf.clone()
698 }
699
/// Video frame pool.
///
/// This structure allows codec to effectively reuse old frames instead of allocating and de-allocating frames every time.
/// Caller can also reserve some frames for its own purposes e.g. display queue.
pub struct NAVideoBufferPool<T:Copy> {
    /// Preallocated frames.
    pool: Vec<NAVideoBufferRef<T>>,
    /// Number of frames the decoder itself needs.
    max_len: usize,
    /// Additional frames reserved for the caller.
    add_len: usize,
}
709
710 impl<T:Copy> NAVideoBufferPool<T> {
711 /// Constructs a new `NAVideoBufferPool` instance.
712 pub fn new(max_len: usize) -> Self {
713 Self {
714 pool: Vec::with_capacity(max_len),
715 max_len,
716 add_len: 0,
717 }
718 }
719 /// Sets the number of buffers reserved for the user.
720 pub fn set_dec_bufs(&mut self, add_len: usize) {
721 self.add_len = add_len;
722 }
723 /// Returns an unused buffer from the pool.
724 pub fn get_free(&mut self) -> Option<NAVideoBufferRef<T>> {
725 for e in self.pool.iter() {
726 if e.get_num_refs() == 1 {
727 return Some(e.clone());
728 }
729 }
730 None
731 }
732 /// Clones provided frame data into a free pool frame.
733 pub fn get_copy(&mut self, rbuf: &NAVideoBufferRef<T>) -> Option<NAVideoBufferRef<T>> {
734 let mut dbuf = self.get_free()?;
735 dbuf.data.copy_from_slice(&rbuf.data);
736 Some(dbuf)
737 }
738 /// Clears the pool from all frames.
739 pub fn reset(&mut self) {
740 self.pool.truncate(0);
741 }
742 }
743
744 impl NAVideoBufferPool<u8> {
745 /// Allocates the target amount of video frames using [`alloc_video_buffer`].
746 ///
747 /// [`alloc_video_buffer`]: ./fn.alloc_video_buffer.html
748 pub fn prealloc_video(&mut self, vinfo: NAVideoInfo, align: u8) -> Result<(), AllocatorError> {
749 let nbufs = self.max_len + self.add_len - self.pool.len();
750 for _ in 0..nbufs {
751 let vbuf = alloc_video_buffer(vinfo, align)?;
752 if let NABufferType::Video(buf) = vbuf {
753 self.pool.push(buf);
754 } else if let NABufferType::VideoPacked(buf) = vbuf {
755 self.pool.push(buf);
756 } else {
757 return Err(AllocatorError::FormatError);
758 }
759 }
760 Ok(())
761 }
762 }
763
764 impl NAVideoBufferPool<u16> {
765 /// Allocates the target amount of video frames using [`alloc_video_buffer`].
766 ///
767 /// [`alloc_video_buffer`]: ./fn.alloc_video_buffer.html
768 pub fn prealloc_video(&mut self, vinfo: NAVideoInfo, align: u8) -> Result<(), AllocatorError> {
769 let nbufs = self.max_len + self.add_len - self.pool.len();
770 for _ in 0..nbufs {
771 let vbuf = alloc_video_buffer(vinfo, align)?;
772 if let NABufferType::Video16(buf) = vbuf {
773 self.pool.push(buf);
774 } else {
775 return Err(AllocatorError::FormatError);
776 }
777 }
778 Ok(())
779 }
780 }
781
782 impl NAVideoBufferPool<u32> {
783 /// Allocates the target amount of video frames using [`alloc_video_buffer`].
784 ///
785 /// [`alloc_video_buffer`]: ./fn.alloc_video_buffer.html
786 pub fn prealloc_video(&mut self, vinfo: NAVideoInfo, align: u8) -> Result<(), AllocatorError> {
787 let nbufs = self.max_len + self.add_len - self.pool.len();
788 for _ in 0..nbufs {
789 let vbuf = alloc_video_buffer(vinfo, align)?;
790 if let NABufferType::Video32(buf) = vbuf {
791 self.pool.push(buf);
792 } else {
793 return Err(AllocatorError::FormatError);
794 }
795 }
796 Ok(())
797 }
798 }
799
/// Information about codec contained in a stream.
#[allow(dead_code)]
#[derive(Clone)]
pub struct NACodecInfo {
    /// Short codec name.
    name: &'static str,
    /// Codec type and parameters (audio, video or none).
    properties: NACodecTypeInfo,
    /// Optional codec-specific initialisation data.
    extradata: Option<Arc<Vec<u8>>>,
}

/// A specialised type for reference-counted `NACodecInfo`.
pub type NACodecInfoRef = Arc<NACodecInfo>;
811
812 impl NACodecInfo {
813 /// Constructs a new instance of `NACodecInfo`.
814 pub fn new(name: &'static str, p: NACodecTypeInfo, edata: Option<Vec<u8>>) -> Self {
815 let extradata = match edata {
816 None => None,
817 Some(vec) => Some(Arc::new(vec)),
818 };
819 NACodecInfo { name, properties: p, extradata }
820 }
821 /// Constructs a new reference-counted instance of `NACodecInfo`.
822 pub fn new_ref(name: &'static str, p: NACodecTypeInfo, edata: Option<Arc<Vec<u8>>>) -> Self {
823 NACodecInfo { name, properties: p, extradata: edata }
824 }
825 /// Converts current instance into a reference-counted one.
826 pub fn into_ref(self) -> NACodecInfoRef { Arc::new(self) }
827 /// Returns codec information.
828 pub fn get_properties(&self) -> NACodecTypeInfo { self.properties }
829 /// Returns additional initialisation data required by the codec.
830 pub fn get_extradata(&self) -> Option<Arc<Vec<u8>>> {
831 if let Some(ref vec) = self.extradata { return Some(vec.clone()); }
832 None
833 }
834 /// Returns codec name.
835 pub fn get_name(&self) -> &'static str { self.name }
836 /// Reports whether it is a video codec.
837 pub fn is_video(&self) -> bool {
838 if let NACodecTypeInfo::Video(_) = self.properties { return true; }
839 false
840 }
841 /// Reports whether it is an audio codec.
842 pub fn is_audio(&self) -> bool {
843 if let NACodecTypeInfo::Audio(_) = self.properties { return true; }
844 false
845 }
846 /// Constructs a new empty reference-counted instance of `NACodecInfo`.
847 pub fn new_dummy() -> Arc<Self> {
848 Arc::new(DUMMY_CODEC_INFO)
849 }
850 /// Updates codec infomation.
851 pub fn replace_info(&self, p: NACodecTypeInfo) -> Arc<Self> {
852 Arc::new(NACodecInfo { name: self.name, properties: p, extradata: self.extradata.clone() })
853 }
854 }
855
impl Default for NACodecInfo {
    /// Returns the empty placeholder codec information (`DUMMY_CODEC_INFO`).
    fn default() -> Self { DUMMY_CODEC_INFO }
}
859
860 impl fmt::Display for NACodecInfo {
861 fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
862 let edata = match self.extradata.clone() {
863 None => "no extradata".to_string(),
864 Some(v) => format!("{} byte(s) of extradata", v.len()),
865 };
866 write!(f, "{}: {} {}", self.name, self.properties, edata)
867 }
868 }
869
/// Default empty codec information.
///
/// Used as a placeholder when no real codec information is available.
pub const DUMMY_CODEC_INFO: NACodecInfo = NACodecInfo {
    name: "none",
    properties: NACodecTypeInfo::None,
    extradata: None };
875
/// A list of recognized frame types.
#[derive(Debug,Clone,Copy,PartialEq)]
#[allow(dead_code)]
pub enum FrameType {
    /// Intra frame type.
    I,
    /// Inter frame type.
    P,
    /// Bidirectionally predicted frame.
    B,
    /// Skip frame.
    ///
    /// When such frame is encountered then last frame should be used again if it is needed.
    Skip,
    /// Some other frame type.
    Other,
}

impl fmt::Display for FrameType {
    /// Prints a short textual tag for the frame type.
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        // Map the variant to its tag once and emit it with a single call.
        let tag = match *self {
            FrameType::I => "I",
            FrameType::P => "P",
            FrameType::B => "B",
            FrameType::Skip => "skip",
            FrameType::Other => "x",
        };
        f.write_str(tag)
    }
}
905
/// Timestamp information.
#[derive(Debug,Clone,Copy)]
pub struct NATimeInfo {
    /// Presentation timestamp.
    pub pts: Option<u64>,
    /// Decode timestamp.
    pub dts: Option<u64>,
    /// Duration (in timebase units).
    pub duration: Option<u64>,
    /// Timebase numerator.
    pub tb_num: u32,
    /// Timebase denominator.
    pub tb_den: u32,
}

impl NATimeInfo {
    /// Constructs a new `NATimeInfo` instance.
    pub fn new(pts: Option<u64>, dts: Option<u64>, duration: Option<u64>, tb_num: u32, tb_den: u32) -> Self {
        NATimeInfo { pts, dts, duration, tb_num, tb_den }
    }
    /// Returns presentation timestamp.
    pub fn get_pts(&self) -> Option<u64> { self.pts }
    /// Returns decoding timestamp.
    pub fn get_dts(&self) -> Option<u64> { self.dts }
    /// Returns duration.
    pub fn get_duration(&self) -> Option<u64> { self.duration }
    /// Sets new presentation timestamp.
    pub fn set_pts(&mut self, pts: Option<u64>) { self.pts = pts; }
    /// Sets new decoding timestamp.
    pub fn set_dts(&mut self, dts: Option<u64>) { self.dts = dts; }
    /// Sets new duration.
    pub fn set_duration(&mut self, dur: Option<u64>) { self.duration = dur; }

    /// Converts time in given scale into timestamp in given base.
    ///
    /// `time` counts `1/base`-second units (e.g. milliseconds for `base = 1000`)
    /// and the result counts `tb_num/tb_den`-second ticks, i.e. this is the
    /// inverse of `ts_to_time`.
    pub fn time_to_ts(time: u64, base: u64, tb_num: u32, tb_den: u32) -> u64 {
        let tb_num = u64::from(tb_num);
        let tb_den = u64::from(tb_den);
        // ts = time * tb_den / (base * tb_num), multiplying first for precision.
        // The previous version multiplied by tb_num and divided by tb_den —
        // which is not the inverse of ts_to_time below — and its overflow
        // fallback retried the exact same failing multiplication.
        if let Some(prod) = time.checked_mul(tb_den) {
            prod / base / tb_num
        } else {
            // Overflow: divide by base first, trading sub-base precision for range.
            let coarse = time / base;
            if let Some(prod) = coarse.checked_mul(tb_den) {
                prod / tb_num
            } else {
                (coarse / tb_num) * tb_den
            }
        }
    }
    /// Converts timestamp in given base into time in given scale.
    pub fn ts_to_time(ts: u64, base: u64, tb_num: u32, tb_den: u32) -> u64 {
        let tb_num = u64::from(tb_num);
        let tb_den = u64::from(tb_den);
        // time = ts * base * tb_num / tb_den, keeping as much precision
        // as the intermediate u64 products allow.
        let tmp = ts.checked_mul(base);
        if let Some(tmp) = tmp {
            let tmp2 = tmp.checked_mul(tb_num);
            if let Some(tmp2) = tmp2 {
                tmp2 / tb_den
            } else {
                (tmp / tb_den) * tb_num
            }
        } else {
            let tmp = ts.checked_mul(tb_num);
            if let Some(tmp) = tmp {
                (tmp / tb_den) * base
            } else {
                (ts / tb_den) * base * tb_num
            }
        }
    }
    // Picks PTS when present, otherwise DTS, otherwise zero.
    fn get_cur_ts(&self) -> u64 { self.pts.unwrap_or_else(|| self.dts.unwrap_or(0)) }
    // Current timestamp converted into milliseconds.
    fn get_cur_millis(&self) -> u64 {
        let ts = self.get_cur_ts();
        Self::ts_to_time(ts, 1000, self.tb_num, self.tb_den)
    }
    /// Checks whether the current time information is earlier than provided reference time.
    pub fn less_than(&self, time: NATimePoint) -> bool {
        // No timestamps at all counts as "earlier than anything".
        if self.pts.is_none() && self.dts.is_none() {
            return true;
        }
        match time {
            NATimePoint::PTS(rpts) => self.get_cur_ts() < rpts,
            NATimePoint::Milliseconds(ms) => self.get_cur_millis() < ms,
            NATimePoint::None => false,
        }
    }
    /// Checks whether the current time information is the same as provided reference time.
    pub fn equal(&self, time: NATimePoint) -> bool {
        if self.pts.is_none() && self.dts.is_none() {
            return time == NATimePoint::None;
        }
        match time {
            NATimePoint::PTS(rpts) => self.get_cur_ts() == rpts,
            NATimePoint::Milliseconds(ms) => self.get_cur_millis() == ms,
            NATimePoint::None => false,
        }
    }
}

/// Time information for specifying durations or seek positions.
#[derive(Clone,Copy,Debug,PartialEq)]
pub enum NATimePoint {
    /// Time in milliseconds.
    Milliseconds(u64),
    /// Stream timestamp.
    PTS(u64),
    /// No time information present.
    None,
}
1021
impl Default for NATimePoint {
    /// The default time point carries no time information.
    fn default() -> Self {
        NATimePoint::None
    }
}
1027
1028 impl fmt::Display for NATimePoint {
1029 fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
1030 match *self {
1031 NATimePoint::Milliseconds(millis) => {
1032 let tot_s = millis / 1000;
1033 let ms = millis % 1000;
1034 if tot_s < 60 {
1035 if ms != 0 {
1036 return write!(f, "{}.{:03}", tot_s, ms);
1037 } else {
1038 return write!(f, "{}", tot_s);
1039 }
1040 }
1041 let tot_m = tot_s / 60;
1042 let s = tot_s % 60;
1043 if tot_m < 60 {
1044 if ms != 0 {
1045 return write!(f, "{}:{:02}.{:03}", tot_m, s, ms);
1046 } else {
1047 return write!(f, "{}:{:02}", tot_m, s);
1048 }
1049 }
1050 let h = tot_m / 60;
1051 let m = tot_m % 60;
1052 if ms != 0 {
1053 write!(f, "{}:{:02}:{:02}.{:03}", h, m, s, ms)
1054 } else {
1055 write!(f, "{}:{:02}:{:02}", h, m, s)
1056 }
1057 },
1058 NATimePoint::PTS(pts) => {
1059 write!(f, "{}pts", pts)
1060 },
1061 NATimePoint::None => {
1062 write!(f, "none")
1063 },
1064 }
1065 }
1066 }
1067
1068 impl FromStr for NATimePoint {
1069 type Err = FormatParseError;
1070
1071 /// Parses the string into time information.
1072 ///
1073 /// Accepted formats are `<u64>pts`, `<u64>ms` or `[hh:][mm:]ss[.ms]`.
1074 fn from_str(s: &str) -> Result<Self, Self::Err> {
1075 if s.is_empty() {
1076 return Err(FormatParseError {});
1077 }
1078 if !s.ends_with("pts") {
1079 if s.ends_with("ms") {
1080 let str_b = s.as_bytes();
1081 let num = std::str::from_utf8(&str_b[..str_b.len() - 2]).unwrap();
1082 let ret = num.parse::<u64>();
1083 if let Ok(val) = ret {
1084 return Ok(NATimePoint::Milliseconds(val));
1085 } else {
1086 return Err(FormatParseError {});
1087 }
1088 }
1089 let mut parts = s.split(':');
1090 let mut hrs = None;
1091 let mut mins = None;
1092 let mut secs = parts.next();
1093 if let Some(part) = parts.next() {
1094 std::mem::swap(&mut mins, &mut secs);
1095 secs = Some(part);
1096 }
1097 if let Some(part) = parts.next() {
1098 std::mem::swap(&mut hrs, &mut mins);
1099 std::mem::swap(&mut mins, &mut secs);
1100 secs = Some(part);
1101 }
1102 if parts.next().is_some() {
1103 return Err(FormatParseError {});
1104 }
1105 let hours = if let Some(val) = hrs {
1106 let ret = val.parse::<u64>();
1107 if ret.is_err() { return Err(FormatParseError {}); }
1108 let val = ret.unwrap();
1109 if val > 1000 { return Err(FormatParseError {}); }
1110 val
1111 } else { 0 };
1112 let minutes = if let Some(val) = mins {
1113 let ret = val.parse::<u64>();
1114 if ret.is_err() { return Err(FormatParseError {}); }
1115 let val = ret.unwrap();
1116 if val >= 60 { return Err(FormatParseError {}); }
1117 val
1118 } else { 0 };
1119 let (seconds, millis) = if let Some(val) = secs {
1120 let mut parts = val.split('.');
1121 let ret = parts.next().unwrap().parse::<u64>();
1122 if ret.is_err() { return Err(FormatParseError {}); }
1123 let seconds = ret.unwrap();
1124 if mins.is_some() && seconds >= 60 { return Err(FormatParseError {}); }
1125 let millis = if let Some(val) = parts.next() {
1126 let mut mval = 0;
1127 let mut base = 0;
1128 for ch in val.chars() {
1129 if ch >= '0' && ch <= '9' {
1130 mval = mval * 10 + u64::from((ch as u8) - b'0');
1131 base += 1;
1132 if base > 3 { break; }
1133 } else {
1134 return Err(FormatParseError {});
1135 }
1136 }
1137 while base < 3 {
1138 mval *= 10;
1139 base += 1;
1140 }
1141 mval
1142 } else { 0 };
1143 (seconds, millis)
1144 } else { unreachable!(); };
1145 let tot_secs = hours * 60 * 60 + minutes * 60 + seconds;
1146 Ok(NATimePoint::Milliseconds(tot_secs * 1000 + millis))
1147 } else {
1148 let str_b = s.as_bytes();
1149 let num = std::str::from_utf8(&str_b[..str_b.len() - 3]).unwrap();
1150 let ret = num.parse::<u64>();
1151 if let Ok(val) = ret {
1152 Ok(NATimePoint::PTS(val))
1153 } else {
1154 Err(FormatParseError {})
1155 }
1156 }
1157 }
1158 }
1159
/// Decoded frame information.
#[allow(dead_code)]
#[derive(Clone)]
pub struct NAFrame {
    /// Frame timestamp.
    pub ts: NATimeInfo,
    /// Frame ID.
    pub id: i64,
    /// Decoded frame data (see `get_buffer()`).
    buffer: NABufferType,
    /// Information about the codec that produced this frame.
    info: NACodecInfoRef,
    /// Frame type.
    pub frame_type: FrameType,
    /// Keyframe flag.
    pub key: bool,
//    options: HashMap<String, NAValue>,
}

/// A specialised type for reference-counted `NAFrame`.
pub type NAFrameRef = Arc<NAFrame>;
1179
1180 fn get_plane_size(info: &NAVideoInfo, idx: usize) -> (usize, usize) {
1181 let chromaton = info.get_format().get_chromaton(idx);
1182 if chromaton.is_none() { return (0, 0); }
1183 let (hs, vs) = chromaton.unwrap().get_subsampling();
1184 let w = (info.get_width() + ((1 << hs) - 1)) >> hs;
1185 let h = (info.get_height() + ((1 << vs) - 1)) >> vs;
1186 (w, h)
1187 }
1188
impl NAFrame {
    /// Constructs a new `NAFrame` instance.
    ///
    /// The frame ID is initialised to zero.
    pub fn new(ts: NATimeInfo,
               ftype: FrameType,
               keyframe: bool,
               info: NACodecInfoRef,
               /*options: HashMap<String, NAValue>,*/
               buffer: NABufferType) -> Self {
        NAFrame { ts, id: 0, buffer, info, frame_type: ftype, key: keyframe/*, options*/ }
    }
    /// Returns frame format information.
    pub fn get_info(&self) -> NACodecInfoRef { self.info.clone() }
    /// Returns frame type.
    pub fn get_frame_type(&self) -> FrameType { self.frame_type }
    /// Reports whether the frame is a keyframe.
    pub fn is_keyframe(&self) -> bool { self.key }
    /// Sets new frame type.
    pub fn set_frame_type(&mut self, ftype: FrameType) { self.frame_type = ftype; }
    /// Sets keyframe flag.
    pub fn set_keyframe(&mut self, key: bool) { self.key = key; }
    /// Returns frame timestamp.
    pub fn get_time_information(&self) -> NATimeInfo { self.ts }
    /// Returns frame presentation time.
    pub fn get_pts(&self) -> Option<u64> { self.ts.get_pts() }
    /// Returns frame decoding time.
    pub fn get_dts(&self) -> Option<u64> { self.ts.get_dts() }
    /// Returns picture ID.
    pub fn get_id(&self) -> i64 { self.id }
    /// Returns frame display duration.
    pub fn get_duration(&self) -> Option<u64> { self.ts.get_duration() }
    /// Sets new presentation timestamp.
    pub fn set_pts(&mut self, pts: Option<u64>) { self.ts.set_pts(pts); }
    /// Sets new decoding timestamp.
    pub fn set_dts(&mut self, dts: Option<u64>) { self.ts.set_dts(dts); }
    /// Sets new picture ID.
    pub fn set_id(&mut self, id: i64) { self.id = id; }
    /// Sets new duration.
    pub fn set_duration(&mut self, dur: Option<u64>) { self.ts.set_duration(dur); }

    /// Returns a reference to the frame data.
    pub fn get_buffer(&self) -> NABufferType { self.buffer.clone() }

    /// Converts current instance into a reference-counted one.
    pub fn into_ref(self) -> NAFrameRef { Arc::new(self) }

    /// Creates new frame with metadata from `NAPacket`.
    ///
    /// The frame type is set to `FrameType::Other` as a placeholder;
    /// callers can set the real type later via `set_frame_type()`.
    pub fn new_from_pkt(pkt: &NAPacket, info: NACodecInfoRef, buf: NABufferType) -> NAFrame {
        NAFrame::new(pkt.ts, FrameType::Other, pkt.keyframe, info, /*HashMap::new(),*/ buf)
    }
}
1239
1240 impl fmt::Display for NAFrame {
1241 fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
1242 let mut ostr = format!("frame type {}", self.frame_type);
1243 if let Some(pts) = self.ts.pts { ostr = format!("{} pts {}", ostr, pts); }
1244 if let Some(dts) = self.ts.dts { ostr = format!("{} dts {}", ostr, dts); }
1245 if let Some(dur) = self.ts.duration { ostr = format!("{} duration {}", ostr, dur); }
1246 if self.key { ostr = format!("{} kf", ostr); }
1247 write!(f, "[{}]", ostr)
1248 }
1249 }
1250
/// A list of possible stream types.
#[derive(Debug,Clone,Copy,PartialEq)]
#[allow(dead_code)]
pub enum StreamType {
    /// Video stream.
    Video,
    /// Audio stream.
    Audio,
    /// Subtitles.
    Subtitles,
    /// Any data stream (or might be an unrecognized audio/video stream).
    Data,
    /// Nonexistent stream.
    None,
}

impl fmt::Display for StreamType {
    /// Prints a human-readable stream type name.
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        // Select the label first, then emit it with a single call.
        let tag = match *self {
            StreamType::Video => "Video",
            StreamType::Audio => "Audio",
            StreamType::Subtitles => "Subtitles",
            StreamType::Data => "Data",
            StreamType::None => "-",
        };
        f.write_str(tag)
    }
}
1278
/// Stream data.
#[allow(dead_code)]
#[derive(Clone)]
pub struct NAStream {
    /// Stream type.
    media_type: StreamType,
    /// Stream ID.
    pub id: u32,
    /// Stream number assigned by the demuxer (see `get_num()`/`set_num()`).
    num: usize,
    /// Information about the codec used by this stream.
    info: NACodecInfoRef,
    /// Timebase numerator.
    pub tb_num: u32,
    /// Timebase denominator.
    pub tb_den: u32,
}

/// A specialised reference-counted `NAStream` type.
pub type NAStreamRef = Arc<NAStream>;
1296
/// Downscales the timebase by its greatest common denominator.
///
/// Degenerate timebases with a zero component are returned unchanged.
pub fn reduce_timebase(tb_num: u32, tb_den: u32) -> (u32, u32) {
    if tb_num == 0 || tb_den == 0 { return (tb_num, tb_den); }

    // Euclid's algorithm with modulo: the previous subtraction-based loop
    // needed O(max/min) iterations and never terminated for tb_den == 0.
    let mut a = tb_num;
    let mut b = tb_den;
    while b != 0 {
        let r = a % b;
        a = b;
        b = r;
    }

    (tb_num / a, tb_den / a)
}
1312
impl NAStream {
    /// Constructs a new `NAStream` instance.
    ///
    /// The provided timebase is reduced via `reduce_timebase()` and the
    /// demuxer-assigned stream number starts at zero.
    pub fn new(mt: StreamType, id: u32, info: NACodecInfo, tb_num: u32, tb_den: u32) -> Self {
        let (n, d) = reduce_timebase(tb_num, tb_den);
        NAStream { media_type: mt, id, num: 0, info: info.into_ref(), tb_num: n, tb_den: d }
    }
    /// Returns stream id.
    pub fn get_id(&self) -> u32 { self.id }
    /// Returns stream type.
    pub fn get_media_type(&self) -> StreamType { self.media_type }
    /// Returns stream number assigned by demuxer.
    pub fn get_num(&self) -> usize { self.num }
    /// Sets stream number.
    pub fn set_num(&mut self, num: usize) { self.num = num; }
    /// Returns codec information.
    pub fn get_info(&self) -> NACodecInfoRef { self.info.clone() }
    /// Returns stream timebase.
    pub fn get_timebase(&self) -> (u32, u32) { (self.tb_num, self.tb_den) }
    /// Sets new stream timebase.
    ///
    /// The timebase is reduced before being stored, as in `new()`.
    pub fn set_timebase(&mut self, tb_num: u32, tb_den: u32) {
        let (n, d) = reduce_timebase(tb_num, tb_den);
        self.tb_num = n;
        self.tb_den = d;
    }
    /// Converts current instance into a reference-counted one.
    pub fn into_ref(self) -> NAStreamRef { Arc::new(self) }
}
1340
impl fmt::Display for NAStream {
    /// Prints the stream description, e.g. `(Video#0 @ 1/25 - <properties>)`.
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        write!(f, "({}#{} @ {}/{} - {})", self.media_type, self.id, self.tb_num, self.tb_den, self.info.get_properties())
    }
}
1346
/// Side data that may accompany demuxed data.
#[derive(Clone)]
pub enum NASideData {
    /// Palette information.
    ///
    /// This side data contains a flag signalling that palette has changed since previous time and a reference to the current palette.
    /// Palette is stored in 8-bit RGBA format (256 entries of 4 bytes each).
    Palette(bool, Arc<[u8; 1024]>),
    /// Generic user data.
    UserData(Arc<Vec<u8>>),
}
1358
/// Packet with compressed data.
#[allow(dead_code)]
pub struct NAPacket {
    /// Stream this packet belongs to.
    stream: NAStreamRef,
    /// Packet timestamp.
    pub ts: NATimeInfo,
    /// Packet payload (see `get_buffer()`).
    buffer: NABufferRef<Vec<u8>>,
    /// Keyframe flag.
    pub keyframe: bool,
//    options: HashMap<String, NAValue<'a>>,
    /// Packet side data (e.g. palette for paletted formats).
    pub side_data: Vec<NASideData>,
}
1372
1373 impl NAPacket {
1374 /// Constructs a new `NAPacket` instance.
1375 pub fn new(str: NAStreamRef, ts: NATimeInfo, kf: bool, vec: Vec<u8>) -> Self {
1376 // let mut vec: Vec<u8> = Vec::new();
1377 // vec.resize(size, 0);
1378 NAPacket { stream: str, ts, keyframe: kf, buffer: NABufferRef::new(vec), side_data: Vec::new() }
1379 }
1380 /// Constructs a new `NAPacket` instance reusing a buffer reference.
1381 pub fn new_from_refbuf(str: NAStreamRef, ts: NATimeInfo, kf: bool, buffer: NABufferRef<Vec<u8>>) -> Self {
1382 NAPacket { stream: str, ts, keyframe: kf, buffer, side_data: Vec::new() }
1383 }
1384 /// Returns information about the stream packet belongs to.
1385 pub fn get_stream(&self) -> NAStreamRef { self.stream.clone() }
1386 /// Returns packet timestamp.
1387 pub fn get_time_information(&self) -> NATimeInfo { self.ts }
1388 /// Returns packet presentation timestamp.
1389 pub fn get_pts(&self) -> Option<u64> { self.ts.get_pts() }
1390 /// Returns packet decoding timestamp.
1391 pub fn get_dts(&self) -> Option<u64> { self.ts.get_dts() }
1392 /// Returns packet duration.
1393 pub fn get_duration(&self) -> Option<u64> { self.ts.get_duration() }
1394 /// Reports whether this is a keyframe packet.
1395 pub fn is_keyframe(&self) -> bool { self.keyframe }
1396 /// Returns a reference to packet data.
1397 pub fn get_buffer(&self) -> NABufferRef<Vec<u8>> { self.buffer.clone() }
1398 /// Adds side data for a packet.
1399 pub fn add_side_data(&mut self, side_data: NASideData) { self.side_data.push(side_data); }
1400 /// Assigns packet to a new stream.
1401 pub fn reassign(&mut self, str: NAStreamRef, ts: NATimeInfo) {
1402 self.stream = str;
1403 self.ts = ts;
1404 }
1405 }
1406
impl Drop for NAPacket {
    // No-op destructor. NOTE(review): this adds nothing over the
    // auto-generated drop glue — presumably a placeholder for future
    // bookkeeping; confirm intent before removing.
    fn drop(&mut self) {}
}
1410
1411 impl fmt::Display for NAPacket {
1412 fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
1413 let mut ostr = format!("[pkt for {} size {}", self.stream, self.buffer.len());
1414 if let Some(pts) = self.ts.pts { ostr = format!("{} pts {}", ostr, pts); }
1415 if let Some(dts) = self.ts.dts { ostr = format!("{} dts {}", ostr, dts); }
1416 if let Some(dur) = self.ts.duration { ostr = format!("{} duration {}", ostr, dur); }
1417 if self.keyframe { ostr = format!("{} kf", ostr); }
1418 ostr += "]";
1419 write!(f, "{}", ostr)
1420 }
1421 }
1422
#[cfg(test)]
mod test {
    use super::*;

    /// Checks `NATimePoint` formatting and parsing round trips.
    #[test]
    fn test_time_parse() {
        assert_eq!(NATimePoint::PTS(42).to_string(), "42pts");
        assert_eq!(NATimePoint::Milliseconds(4242000).to_string(), "1:10:42");
        assert_eq!(NATimePoint::Milliseconds(42424242).to_string(), "11:47:04.242");
        let ret = NATimePoint::from_str("42pts");
        assert_eq!(ret.unwrap(), NATimePoint::PTS(42));
        let ret = NATimePoint::from_str("1:2:3");
        assert_eq!(ret.unwrap(), NATimePoint::Milliseconds(3723000));
        let ret = NATimePoint::from_str("1:2:3.42");
        assert_eq!(ret.unwrap(), NATimePoint::Milliseconds(3723420));
        // Bare seconds and explicit millisecond forms should parse too.
        let ret = NATimePoint::from_str("42");
        assert_eq!(ret.unwrap(), NATimePoint::Milliseconds(42000));
        let ret = NATimePoint::from_str("100ms");
        assert_eq!(ret.unwrap(), NATimePoint::Milliseconds(100));
        // Malformed input must be rejected.
        assert!(NATimePoint::from_str("").is_err());
        assert!(NATimePoint::from_str("1:2:3:4").is_err());
    }
}