1 //! Packets and decoded frames functionality.
3 //use std::collections::HashMap;
6 pub use crate::formats::*;
7 pub use crate::refs::*;
9 /// Audio stream information.
11 #[derive(Clone,Copy,PartialEq)]
12 pub struct NAAudioInfo {
15 /// Number of channels.
17 /// Audio sample format.
18 pub format: NASoniton,
19 /// Length of one audio block in samples.
24 /// Constructs a new `NAAudioInfo` instance.
25 pub fn new(sr: u32, ch: u8, fmt: NASoniton, bl: usize) -> Self {
26 NAAudioInfo { sample_rate: sr, channels: ch, format: fmt, block_len: bl }
28 /// Returns audio sample rate.
29 pub fn get_sample_rate(&self) -> u32 { self.sample_rate }
30 /// Returns the number of channels.
31 pub fn get_channels(&self) -> u8 { self.channels }
32 /// Returns sample format.
33 pub fn get_format(&self) -> NASoniton { self.format }
34 /// Returns one audio block duration in samples.
35 pub fn get_block_len(&self) -> usize { self.block_len }
38 impl fmt::Display for NAAudioInfo {
39 fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
40 write!(f, "{} Hz, {} ch", self.sample_rate, self.channels)
44 /// Video stream information.
46 #[derive(Clone,Copy,PartialEq)]
47 pub struct NAVideoInfo {
52 /// Picture is stored downside up.
54 /// Picture pixel format.
55 pub format: NAPixelFormaton,
59 /// Constructs a new `NAVideoInfo` instance.
60 pub fn new(w: usize, h: usize, flip: bool, fmt: NAPixelFormaton) -> Self {
61 NAVideoInfo { width: w, height: h, flipped: flip, format: fmt }
63 /// Returns picture width.
64 pub fn get_width(&self) -> usize { self.width as usize }
65 /// Returns picture height.
66 pub fn get_height(&self) -> usize { self.height as usize }
67 /// Returns picture orientation.
68 pub fn is_flipped(&self) -> bool { self.flipped }
69 /// Returns picture pixel format.
70 pub fn get_format(&self) -> NAPixelFormaton { self.format }
71 /// Sets new picture width.
72 pub fn set_width(&mut self, w: usize) { self.width = w; }
73 /// Sets new picture height.
74 pub fn set_height(&mut self, h: usize) { self.height = h; }
77 impl fmt::Display for NAVideoInfo {
78 fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
79 write!(f, "{}x{}", self.width, self.height)
83 /// A list of possible stream information types.
84 #[derive(Clone,Copy,PartialEq)]
85 pub enum NACodecTypeInfo {
88 /// Audio codec information.
90 /// Video codec information.
94 impl NACodecTypeInfo {
95 /// Returns video stream information.
96 pub fn get_video_info(&self) -> Option<NAVideoInfo> {
98 NACodecTypeInfo::Video(vinfo) => Some(vinfo),
102 /// Returns audio stream information.
103 pub fn get_audio_info(&self) -> Option<NAAudioInfo> {
105 NACodecTypeInfo::Audio(ainfo) => Some(ainfo),
109 /// Reports whether the current stream is video stream.
110 pub fn is_video(&self) -> bool {
112 NACodecTypeInfo::Video(_) => true,
116 /// Reports whether the current stream is audio stream.
117 pub fn is_audio(&self) -> bool {
119 NACodecTypeInfo::Audio(_) => true,
125 impl fmt::Display for NACodecTypeInfo {
126 fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
127 let ret = match *self {
128 NACodecTypeInfo::None => "".to_string(),
129 NACodecTypeInfo::Audio(fmt) => format!("{}", fmt),
130 NACodecTypeInfo::Video(fmt) => format!("{}", fmt),
136 /// Decoded video frame.
138 /// NihAV frames are stored in native type (8/16/32-bit elements) inside a single buffer.
139 /// In case of image with several components those components are stored sequentially and can be accessed in the buffer starting at corresponding component offset.
141 pub struct NAVideoBuffer<T> {
143 data: NABufferRef<Vec<T>>,
148 impl<T: Clone> NAVideoBuffer<T> {
149 /// Returns the component offset (0 for all unavailable offsets).
150 pub fn get_offset(&self, idx: usize) -> usize {
151 if idx >= self.offs.len() { 0 }
152 else { self.offs[idx] }
154 /// Returns picture info.
155 pub fn get_info(&self) -> NAVideoInfo { self.info }
156 /// Returns an immutable reference to the data.
157 pub fn get_data(&self) -> &Vec<T> { self.data.as_ref() }
158 /// Returns a mutable reference to the data.
159 pub fn get_data_mut(&mut self) -> Option<&mut Vec<T>> { self.data.as_mut() }
160 /// Returns the number of components in picture format.
161 pub fn get_num_components(&self) -> usize { self.offs.len() }
162 /// Creates a copy of current `NAVideoBuffer`.
163 pub fn copy_buffer(&mut self) -> Self {
164 let mut data: Vec<T> = Vec::with_capacity(self.data.len());
165 data.clone_from(self.data.as_ref());
166 let mut offs: Vec<usize> = Vec::with_capacity(self.offs.len());
167 offs.clone_from(&self.offs);
168 let mut strides: Vec<usize> = Vec::with_capacity(self.strides.len());
169 strides.clone_from(&self.strides);
170 NAVideoBuffer { info: self.info, data: NABufferRef::new(data), offs, strides }
172 /// Returns stride (distance between subsequent lines) for the requested component.
173 pub fn get_stride(&self, idx: usize) -> usize {
174 if idx >= self.strides.len() { return 0; }
177 /// Returns requested component dimensions.
178 pub fn get_dimensions(&self, idx: usize) -> (usize, usize) {
179 get_plane_size(&self.info, idx)
181 /// Converts current instance into buffer reference.
182 pub fn into_ref(self) -> NABufferRef<Self> {
183 NABufferRef::new(self)
187 /// A specialised type for reference-counted `NAVideoBuffer`.
188 pub type NAVideoBufferRef<T> = NABufferRef<NAVideoBuffer<T>>;
190 /// Decoded audio frame.
192 /// NihAV frames are stored in native type (8/16/32-bit elements) inside a single buffer.
193 /// In case of planar audio samples for each channel are stored sequentially and can be accessed in the buffer starting at corresponding channel offset.
195 pub struct NAAudioBuffer<T> {
197 data: NABufferRef<Vec<T>>,
204 impl<T: Clone> NAAudioBuffer<T> {
205 /// Returns the start position of requested channel data.
206 pub fn get_offset(&self, idx: usize) -> usize {
207 if idx >= self.offs.len() { 0 }
208 else { self.offs[idx] }
210 /// Returns the distance between the start of one channel and the next one.
211 pub fn get_stride(&self) -> usize { self.stride }
212 /// Returns audio format information.
213 pub fn get_info(&self) -> NAAudioInfo { self.info }
214 /// Returns channel map.
215 pub fn get_chmap(&self) -> &NAChannelMap { &self.chmap }
216 /// Returns an immutable reference to the data.
217 pub fn get_data(&self) -> &Vec<T> { self.data.as_ref() }
218 /// Returns a mutable reference to the data.
219 pub fn get_data_mut(&mut self) -> Option<&mut Vec<T>> { self.data.as_mut() }
220 /// Clones current `NAAudioBuffer` into a new one.
221 pub fn copy_buffer(&mut self) -> Self {
222 let mut data: Vec<T> = Vec::with_capacity(self.data.len());
223 data.clone_from(self.data.as_ref());
224 let mut offs: Vec<usize> = Vec::with_capacity(self.offs.len());
225 offs.clone_from(&self.offs);
226 NAAudioBuffer { info: self.info, data: NABufferRef::new(data), offs, chmap: self.get_chmap().clone(), len: self.len, stride: self.stride }
228 /// Return the length of frame in samples.
229 pub fn get_length(&self) -> usize { self.len }
232 impl NAAudioBuffer<u8> {
233 /// Constructs a new `NAAudioBuffer` instance.
234 pub fn new_from_buf(info: NAAudioInfo, data: NABufferRef<Vec<u8>>, chmap: NAChannelMap) -> Self {
235 let len = data.len();
236 NAAudioBuffer { info, data, chmap, offs: Vec::new(), len, stride: 0 }
240 /// A list of possible decoded frame types.
242 pub enum NABufferType {
243 /// 8-bit video buffer.
244 Video (NAVideoBufferRef<u8>),
245 /// 16-bit video buffer (i.e. every component or packed pixel fits into 16 bits).
246 Video16 (NAVideoBufferRef<u16>),
247 /// 32-bit video buffer (i.e. every component or packed pixel fits into 32 bits).
248 Video32 (NAVideoBufferRef<u32>),
249 /// Packed video buffer.
250 VideoPacked(NAVideoBufferRef<u8>),
251 /// Audio buffer with 8-bit unsigned integer audio.
252 AudioU8 (NAAudioBuffer<u8>),
253 /// Audio buffer with 16-bit signed integer audio.
254 AudioI16 (NAAudioBuffer<i16>),
255 /// Audio buffer with 32-bit signed integer audio.
256 AudioI32 (NAAudioBuffer<i32>),
257 /// Audio buffer with 32-bit floating point audio.
258 AudioF32 (NAAudioBuffer<f32>),
259 /// Packed audio buffer.
260 AudioPacked(NAAudioBuffer<u8>),
261 /// Buffer with generic data (e.g. subtitles).
262 Data (NABufferRef<Vec<u8>>),
268 /// Returns the offset to the requested component or channel.
269 pub fn get_offset(&self, idx: usize) -> usize {
271 NABufferType::Video(ref vb) => vb.get_offset(idx),
272 NABufferType::Video16(ref vb) => vb.get_offset(idx),
273 NABufferType::Video32(ref vb) => vb.get_offset(idx),
274 NABufferType::VideoPacked(ref vb) => vb.get_offset(idx),
275 NABufferType::AudioU8(ref ab) => ab.get_offset(idx),
276 NABufferType::AudioI16(ref ab) => ab.get_offset(idx),
277 NABufferType::AudioF32(ref ab) => ab.get_offset(idx),
278 NABufferType::AudioPacked(ref ab) => ab.get_offset(idx),
282 /// Returns information for video frames.
283 pub fn get_video_info(&self) -> Option<NAVideoInfo> {
285 NABufferType::Video(ref vb) => Some(vb.get_info()),
286 NABufferType::Video16(ref vb) => Some(vb.get_info()),
287 NABufferType::Video32(ref vb) => Some(vb.get_info()),
288 NABufferType::VideoPacked(ref vb) => Some(vb.get_info()),
292 /// Returns reference to 8-bit (or packed) video buffer.
293 pub fn get_vbuf(&self) -> Option<NAVideoBufferRef<u8>> {
295 NABufferType::Video(ref vb) => Some(vb.clone()),
296 NABufferType::VideoPacked(ref vb) => Some(vb.clone()),
300 /// Returns reference to 16-bit video buffer.
301 pub fn get_vbuf16(&self) -> Option<NAVideoBufferRef<u16>> {
303 NABufferType::Video16(ref vb) => Some(vb.clone()),
307 /// Returns reference to 32-bit video buffer.
308 pub fn get_vbuf32(&self) -> Option<NAVideoBufferRef<u32>> {
310 NABufferType::Video32(ref vb) => Some(vb.clone()),
314 /// Returns information for audio frames.
315 pub fn get_audio_info(&self) -> Option<NAAudioInfo> {
317 NABufferType::AudioU8(ref ab) => Some(ab.get_info()),
318 NABufferType::AudioI16(ref ab) => Some(ab.get_info()),
319 NABufferType::AudioI32(ref ab) => Some(ab.get_info()),
320 NABufferType::AudioF32(ref ab) => Some(ab.get_info()),
321 NABufferType::AudioPacked(ref ab) => Some(ab.get_info()),
325 /// Returns audio channel map.
326 pub fn get_chmap(&self) -> Option<&NAChannelMap> {
328 NABufferType::AudioU8(ref ab) => Some(ab.get_chmap()),
329 NABufferType::AudioI16(ref ab) => Some(ab.get_chmap()),
330 NABufferType::AudioI32(ref ab) => Some(ab.get_chmap()),
331 NABufferType::AudioF32(ref ab) => Some(ab.get_chmap()),
332 NABufferType::AudioPacked(ref ab) => Some(ab.get_chmap()),
336 /// Returns audio frame duration in samples.
337 pub fn get_audio_length(&self) -> usize {
339 NABufferType::AudioU8(ref ab) => ab.get_length(),
340 NABufferType::AudioI16(ref ab) => ab.get_length(),
341 NABufferType::AudioI32(ref ab) => ab.get_length(),
342 NABufferType::AudioF32(ref ab) => ab.get_length(),
343 NABufferType::AudioPacked(ref ab) => ab.get_length(),
347 /// Returns the distance between starts of two channels.
348 pub fn get_audio_stride(&self) -> usize {
350 NABufferType::AudioU8(ref ab) => ab.get_stride(),
351 NABufferType::AudioI16(ref ab) => ab.get_stride(),
352 NABufferType::AudioI32(ref ab) => ab.get_stride(),
353 NABufferType::AudioF32(ref ab) => ab.get_stride(),
354 NABufferType::AudioPacked(ref ab) => ab.get_stride(),
358 /// Returns reference to 8-bit (or packed) audio buffer.
359 pub fn get_abuf_u8(&self) -> Option<NAAudioBuffer<u8>> {
361 NABufferType::AudioU8(ref ab) => Some(ab.clone()),
362 NABufferType::AudioPacked(ref ab) => Some(ab.clone()),
366 /// Returns reference to 16-bit audio buffer.
367 pub fn get_abuf_i16(&self) -> Option<NAAudioBuffer<i16>> {
369 NABufferType::AudioI16(ref ab) => Some(ab.clone()),
373 /// Returns reference to 32-bit integer audio buffer.
374 pub fn get_abuf_i32(&self) -> Option<NAAudioBuffer<i32>> {
376 NABufferType::AudioI32(ref ab) => Some(ab.clone()),
380 /// Returns reference to 32-bit floating point audio buffer.
381 pub fn get_abuf_f32(&self) -> Option<NAAudioBuffer<f32>> {
383 NABufferType::AudioF32(ref ab) => Some(ab.clone()),
// Maximum number of picture components a simple frame can describe.
const NA_SIMPLE_VFRAME_COMPONENTS: usize = 4;

/// Simplified decoded frame data.
pub struct NASimpleVideoFrame<'a, T: Copy> {
    /// Widths of each picture component.
    pub width: [usize; NA_SIMPLE_VFRAME_COMPONENTS],
    /// Heights of each picture component.
    pub height: [usize; NA_SIMPLE_VFRAME_COMPONENTS],
    /// Orientation (upside-down or downside-up) flag.
    pub flipped: bool,
    /// Strides for each component.
    pub stride: [usize; NA_SIMPLE_VFRAME_COMPONENTS],
    /// Start of each component.
    pub offset: [usize; NA_SIMPLE_VFRAME_COMPONENTS],
    /// Number of components.
    pub components: usize,
    /// Pointer to the picture pixel data.
    pub data: &'a mut [T],
}
408 impl<'a, T:Copy> NASimpleVideoFrame<'a, T> {
409 /// Constructs a new instance of `NASimpleVideoFrame` from `NAVideoBuffer`.
410 pub fn from_video_buf(vbuf: &'a mut NAVideoBuffer<T>) -> Option<Self> {
411 let vinfo = vbuf.get_info();
412 let components = vinfo.format.components as usize;
413 if components > NA_SIMPLE_VFRAME_COMPONENTS {
416 let mut w: [usize; NA_SIMPLE_VFRAME_COMPONENTS] = [0; NA_SIMPLE_VFRAME_COMPONENTS];
417 let mut h: [usize; NA_SIMPLE_VFRAME_COMPONENTS] = [0; NA_SIMPLE_VFRAME_COMPONENTS];
418 let mut s: [usize; NA_SIMPLE_VFRAME_COMPONENTS] = [0; NA_SIMPLE_VFRAME_COMPONENTS];
419 let mut o: [usize; NA_SIMPLE_VFRAME_COMPONENTS] = [0; NA_SIMPLE_VFRAME_COMPONENTS];
420 for comp in 0..components {
421 let (width, height) = vbuf.get_dimensions(comp);
424 s[comp] = vbuf.get_stride(comp);
425 o[comp] = vbuf.get_offset(comp);
427 let flip = vinfo.flipped;
428 Some(NASimpleVideoFrame {
435 data: vbuf.data.as_mut_slice(),
/// A list of possible frame allocator errors.
#[derive(Debug,Clone,Copy,PartialEq)]
pub enum AllocatorError {
    /// Requested picture dimensions are too large.
    TooLargeDimensions,
    /// Invalid input format.
    FormatError,
}
449 /// Constructs a new video buffer with requested format.
451 /// `align` is power of two alignment for image. E.g. the value of 5 means that frame dimensions will be padded to be multiple of 32.
452 pub fn alloc_video_buffer(vinfo: NAVideoInfo, align: u8) -> Result<NABufferType, AllocatorError> {
453 let fmt = &vinfo.format;
454 let mut new_size: usize = 0;
455 let mut offs: Vec<usize> = Vec::new();
456 let mut strides: Vec<usize> = Vec::new();
458 for i in 0..fmt.get_num_comp() {
459 if fmt.get_chromaton(i) == None { return Err(AllocatorError::FormatError); }
462 let align_mod = ((1 << align) as usize) - 1;
463 let width = ((vinfo.width as usize) + align_mod) & !align_mod;
464 let height = ((vinfo.height as usize) + align_mod) & !align_mod;
465 let mut max_depth = 0;
466 let mut all_packed = true;
467 let mut all_bytealigned = true;
468 for i in 0..fmt.get_num_comp() {
469 let ochr = fmt.get_chromaton(i);
470 if ochr.is_none() { continue; }
471 let chr = ochr.unwrap();
472 if !chr.is_packed() {
474 } else if ((chr.get_shift() + chr.get_depth()) & 7) != 0 {
475 all_bytealigned = false;
477 max_depth = max(max_depth, chr.get_depth());
479 let unfit_elem_size = match fmt.get_elem_size() {
484 //todo semi-packed like NV12
485 if fmt.is_paletted() {
486 //todo various-sized palettes?
487 let stride = vinfo.get_format().get_chromaton(0).unwrap().get_linesize(width);
488 let pic_sz = stride.checked_mul(height);
489 if pic_sz == None { return Err(AllocatorError::TooLargeDimensions); }
490 let pal_size = 256 * (fmt.get_elem_size() as usize);
491 let new_size = pic_sz.unwrap().checked_add(pal_size);
492 if new_size == None { return Err(AllocatorError::TooLargeDimensions); }
494 offs.push(stride * height);
495 strides.push(stride);
496 let data: Vec<u8> = vec![0; new_size.unwrap()];
497 let buf: NAVideoBuffer<u8> = NAVideoBuffer { data: NABufferRef::new(data), info: vinfo, offs, strides };
498 Ok(NABufferType::Video(buf.into_ref()))
499 } else if !all_packed {
500 for i in 0..fmt.get_num_comp() {
501 let ochr = fmt.get_chromaton(i);
502 if ochr.is_none() { continue; }
503 let chr = ochr.unwrap();
504 offs.push(new_size as usize);
505 let stride = chr.get_linesize(width);
506 let cur_h = chr.get_height(height);
507 let cur_sz = stride.checked_mul(cur_h);
508 if cur_sz == None { return Err(AllocatorError::TooLargeDimensions); }
509 let new_sz = new_size.checked_add(cur_sz.unwrap());
510 if new_sz == None { return Err(AllocatorError::TooLargeDimensions); }
511 new_size = new_sz.unwrap();
512 strides.push(stride);
515 let data: Vec<u8> = vec![0; new_size];
516 let buf: NAVideoBuffer<u8> = NAVideoBuffer { data: NABufferRef::new(data), info: vinfo, offs, strides };
517 Ok(NABufferType::Video(buf.into_ref()))
518 } else if max_depth <= 16 {
519 let data: Vec<u16> = vec![0; new_size];
520 let buf: NAVideoBuffer<u16> = NAVideoBuffer { data: NABufferRef::new(data), info: vinfo, offs, strides };
521 Ok(NABufferType::Video16(buf.into_ref()))
523 let data: Vec<u32> = vec![0; new_size];
524 let buf: NAVideoBuffer<u32> = NAVideoBuffer { data: NABufferRef::new(data), info: vinfo, offs, strides };
525 Ok(NABufferType::Video32(buf.into_ref()))
527 } else if all_bytealigned || unfit_elem_size {
528 let elem_sz = fmt.get_elem_size();
529 let line_sz = width.checked_mul(elem_sz as usize);
530 if line_sz == None { return Err(AllocatorError::TooLargeDimensions); }
531 let new_sz = line_sz.unwrap().checked_mul(height);
532 if new_sz == None { return Err(AllocatorError::TooLargeDimensions); }
533 new_size = new_sz.unwrap();
534 let data: Vec<u8> = vec![0; new_size];
535 strides.push(line_sz.unwrap());
536 let buf: NAVideoBuffer<u8> = NAVideoBuffer { data: NABufferRef::new(data), info: vinfo, offs, strides };
537 Ok(NABufferType::VideoPacked(buf.into_ref()))
539 let elem_sz = fmt.get_elem_size();
540 let new_sz = width.checked_mul(height);
541 if new_sz == None { return Err(AllocatorError::TooLargeDimensions); }
542 new_size = new_sz.unwrap();
545 let data: Vec<u16> = vec![0; new_size];
547 let buf: NAVideoBuffer<u16> = NAVideoBuffer { data: NABufferRef::new(data), info: vinfo, offs, strides };
548 Ok(NABufferType::Video16(buf.into_ref()))
551 let data: Vec<u32> = vec![0; new_size];
553 let buf: NAVideoBuffer<u32> = NAVideoBuffer { data: NABufferRef::new(data), info: vinfo, offs, strides };
554 Ok(NABufferType::Video32(buf.into_ref()))
561 /// Constructs a new audio buffer for the requested format and length.
562 #[allow(clippy::collapsible_if)]
563 pub fn alloc_audio_buffer(ainfo: NAAudioInfo, nsamples: usize, chmap: NAChannelMap) -> Result<NABufferType, AllocatorError> {
564 let mut offs: Vec<usize> = Vec::new();
565 if ainfo.format.is_planar() || (ainfo.channels == 1 && (ainfo.format.get_bits() % 8) == 0) {
566 let len = nsamples.checked_mul(ainfo.channels as usize);
567 if len == None { return Err(AllocatorError::TooLargeDimensions); }
568 let length = len.unwrap();
569 let stride = nsamples;
570 for i in 0..ainfo.channels {
571 offs.push((i as usize) * stride);
573 if ainfo.format.is_float() {
574 if ainfo.format.get_bits() == 32 {
575 let data: Vec<f32> = vec![0.0; length];
576 let buf: NAAudioBuffer<f32> = NAAudioBuffer { data: NABufferRef::new(data), info: ainfo, offs, chmap, len: nsamples, stride };
577 Ok(NABufferType::AudioF32(buf))
579 Err(AllocatorError::TooLargeDimensions)
582 if ainfo.format.get_bits() == 8 && !ainfo.format.is_signed() {
583 let data: Vec<u8> = vec![0; length];
584 let buf: NAAudioBuffer<u8> = NAAudioBuffer { data: NABufferRef::new(data), info: ainfo, offs, chmap, len: nsamples, stride };
585 Ok(NABufferType::AudioU8(buf))
586 } else if ainfo.format.get_bits() == 16 && ainfo.format.is_signed() {
587 let data: Vec<i16> = vec![0; length];
588 let buf: NAAudioBuffer<i16> = NAAudioBuffer { data: NABufferRef::new(data), info: ainfo, offs, chmap, len: nsamples, stride };
589 Ok(NABufferType::AudioI16(buf))
591 Err(AllocatorError::TooLargeDimensions)
595 let len = nsamples.checked_mul(ainfo.channels as usize);
596 if len == None { return Err(AllocatorError::TooLargeDimensions); }
597 let length = ainfo.format.get_audio_size(len.unwrap() as u64);
598 let data: Vec<u8> = vec![0; length];
599 let buf: NAAudioBuffer<u8> = NAAudioBuffer { data: NABufferRef::new(data), info: ainfo, offs, chmap, len: nsamples, stride: 0 };
600 Ok(NABufferType::AudioPacked(buf))
604 /// Constructs a new buffer for generic data.
605 pub fn alloc_data_buffer(size: usize) -> Result<NABufferType, AllocatorError> {
606 let data: Vec<u8> = vec![0; size];
607 let buf: NABufferRef<Vec<u8>> = NABufferRef::new(data);
608 Ok(NABufferType::Data(buf))
611 /// Creates a clone of current buffer.
612 pub fn copy_buffer(buf: NABufferType) -> NABufferType {
616 /// Video frame pool.
618 /// This structure allows codec to effectively reuse old frames instead of allocating and de-allocating frames every time.
619 /// Caller can also reserve some frames for its own purposes e.g. display queue.
620 pub struct NAVideoBufferPool<T:Copy> {
621 pool: Vec<NAVideoBufferRef<T>>,
626 impl<T:Copy> NAVideoBufferPool<T> {
627 /// Constructs a new `NAVideoBufferPool` instance.
628 pub fn new(max_len: usize) -> Self {
630 pool: Vec::with_capacity(max_len),
635 /// Sets the number of buffers reserved for the user.
636 pub fn set_dec_bufs(&mut self, add_len: usize) {
637 self.add_len = add_len;
639 /// Returns an unused buffer from the pool.
640 pub fn get_free(&mut self) -> Option<NAVideoBufferRef<T>> {
641 for e in self.pool.iter() {
642 if e.get_num_refs() == 1 {
643 return Some(e.clone());
648 /// Clones provided frame data into a free pool frame.
649 pub fn get_copy(&mut self, rbuf: &NAVideoBufferRef<T>) -> Option<NAVideoBufferRef<T>> {
650 let mut dbuf = self.get_free()?;
651 dbuf.data.copy_from_slice(&rbuf.data);
654 /// Clears the pool from all frames.
655 pub fn reset(&mut self) {
656 self.pool.truncate(0);
660 impl NAVideoBufferPool<u8> {
661 /// Allocates the target amount of video frames using [`alloc_video_buffer`].
663 /// [`alloc_video_buffer`]: ./fn.alloc_video_buffer.html
664 pub fn prealloc_video(&mut self, vinfo: NAVideoInfo, align: u8) -> Result<(), AllocatorError> {
665 let nbufs = self.max_len + self.add_len - self.pool.len();
667 let vbuf = alloc_video_buffer(vinfo, align)?;
668 if let NABufferType::Video(buf) = vbuf {
670 } else if let NABufferType::VideoPacked(buf) = vbuf {
673 return Err(AllocatorError::FormatError);
680 impl NAVideoBufferPool<u16> {
681 /// Allocates the target amount of video frames using [`alloc_video_buffer`].
683 /// [`alloc_video_buffer`]: ./fn.alloc_video_buffer.html
684 pub fn prealloc_video(&mut self, vinfo: NAVideoInfo, align: u8) -> Result<(), AllocatorError> {
685 let nbufs = self.max_len + self.add_len - self.pool.len();
687 let vbuf = alloc_video_buffer(vinfo, align)?;
688 if let NABufferType::Video16(buf) = vbuf {
691 return Err(AllocatorError::FormatError);
698 impl NAVideoBufferPool<u32> {
699 /// Allocates the target amount of video frames using [`alloc_video_buffer`].
701 /// [`alloc_video_buffer`]: ./fn.alloc_video_buffer.html
702 pub fn prealloc_video(&mut self, vinfo: NAVideoInfo, align: u8) -> Result<(), AllocatorError> {
703 let nbufs = self.max_len + self.add_len - self.pool.len();
705 let vbuf = alloc_video_buffer(vinfo, align)?;
706 if let NABufferType::Video32(buf) = vbuf {
709 return Err(AllocatorError::FormatError);
716 /// Information about codec contained in a stream.
719 pub struct NACodecInfo {
721 properties: NACodecTypeInfo,
722 extradata: Option<Arc<Vec<u8>>>,
725 /// A specialised type for reference-counted `NACodecInfo`.
726 pub type NACodecInfoRef = Arc<NACodecInfo>;
729 /// Constructs a new instance of `NACodecInfo`.
730 pub fn new(name: &'static str, p: NACodecTypeInfo, edata: Option<Vec<u8>>) -> Self {
731 let extradata = match edata {
733 Some(vec) => Some(Arc::new(vec)),
735 NACodecInfo { name, properties: p, extradata }
737 /// Constructs a new reference-counted instance of `NACodecInfo`.
738 pub fn new_ref(name: &'static str, p: NACodecTypeInfo, edata: Option<Arc<Vec<u8>>>) -> Self {
739 NACodecInfo { name, properties: p, extradata: edata }
741 /// Converts current instance into a reference-counted one.
742 pub fn into_ref(self) -> NACodecInfoRef { Arc::new(self) }
743 /// Returns codec information.
744 pub fn get_properties(&self) -> NACodecTypeInfo { self.properties }
745 /// Returns additional initialisation data required by the codec.
746 pub fn get_extradata(&self) -> Option<Arc<Vec<u8>>> {
747 if let Some(ref vec) = self.extradata { return Some(vec.clone()); }
750 /// Returns codec name.
751 pub fn get_name(&self) -> &'static str { self.name }
752 /// Reports whether it is a video codec.
753 pub fn is_video(&self) -> bool {
754 if let NACodecTypeInfo::Video(_) = self.properties { return true; }
757 /// Reports whether it is an audio codec.
758 pub fn is_audio(&self) -> bool {
759 if let NACodecTypeInfo::Audio(_) = self.properties { return true; }
762 /// Constructs a new empty reference-counted instance of `NACodecInfo`.
763 pub fn new_dummy() -> Arc<Self> {
764 Arc::new(DUMMY_CODEC_INFO)
766 /// Updates codec infomation.
767 pub fn replace_info(&self, p: NACodecTypeInfo) -> Arc<Self> {
768 Arc::new(NACodecInfo { name: self.name, properties: p, extradata: self.extradata.clone() })
772 impl Default for NACodecInfo {
773 fn default() -> Self { DUMMY_CODEC_INFO }
776 impl fmt::Display for NACodecInfo {
777 fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
778 let edata = match self.extradata.clone() {
779 None => "no extradata".to_string(),
780 Some(v) => format!("{} byte(s) of extradata", v.len()),
782 write!(f, "{}: {} {}", self.name, self.properties, edata)
786 /// Default empty codec information.
787 pub const DUMMY_CODEC_INFO: NACodecInfo = NACodecInfo {
789 properties: NACodecTypeInfo::None,
792 /// A list of accepted option values.
793 #[derive(Debug,Clone)]
799 /// Long integer value.
803 /// Binary data value.
/// A list of recognized frame types.
#[derive(Debug,Clone,Copy,PartialEq)]
pub enum FrameType {
    /// Intra frame type.
    I,
    /// Inter frame type.
    P,
    /// Bidirectionally predicted frame.
    B,
    /// Skip frame.
    ///
    /// When such frame is encountered then last frame should be used again if it is needed.
    Skip,
    /// Some other frame type.
    Other,
}

impl fmt::Display for FrameType {
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        let name = match *self {
            FrameType::I     => "I",
            FrameType::P     => "P",
            FrameType::B     => "B",
            FrameType::Skip  => "skip",
            FrameType::Other => "x",
        };
        f.write_str(name)
    }
}
/// Timestamp information.
#[derive(Debug,Clone,Copy)]
pub struct NATimeInfo {
    /// Presentation timestamp.
    pub pts: Option<u64>,
    /// Decode timestamp.
    pub dts: Option<u64>,
    /// Duration (in timebase units).
    pub duration: Option<u64>,
    /// Timebase numerator.
    pub tb_num: u32,
    /// Timebase denominator.
    pub tb_den: u32,
}

impl NATimeInfo {
    /// Constructs a new `NATimeInfo` instance.
    pub fn new(pts: Option<u64>, dts: Option<u64>, duration: Option<u64>, tb_num: u32, tb_den: u32) -> Self {
        NATimeInfo { pts, dts, duration, tb_num, tb_den }
    }
    /// Returns presentation timestamp.
    pub fn get_pts(&self) -> Option<u64> { self.pts }
    /// Returns decoding timestamp.
    pub fn get_dts(&self) -> Option<u64> { self.dts }
    /// Returns duration.
    pub fn get_duration(&self) -> Option<u64> { self.duration }
    /// Sets new presentation timestamp.
    pub fn set_pts(&mut self, pts: Option<u64>) { self.pts = pts; }
    /// Sets new decoding timestamp.
    pub fn set_dts(&mut self, dts: Option<u64>) { self.dts = dts; }
    /// Sets new duration.
    pub fn set_duration(&mut self, dur: Option<u64>) { self.duration = dur; }
}
870 /// Converts time in given scale into timestamp in given base.
871 pub fn time_to_ts(time: u64, base: u64, tb_num: u32, tb_den: u32) -> u64 {
872 let tb_num = tb_num as u64;
873 let tb_den = tb_den as u64;
874 let tmp = time.checked_mul(tb_num);
875 if let Some(tmp) = tmp {
878 let tmp = time.checked_mul(tb_num);
879 if let Some(tmp) = tmp {
882 let coarse = time / base;
883 let tmp = coarse.checked_mul(tb_num);
884 if let Some(tmp) = tmp {
887 (coarse / tb_den) * tb_num
892 /// Converts timestamp in given base into time in given scale.
893 pub fn ts_to_time(ts: u64, base: u64, tb_num: u32, tb_den: u32) -> u64 {
894 let tb_num = tb_num as u64;
895 let tb_den = tb_den as u64;
896 let tmp = ts.checked_mul(base);
897 if let Some(tmp) = tmp {
898 let tmp2 = tmp.checked_mul(tb_num);
899 if let Some(tmp2) = tmp2 {
902 (tmp / tb_den) * tb_num
905 let tmp = ts.checked_mul(tb_num);
906 if let Some(tmp) = tmp {
907 (tmp / tb_den) * base
909 (ts / tb_den) * base * tb_num
915 /// Decoded frame information.
923 buffer: NABufferType,
924 info: NACodecInfoRef,
926 pub frame_type: FrameType,
929 // options: HashMap<String, NAValue>,
932 /// A specialised type for reference-counted `NAFrame`.
933 pub type NAFrameRef = Arc<NAFrame>;
935 fn get_plane_size(info: &NAVideoInfo, idx: usize) -> (usize, usize) {
936 let chromaton = info.get_format().get_chromaton(idx);
937 if chromaton.is_none() { return (0, 0); }
938 let (hs, vs) = chromaton.unwrap().get_subsampling();
939 let w = (info.get_width() + ((1 << hs) - 1)) >> hs;
940 let h = (info.get_height() + ((1 << vs) - 1)) >> vs;
945 /// Constructs a new `NAFrame` instance.
946 pub fn new(ts: NATimeInfo,
949 info: NACodecInfoRef,
950 /*options: HashMap<String, NAValue>,*/
951 buffer: NABufferType) -> Self {
952 NAFrame { ts, id: 0, buffer, info, frame_type: ftype, key: keyframe/*, options*/ }
954 /// Returns frame format information.
955 pub fn get_info(&self) -> NACodecInfoRef { self.info.clone() }
956 /// Returns frame type.
957 pub fn get_frame_type(&self) -> FrameType { self.frame_type }
958 /// Reports whether the frame is a keyframe.
959 pub fn is_keyframe(&self) -> bool { self.key }
960 /// Sets new frame type.
961 pub fn set_frame_type(&mut self, ftype: FrameType) { self.frame_type = ftype; }
962 /// Sets keyframe flag.
963 pub fn set_keyframe(&mut self, key: bool) { self.key = key; }
964 /// Returns frame timestamp.
965 pub fn get_time_information(&self) -> NATimeInfo { self.ts }
966 /// Returns frame presentation time.
967 pub fn get_pts(&self) -> Option<u64> { self.ts.get_pts() }
968 /// Returns frame decoding time.
969 pub fn get_dts(&self) -> Option<u64> { self.ts.get_dts() }
970 /// Returns picture ID.
971 pub fn get_id(&self) -> i64 { self.id }
972 /// Returns frame display duration.
973 pub fn get_duration(&self) -> Option<u64> { self.ts.get_duration() }
974 /// Sets new presentation timestamp.
975 pub fn set_pts(&mut self, pts: Option<u64>) { self.ts.set_pts(pts); }
976 /// Sets new decoding timestamp.
977 pub fn set_dts(&mut self, dts: Option<u64>) { self.ts.set_dts(dts); }
978 /// Sets new picture ID.
979 pub fn set_id(&mut self, id: i64) { self.id = id; }
980 /// Sets new duration.
981 pub fn set_duration(&mut self, dur: Option<u64>) { self.ts.set_duration(dur); }
983 /// Returns a reference to the frame data.
984 pub fn get_buffer(&self) -> NABufferType { self.buffer.clone() }
986 /// Converts current instance into a reference-counted one.
987 pub fn into_ref(self) -> NAFrameRef { Arc::new(self) }
989 /// Creates new frame with metadata from `NAPacket`.
990 pub fn new_from_pkt(pkt: &NAPacket, info: NACodecInfoRef, buf: NABufferType) -> NAFrame {
991 NAFrame::new(pkt.ts, FrameType::Other, pkt.keyframe, info, /*HashMap::new(),*/ buf)
995 impl fmt::Display for NAFrame {
996 fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
997 let mut ostr = format!("frame type {}", self.frame_type);
998 if let Some(pts) = self.ts.pts { ostr = format!("{} pts {}", ostr, pts); }
999 if let Some(dts) = self.ts.dts { ostr = format!("{} dts {}", ostr, dts); }
1000 if let Some(dur) = self.ts.duration { ostr = format!("{} duration {}", ostr, dur); }
1001 if self.key { ostr = format!("{} kf", ostr); }
1002 write!(f, "[{}]", ostr)
/// A list of possible stream types.
#[derive(Debug,Clone,Copy,PartialEq)]
pub enum StreamType {
    /// Video stream.
    Video,
    /// Audio stream.
    Audio,
    /// Subtitles.
    Subtitles,
    /// Any data stream (or might be an unrecognized audio/video stream).
    Data,
    /// Nonexistent stream.
    None,
}

impl fmt::Display for StreamType {
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        let name = match *self {
            StreamType::Video     => "Video",
            StreamType::Audio     => "Audio",
            StreamType::Subtitles => "Subtitles",
            StreamType::Data      => "Data",
            StreamType::None      => "-",
        };
        f.write_str(name)
    }
}
1037 pub struct NAStream {
1038 media_type: StreamType,
1042 info: NACodecInfoRef,
1043 /// Timebase numerator.
1045 /// Timebase denominator.
1049 /// A specialised reference-counted `NAStream` type.
1050 pub type NAStreamRef = Arc<NAStream>;
/// Downscales the timebase by the greatest common divisor of its parts.
///
/// A zero numerator is returned unchanged; an exact divisor is handled as a
/// fast path.  `reduce_timebase(4, 6)` yields `(2, 3)`.
pub fn reduce_timebase(tb_num: u32, tb_den: u32) -> (u32, u32) {
    if tb_num == 0 { return (tb_num, tb_den); }
    if (tb_den % tb_num) == 0 { return (1, tb_den / tb_num); }

    // Euclid's modulo-based GCD: O(log min(a, b)) steps, unlike the
    // subtraction form which degrades to O(max/min) for skewed inputs.
    let mut a = tb_num;
    let mut b = tb_den;
    while b != 0 {
        let r = a % b;
        a = b;
        b = r;
    }

    (tb_num / a, tb_den / a)
}
1069 /// Constructs a new `NAStream` instance.
1070 pub fn new(mt: StreamType, id: u32, info: NACodecInfo, tb_num: u32, tb_den: u32) -> Self {
1071 let (n, d) = reduce_timebase(tb_num, tb_den);
1072 NAStream { media_type: mt, id, num: 0, info: info.into_ref(), tb_num: n, tb_den: d }
1074 /// Returns stream id.
1075 pub fn get_id(&self) -> u32 { self.id }
1076 /// Returns stream type.
1077 pub fn get_media_type(&self) -> StreamType { self.media_type }
1078 /// Returns stream number assigned by demuxer.
1079 pub fn get_num(&self) -> usize { self.num }
1080 /// Sets stream number.
1081 pub fn set_num(&mut self, num: usize) { self.num = num; }
1082 /// Returns codec information.
1083 pub fn get_info(&self) -> NACodecInfoRef { self.info.clone() }
1084 /// Returns stream timebase.
1085 pub fn get_timebase(&self) -> (u32, u32) { (self.tb_num, self.tb_den) }
1086 /// Sets new stream timebase.
1087 pub fn set_timebase(&mut self, tb_num: u32, tb_den: u32) {
1088 let (n, d) = reduce_timebase(tb_num, tb_den);
1092 /// Converts current instance into a reference-counted one.
1093 pub fn into_ref(self) -> NAStreamRef { Arc::new(self) }
1096 impl fmt::Display for NAStream {
1097 fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
1098 write!(f, "({}#{} @ {}/{} - {})", self.media_type, self.id, self.tb_num, self.tb_den, self.info.get_properties())
1102 /// Packet with compressed data.
1104 pub struct NAPacket {
1105 stream: NAStreamRef,
1106 /// Packet timestamp.
1108 buffer: NABufferRef<Vec<u8>>,
1111 // options: HashMap<String, NAValue<'a>>,
1115 /// Constructs a new `NAPacket` instance.
1116 pub fn new(str: NAStreamRef, ts: NATimeInfo, kf: bool, vec: Vec<u8>) -> Self {
1117 // let mut vec: Vec<u8> = Vec::new();
1118 // vec.resize(size, 0);
1119 NAPacket { stream: str, ts, keyframe: kf, buffer: NABufferRef::new(vec) }
1121 /// Returns information about the stream packet belongs to.
1122 pub fn get_stream(&self) -> NAStreamRef { self.stream.clone() }
1123 /// Returns packet timestamp.
1124 pub fn get_time_information(&self) -> NATimeInfo { self.ts }
1125 /// Returns packet presentation timestamp.
1126 pub fn get_pts(&self) -> Option<u64> { self.ts.get_pts() }
1127 /// Returns packet decoding timestamp.
1128 pub fn get_dts(&self) -> Option<u64> { self.ts.get_dts() }
1129 /// Returns packet duration.
1130 pub fn get_duration(&self) -> Option<u64> { self.ts.get_duration() }
1131 /// Reports whether this is a keyframe packet.
1132 pub fn is_keyframe(&self) -> bool { self.keyframe }
1133 /// Returns a reference to packet data.
1134 pub fn get_buffer(&self) -> NABufferRef<Vec<u8>> { self.buffer.clone() }
1137 impl Drop for NAPacket {
1138 fn drop(&mut self) {}
1141 impl fmt::Display for NAPacket {
1142 fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
1143 let mut ostr = format!("[pkt for {} size {}", self.stream, self.buffer.len());
1144 if let Some(pts) = self.ts.pts { ostr = format!("{} pts {}", ostr, pts); }
1145 if let Some(dts) = self.ts.dts { ostr = format!("{} dts {}", ostr, dts); }
1146 if let Some(dur) = self.ts.duration { ostr = format!("{} duration {}", ostr, dur); }
1147 if self.keyframe { ostr = format!("{} kf", ostr); }
1149 write!(f, "{}", ostr)