core/frame: add proper function for audio frame truncation
[nihav.git] / nihav-core / src / frame.rs
1 //! Packets and decoded frames functionality.
2 use std::cmp::max;
3 //use std::collections::HashMap;
4 use std::fmt;
5 pub use std::sync::Arc;
6 pub use crate::formats::*;
7 pub use crate::refs::*;
8 use std::str::FromStr;
9
/// Audio stream information.
#[allow(dead_code)]
#[derive(Clone,Copy,PartialEq)]
pub struct NAAudioInfo {
    /// Sample rate in Hertz.
    pub sample_rate: u32,
    /// Number of channels.
    pub channels:    u8,
    /// Audio sample format.
    pub format:      NASoniton,
    /// Length of one audio block in samples.
    pub block_len:   usize,
}
23
24 impl NAAudioInfo {
25 /// Constructs a new `NAAudioInfo` instance.
26 pub fn new(sr: u32, ch: u8, fmt: NASoniton, bl: usize) -> Self {
27 NAAudioInfo { sample_rate: sr, channels: ch, format: fmt, block_len: bl }
28 }
29 /// Returns audio sample rate.
30 pub fn get_sample_rate(&self) -> u32 { self.sample_rate }
31 /// Returns the number of channels.
32 pub fn get_channels(&self) -> u8 { self.channels }
33 /// Returns sample format.
34 pub fn get_format(&self) -> NASoniton { self.format }
35 /// Returns one audio block duration in samples.
36 pub fn get_block_len(&self) -> usize { self.block_len }
37 }
38
39 impl fmt::Display for NAAudioInfo {
40 fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
41 write!(f, "{} Hz, {} ch", self.sample_rate, self.channels)
42 }
43 }
44
/// Video stream information.
#[allow(dead_code)]
#[derive(Clone,Copy,PartialEq)]
pub struct NAVideoInfo {
    /// Picture width.
    pub width:   usize,
    /// Picture height.
    pub height:  usize,
    /// Picture is stored downside up.
    pub flipped: bool,
    /// Picture pixel format.
    pub format:  NAPixelFormaton,
    /// Declared bits per sample (derived from the pixel format total depth).
    pub bits:    u8,
}
60
61 impl NAVideoInfo {
62 /// Constructs a new `NAVideoInfo` instance.
63 pub fn new(w: usize, h: usize, flip: bool, fmt: NAPixelFormaton) -> Self {
64 let bits = fmt.get_total_depth();
65 NAVideoInfo { width: w, height: h, flipped: flip, format: fmt, bits }
66 }
67 /// Returns picture width.
68 pub fn get_width(&self) -> usize { self.width as usize }
69 /// Returns picture height.
70 pub fn get_height(&self) -> usize { self.height as usize }
71 /// Returns picture orientation.
72 pub fn is_flipped(&self) -> bool { self.flipped }
73 /// Returns picture pixel format.
74 pub fn get_format(&self) -> NAPixelFormaton { self.format }
75 /// Sets new picture width.
76 pub fn set_width(&mut self, w: usize) { self.width = w; }
77 /// Sets new picture height.
78 pub fn set_height(&mut self, h: usize) { self.height = h; }
79 }
80
81 impl fmt::Display for NAVideoInfo {
82 fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
83 write!(f, "{}x{}", self.width, self.height)
84 }
85 }
86
/// A list of possible stream information types.
#[derive(Clone,Copy,PartialEq)]
pub enum NACodecTypeInfo {
    /// No codec present.
    None,
    /// Audio codec information.
    Audio(NAAudioInfo),
    /// Video codec information.
    Video(NAVideoInfo),
}
97
98 impl NACodecTypeInfo {
99 /// Returns video stream information.
100 pub fn get_video_info(&self) -> Option<NAVideoInfo> {
101 match *self {
102 NACodecTypeInfo::Video(vinfo) => Some(vinfo),
103 _ => None,
104 }
105 }
106 /// Returns audio stream information.
107 pub fn get_audio_info(&self) -> Option<NAAudioInfo> {
108 match *self {
109 NACodecTypeInfo::Audio(ainfo) => Some(ainfo),
110 _ => None,
111 }
112 }
113 /// Reports whether the current stream is video stream.
114 pub fn is_video(&self) -> bool {
115 match *self {
116 NACodecTypeInfo::Video(_) => true,
117 _ => false,
118 }
119 }
120 /// Reports whether the current stream is audio stream.
121 pub fn is_audio(&self) -> bool {
122 match *self {
123 NACodecTypeInfo::Audio(_) => true,
124 _ => false,
125 }
126 }
127 }
128
129 impl fmt::Display for NACodecTypeInfo {
130 fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
131 let ret = match *self {
132 NACodecTypeInfo::None => "".to_string(),
133 NACodecTypeInfo::Audio(fmt) => format!("{}", fmt),
134 NACodecTypeInfo::Video(fmt) => format!("{}", fmt),
135 };
136 write!(f, "{}", ret)
137 }
138 }
139
/// Decoded video frame.
///
/// NihAV frames are stored in native type (8/16/32-bit elements) inside a single buffer.
/// In case of image with several components those components are stored sequentially and can be accessed in the buffer starting at corresponding component offset.
#[derive(Clone)]
pub struct NAVideoBuffer<T> {
    info:    NAVideoInfo,
    data:    NABufferRef<Vec<T>>,
    // start of each component inside the data buffer
    offs:    Vec<usize>,
    // distance between subsequent lines for each component
    strides: Vec<usize>,
}
151
impl<T: Clone> NAVideoBuffer<T> {
    /// Constructs video buffer from the provided components.
    pub fn from_raw_parts(info: NAVideoInfo, data: NABufferRef<Vec<T>>, offs: Vec<usize>, strides: Vec<usize>) -> Self {
        Self { info, data, offs, strides }
    }
    /// Returns the component offset (0 for all unavailable offsets).
    pub fn get_offset(&self, idx: usize) -> usize {
        if idx >= self.offs.len() { 0 }
        else { self.offs[idx] }
    }
    /// Returns picture info.
    pub fn get_info(&self) -> NAVideoInfo { self.info }
    /// Returns an immutable reference to the data.
    pub fn get_data(&self) -> &Vec<T> { self.data.as_ref() }
    /// Returns a mutable reference to the data.
    ///
    /// NOTE(review): `NABufferRef::as_mut` presumably yields `None` when the
    /// buffer is shared — confirm against `refs.rs`.
    pub fn get_data_mut(&mut self) -> Option<&mut Vec<T>> { self.data.as_mut() }
    /// Returns the number of components in picture format.
    pub fn get_num_components(&self) -> usize { self.offs.len() }
    /// Creates a copy of current `NAVideoBuffer`.
    ///
    /// The copy gets its own freshly allocated data storage, so it can be
    /// modified independently of the original.
    pub fn copy_buffer(&mut self) -> Self {
        let mut data: Vec<T> = Vec::with_capacity(self.data.len());
        data.clone_from(self.data.as_ref());
        let mut offs: Vec<usize> = Vec::with_capacity(self.offs.len());
        offs.clone_from(&self.offs);
        let mut strides: Vec<usize> = Vec::with_capacity(self.strides.len());
        strides.clone_from(&self.strides);
        NAVideoBuffer { info: self.info, data: NABufferRef::new(data), offs, strides }
    }
    /// Returns stride (distance between subsequent lines) for the requested component.
    ///
    /// Returns 0 when no stride is stored for the component.
    pub fn get_stride(&self, idx: usize) -> usize {
        if idx >= self.strides.len() { return 0; }
        self.strides[idx]
    }
    /// Returns requested component dimensions.
    pub fn get_dimensions(&self, idx: usize) -> (usize, usize) {
        get_plane_size(&self.info, idx)
    }
    /// Converts current instance into buffer reference.
    pub fn into_ref(self) -> NABufferRef<Self> {
        NABufferRef::new(self)
    }

    // Dumps buffer layout (size, format, offsets and strides) to stdout for debugging.
    fn print_contents(&self, datatype: &str) {
        println!("{} video buffer size {}", datatype, self.data.len());
        println!(" format {}", self.info);
        print!(" offsets:");
        for off in self.offs.iter() {
            print!(" {}", *off);
        }
        println!();
        print!(" strides:");
        for stride in self.strides.iter() {
            print!(" {}", *stride);
        }
        println!();
    }
}
209
/// A specialised type for reference-counted `NAVideoBuffer`.
pub type NAVideoBufferRef<T> = NABufferRef<NAVideoBuffer<T>>;
212
/// Decoded audio frame.
///
/// NihAV frames are stored in native type (8/16/32-bit elements) inside a single buffer.
/// In case of planar audio samples for each channel are stored sequentially and can be accessed in the buffer starting at corresponding channel offset.
#[derive(Clone)]
pub struct NAAudioBuffer<T> {
    info:   NAAudioInfo,
    data:   NABufferRef<Vec<T>>,
    // start of each channel inside the data buffer
    offs:   Vec<usize>,
    // distance between the starts of two consecutive channels
    stride: usize,
    // distance between two samples of the same channel
    step:   usize,
    chmap:  NAChannelMap,
    // frame duration in samples
    len:    usize,
}
227
impl<T: Clone> NAAudioBuffer<T> {
    /// Returns the start position of requested channel data.
    ///
    /// Returns 0 when the channel index is out of range.
    pub fn get_offset(&self, idx: usize) -> usize {
        if idx >= self.offs.len() { 0 }
        else { self.offs[idx] }
    }
    /// Returns the distance between the start of one channel and the next one.
    pub fn get_stride(&self) -> usize { self.stride }
    /// Returns the distance between the samples in one channel.
    pub fn get_step(&self) -> usize { self.step }
    /// Returns audio format information.
    pub fn get_info(&self) -> NAAudioInfo { self.info }
    /// Returns channel map.
    pub fn get_chmap(&self) -> &NAChannelMap { &self.chmap }
    /// Returns an immutable reference to the data.
    pub fn get_data(&self) -> &Vec<T> { self.data.as_ref() }
    /// Returns reference to the data.
    pub fn get_data_ref(&self) -> NABufferRef<Vec<T>> { self.data.clone() }
    /// Returns a mutable reference to the data.
    ///
    /// NOTE(review): `NABufferRef::as_mut` presumably yields `None` when the
    /// buffer is shared — confirm against `refs.rs`.
    pub fn get_data_mut(&mut self) -> Option<&mut Vec<T>> { self.data.as_mut() }
    /// Clones current `NAAudioBuffer` into a new one.
    ///
    /// The copy gets its own freshly allocated data storage, so it can be
    /// modified independently of the original.
    pub fn copy_buffer(&mut self) -> Self {
        let mut data: Vec<T> = Vec::with_capacity(self.data.len());
        data.clone_from(self.data.as_ref());
        let mut offs: Vec<usize> = Vec::with_capacity(self.offs.len());
        offs.clone_from(&self.offs);
        NAAudioBuffer { info: self.info, data: NABufferRef::new(data), offs, chmap: self.get_chmap().clone(), len: self.len, stride: self.stride, step: self.step }
    }
    /// Return the length of frame in samples.
    pub fn get_length(&self) -> usize { self.len }
    /// Truncates buffer length if possible.
    ///
    /// In case when new length is larger than old length nothing is done.
    /// Only the reported duration changes; the underlying storage is left intact.
    pub fn truncate(&mut self, new_len: usize) {
        self.len = self.len.min(new_len);
    }

    // Dumps buffer layout (stride, step, format, channel map, offsets) to stdout for debugging.
    fn print_contents(&self, datatype: &str) {
        println!("Audio buffer with {} data, stride {}, step {}", datatype, self.stride, self.step);
        println!(" format {}", self.info);
        println!(" channel map {}", self.chmap);
        print!(" offsets:");
        for off in self.offs.iter() {
            print!(" {}", *off);
        }
        println!();
    }
}
276
277 impl NAAudioBuffer<u8> {
278 /// Constructs a new `NAAudioBuffer` instance.
279 pub fn new_from_buf(info: NAAudioInfo, data: NABufferRef<Vec<u8>>, chmap: NAChannelMap) -> Self {
280 let len = data.len();
281 NAAudioBuffer { info, data, chmap, offs: Vec::new(), len, stride: 0, step: 0 }
282 }
283 }
284
/// A list of possible decoded frame types.
#[derive(Clone)]
pub enum NABufferType {
    /// 8-bit video buffer.
    Video      (NAVideoBufferRef<u8>),
    /// 16-bit video buffer (i.e. every component or packed pixel fits into 16 bits).
    Video16    (NAVideoBufferRef<u16>),
    /// 32-bit video buffer (i.e. every component or packed pixel fits into 32 bits).
    Video32    (NAVideoBufferRef<u32>),
    /// Packed video buffer.
    VideoPacked(NAVideoBufferRef<u8>),
    /// Audio buffer with 8-bit unsigned integer audio.
    AudioU8    (NAAudioBuffer<u8>),
    /// Audio buffer with 16-bit signed integer audio.
    AudioI16   (NAAudioBuffer<i16>),
    /// Audio buffer with 32-bit signed integer audio.
    AudioI32   (NAAudioBuffer<i32>),
    /// Audio buffer with 32-bit floating point audio.
    AudioF32   (NAAudioBuffer<f32>),
    /// Packed audio buffer.
    AudioPacked(NAAudioBuffer<u8>),
    /// Buffer with generic data (e.g. subtitles).
    Data       (NABufferRef<Vec<u8>>),
    /// No data present.
    None,
}
311
impl NABufferType {
    /// Returns the offset to the requested component or channel.
    ///
    /// Returns 0 for `Data` and `None` buffers.
    pub fn get_offset(&self, idx: usize) -> usize {
        match *self {
            NABufferType::Video(ref vb)       => vb.get_offset(idx),
            NABufferType::Video16(ref vb)     => vb.get_offset(idx),
            NABufferType::Video32(ref vb)     => vb.get_offset(idx),
            NABufferType::VideoPacked(ref vb) => vb.get_offset(idx),
            NABufferType::AudioU8(ref ab)     => ab.get_offset(idx),
            NABufferType::AudioI16(ref ab)    => ab.get_offset(idx),
            NABufferType::AudioI32(ref ab)    => ab.get_offset(idx),
            NABufferType::AudioF32(ref ab)    => ab.get_offset(idx),
            NABufferType::AudioPacked(ref ab) => ab.get_offset(idx),
            _ => 0,
        }
    }
    /// Returns information for video frames (`None` for non-video buffers).
    pub fn get_video_info(&self) -> Option<NAVideoInfo> {
        match *self {
            NABufferType::Video(ref vb)       => Some(vb.get_info()),
            NABufferType::Video16(ref vb)     => Some(vb.get_info()),
            NABufferType::Video32(ref vb)     => Some(vb.get_info()),
            NABufferType::VideoPacked(ref vb) => Some(vb.get_info()),
            _ => None,
        }
    }
    /// Returns reference to 8-bit (or packed) video buffer.
    pub fn get_vbuf(&self) -> Option<NAVideoBufferRef<u8>> {
        match *self {
            NABufferType::Video(ref vb)       => Some(vb.clone()),
            NABufferType::VideoPacked(ref vb) => Some(vb.clone()),
            _ => None,
        }
    }
    /// Returns reference to 16-bit video buffer.
    pub fn get_vbuf16(&self) -> Option<NAVideoBufferRef<u16>> {
        match *self {
            NABufferType::Video16(ref vb) => Some(vb.clone()),
            _ => None,
        }
    }
    /// Returns reference to 32-bit video buffer.
    pub fn get_vbuf32(&self) -> Option<NAVideoBufferRef<u32>> {
        match *self {
            NABufferType::Video32(ref vb) => Some(vb.clone()),
            _ => None,
        }
    }
    /// Returns information for audio frames (`None` for non-audio buffers).
    pub fn get_audio_info(&self) -> Option<NAAudioInfo> {
        match *self {
            NABufferType::AudioU8(ref ab)     => Some(ab.get_info()),
            NABufferType::AudioI16(ref ab)    => Some(ab.get_info()),
            NABufferType::AudioI32(ref ab)    => Some(ab.get_info()),
            NABufferType::AudioF32(ref ab)    => Some(ab.get_info()),
            NABufferType::AudioPacked(ref ab) => Some(ab.get_info()),
            _ => None,
        }
    }
    /// Returns audio channel map (`None` for non-audio buffers).
    pub fn get_chmap(&self) -> Option<&NAChannelMap> {
        match *self {
            NABufferType::AudioU8(ref ab)     => Some(ab.get_chmap()),
            NABufferType::AudioI16(ref ab)    => Some(ab.get_chmap()),
            NABufferType::AudioI32(ref ab)    => Some(ab.get_chmap()),
            NABufferType::AudioF32(ref ab)    => Some(ab.get_chmap()),
            NABufferType::AudioPacked(ref ab) => Some(ab.get_chmap()),
            _ => None,
        }
    }
    /// Returns audio frame duration in samples (0 for non-audio buffers).
    pub fn get_audio_length(&self) -> usize {
        match *self {
            NABufferType::AudioU8(ref ab)     => ab.get_length(),
            NABufferType::AudioI16(ref ab)    => ab.get_length(),
            NABufferType::AudioI32(ref ab)    => ab.get_length(),
            NABufferType::AudioF32(ref ab)    => ab.get_length(),
            NABufferType::AudioPacked(ref ab) => ab.get_length(),
            _ => 0,
        }
    }
    /// Truncates audio frame duration if possible.
    ///
    /// Delegates to `NAAudioBuffer::truncate`; no-op for non-audio buffers
    /// and when the new length is not smaller than the current one.
    pub fn truncate_audio(&mut self, len: usize) {
        match *self {
            NABufferType::AudioU8(ref mut ab)     => ab.truncate(len),
            NABufferType::AudioI16(ref mut ab)    => ab.truncate(len),
            NABufferType::AudioI32(ref mut ab)    => ab.truncate(len),
            NABufferType::AudioF32(ref mut ab)    => ab.truncate(len),
            NABufferType::AudioPacked(ref mut ab) => ab.truncate(len),
            _ => {},
        };
    }
    /// Returns the distance between starts of two channels (0 for non-audio buffers).
    pub fn get_audio_stride(&self) -> usize {
        match *self {
            NABufferType::AudioU8(ref ab)     => ab.get_stride(),
            NABufferType::AudioI16(ref ab)    => ab.get_stride(),
            NABufferType::AudioI32(ref ab)    => ab.get_stride(),
            NABufferType::AudioF32(ref ab)    => ab.get_stride(),
            NABufferType::AudioPacked(ref ab) => ab.get_stride(),
            _ => 0,
        }
    }
    /// Returns the distance between two samples in one channel (0 for non-audio buffers).
    pub fn get_audio_step(&self) -> usize {
        match *self {
            NABufferType::AudioU8(ref ab)     => ab.get_step(),
            NABufferType::AudioI16(ref ab)    => ab.get_step(),
            NABufferType::AudioI32(ref ab)    => ab.get_step(),
            NABufferType::AudioF32(ref ab)    => ab.get_step(),
            NABufferType::AudioPacked(ref ab) => ab.get_step(),
            _ => 0,
        }
    }
    /// Returns reference to 8-bit (or packed) audio buffer.
    pub fn get_abuf_u8(&self) -> Option<NAAudioBuffer<u8>> {
        match *self {
            NABufferType::AudioU8(ref ab)     => Some(ab.clone()),
            NABufferType::AudioPacked(ref ab) => Some(ab.clone()),
            _ => None,
        }
    }
    /// Returns reference to 16-bit audio buffer.
    pub fn get_abuf_i16(&self) -> Option<NAAudioBuffer<i16>> {
        match *self {
            NABufferType::AudioI16(ref ab) => Some(ab.clone()),
            _ => None,
        }
    }
    /// Returns reference to 32-bit integer audio buffer.
    pub fn get_abuf_i32(&self) -> Option<NAAudioBuffer<i32>> {
        match *self {
            NABufferType::AudioI32(ref ab) => Some(ab.clone()),
            _ => None,
        }
    }
    /// Returns reference to 32-bit floating point audio buffer.
    pub fn get_abuf_f32(&self) -> Option<NAAudioBuffer<f32>> {
        match *self {
            NABufferType::AudioF32(ref ab) => Some(ab.clone()),
            _ => None,
        }
    }
    /// Prints internal buffer layout to stdout (for debugging).
    pub fn print_buffer_metadata(&self) {
        match *self {
            NABufferType::Video(ref buf)       => buf.print_contents("8-bit"),
            NABufferType::Video16(ref buf)     => buf.print_contents("16-bit"),
            NABufferType::Video32(ref buf)     => buf.print_contents("32-bit"),
            NABufferType::VideoPacked(ref buf) => buf.print_contents("packed"),
            NABufferType::AudioU8(ref buf)     => buf.print_contents("8-bit unsigned integer"),
            NABufferType::AudioI16(ref buf)    => buf.print_contents("16-bit integer"),
            NABufferType::AudioI32(ref buf)    => buf.print_contents("32-bit integer"),
            NABufferType::AudioF32(ref buf)    => buf.print_contents("32-bit float"),
            NABufferType::AudioPacked(ref buf) => buf.print_contents("packed"),
            NABufferType::Data(ref buf) => { println!("Data buffer, len = {}", buf.len()); },
            NABufferType::None          => { println!("No buffer"); },
        };
    }
}
472
// Maximum number of picture components supported by the simplified frame view.
const NA_SIMPLE_VFRAME_COMPONENTS: usize = 4;
/// Simplified decoded frame data.
pub struct NASimpleVideoFrame<'a, T: Copy> {
    /// Widths of each picture component.
    pub width:      [usize; NA_SIMPLE_VFRAME_COMPONENTS],
    /// Heights of each picture component.
    pub height:     [usize; NA_SIMPLE_VFRAME_COMPONENTS],
    /// Orientation (upside-down or downside-up) flag.
    pub flip:       bool,
    /// Strides for each component.
    pub stride:     [usize; NA_SIMPLE_VFRAME_COMPONENTS],
    /// Start of each component.
    pub offset:     [usize; NA_SIMPLE_VFRAME_COMPONENTS],
    /// Number of components.
    pub components: usize,
    /// Pointer to the picture pixel data.
    pub data:       &'a mut [T],
}
491
492 impl<'a, T:Copy> NASimpleVideoFrame<'a, T> {
493 /// Constructs a new instance of `NASimpleVideoFrame` from `NAVideoBuffer`.
494 pub fn from_video_buf(vbuf: &'a mut NAVideoBuffer<T>) -> Option<Self> {
495 let vinfo = vbuf.get_info();
496 let components = vinfo.format.components as usize;
497 if components > NA_SIMPLE_VFRAME_COMPONENTS {
498 return None;
499 }
500 let mut w: [usize; NA_SIMPLE_VFRAME_COMPONENTS] = [0; NA_SIMPLE_VFRAME_COMPONENTS];
501 let mut h: [usize; NA_SIMPLE_VFRAME_COMPONENTS] = [0; NA_SIMPLE_VFRAME_COMPONENTS];
502 let mut s: [usize; NA_SIMPLE_VFRAME_COMPONENTS] = [0; NA_SIMPLE_VFRAME_COMPONENTS];
503 let mut o: [usize; NA_SIMPLE_VFRAME_COMPONENTS] = [0; NA_SIMPLE_VFRAME_COMPONENTS];
504 for comp in 0..components {
505 let (width, height) = vbuf.get_dimensions(comp);
506 w[comp] = width;
507 h[comp] = height;
508 s[comp] = vbuf.get_stride(comp);
509 o[comp] = vbuf.get_offset(comp);
510 }
511 let flip = vinfo.flipped;
512 Some(NASimpleVideoFrame {
513 width: w,
514 height: h,
515 flip,
516 stride: s,
517 offset: o,
518 components,
519 data: vbuf.data.as_mut_slice(),
520 })
521 }
522 }
523
/// A list of possible frame allocator errors.
#[derive(Debug,Clone,Copy,PartialEq)]
pub enum AllocatorError {
    /// Requested picture dimensions are too large.
    TooLargeDimensions,
    /// Invalid input format.
    FormatError,
}
532
533 /// Constructs a new video buffer with requested format.
534 ///
535 /// `align` is power of two alignment for image. E.g. the value of 5 means that frame dimensions will be padded to be multiple of 32.
536 pub fn alloc_video_buffer(vinfo: NAVideoInfo, align: u8) -> Result<NABufferType, AllocatorError> {
537 let fmt = &vinfo.format;
538 let mut new_size: usize = 0;
539 let mut offs: Vec<usize> = Vec::new();
540 let mut strides: Vec<usize> = Vec::new();
541
542 for i in 0..fmt.get_num_comp() {
543 if fmt.get_chromaton(i) == None { return Err(AllocatorError::FormatError); }
544 }
545
546 let align_mod = ((1 << align) as usize) - 1;
547 let width = ((vinfo.width as usize) + align_mod) & !align_mod;
548 let height = ((vinfo.height as usize) + align_mod) & !align_mod;
549 let mut max_depth = 0;
550 let mut all_packed = true;
551 let mut all_bytealigned = true;
552 for i in 0..fmt.get_num_comp() {
553 let ochr = fmt.get_chromaton(i);
554 if ochr.is_none() { continue; }
555 let chr = ochr.unwrap();
556 if !chr.is_packed() {
557 all_packed = false;
558 } else if ((chr.get_shift() + chr.get_depth()) & 7) != 0 {
559 all_bytealigned = false;
560 }
561 max_depth = max(max_depth, chr.get_depth());
562 }
563 let unfit_elem_size = match fmt.get_elem_size() {
564 2 | 4 => false,
565 _ => true,
566 };
567
568 //todo semi-packed like NV12
569 if fmt.is_paletted() {
570 //todo various-sized palettes?
571 let stride = vinfo.get_format().get_chromaton(0).unwrap().get_linesize(width);
572 let pic_sz = stride.checked_mul(height);
573 if pic_sz == None { return Err(AllocatorError::TooLargeDimensions); }
574 let pal_size = 256 * (fmt.get_elem_size() as usize);
575 let new_size = pic_sz.unwrap().checked_add(pal_size);
576 if new_size == None { return Err(AllocatorError::TooLargeDimensions); }
577 offs.push(0);
578 offs.push(stride * height);
579 strides.push(stride);
580 let data: Vec<u8> = vec![0; new_size.unwrap()];
581 let buf: NAVideoBuffer<u8> = NAVideoBuffer { data: NABufferRef::new(data), info: vinfo, offs, strides };
582 Ok(NABufferType::Video(buf.into_ref()))
583 } else if !all_packed {
584 for i in 0..fmt.get_num_comp() {
585 let ochr = fmt.get_chromaton(i);
586 if ochr.is_none() { continue; }
587 let chr = ochr.unwrap();
588 offs.push(new_size as usize);
589 let stride = chr.get_linesize(width);
590 let cur_h = chr.get_height(height);
591 let cur_sz = stride.checked_mul(cur_h);
592 if cur_sz == None { return Err(AllocatorError::TooLargeDimensions); }
593 let new_sz = new_size.checked_add(cur_sz.unwrap());
594 if new_sz == None { return Err(AllocatorError::TooLargeDimensions); }
595 new_size = new_sz.unwrap();
596 strides.push(stride);
597 }
598 if max_depth <= 8 {
599 let data: Vec<u8> = vec![0; new_size];
600 let buf: NAVideoBuffer<u8> = NAVideoBuffer { data: NABufferRef::new(data), info: vinfo, offs, strides };
601 Ok(NABufferType::Video(buf.into_ref()))
602 } else if max_depth <= 16 {
603 let data: Vec<u16> = vec![0; new_size];
604 let buf: NAVideoBuffer<u16> = NAVideoBuffer { data: NABufferRef::new(data), info: vinfo, offs, strides };
605 Ok(NABufferType::Video16(buf.into_ref()))
606 } else {
607 let data: Vec<u32> = vec![0; new_size];
608 let buf: NAVideoBuffer<u32> = NAVideoBuffer { data: NABufferRef::new(data), info: vinfo, offs, strides };
609 Ok(NABufferType::Video32(buf.into_ref()))
610 }
611 } else if all_bytealigned || unfit_elem_size {
612 let elem_sz = fmt.get_elem_size();
613 let line_sz = width.checked_mul(elem_sz as usize);
614 if line_sz == None { return Err(AllocatorError::TooLargeDimensions); }
615 let new_sz = line_sz.unwrap().checked_mul(height);
616 if new_sz == None { return Err(AllocatorError::TooLargeDimensions); }
617 new_size = new_sz.unwrap();
618 let data: Vec<u8> = vec![0; new_size];
619 strides.push(line_sz.unwrap());
620 let buf: NAVideoBuffer<u8> = NAVideoBuffer { data: NABufferRef::new(data), info: vinfo, offs, strides };
621 Ok(NABufferType::VideoPacked(buf.into_ref()))
622 } else {
623 let elem_sz = fmt.get_elem_size();
624 let new_sz = width.checked_mul(height);
625 if new_sz == None { return Err(AllocatorError::TooLargeDimensions); }
626 new_size = new_sz.unwrap();
627 match elem_sz {
628 2 => {
629 let data: Vec<u16> = vec![0; new_size];
630 strides.push(width);
631 let buf: NAVideoBuffer<u16> = NAVideoBuffer { data: NABufferRef::new(data), info: vinfo, offs, strides };
632 Ok(NABufferType::Video16(buf.into_ref()))
633 },
634 4 => {
635 let data: Vec<u32> = vec![0; new_size];
636 strides.push(width);
637 let buf: NAVideoBuffer<u32> = NAVideoBuffer { data: NABufferRef::new(data), info: vinfo, offs, strides };
638 Ok(NABufferType::Video32(buf.into_ref()))
639 },
640 _ => unreachable!(),
641 }
642 }
643 }
644
645 /// Constructs a new audio buffer for the requested format and length.
646 #[allow(clippy::collapsible_if)]
647 pub fn alloc_audio_buffer(ainfo: NAAudioInfo, nsamples: usize, chmap: NAChannelMap) -> Result<NABufferType, AllocatorError> {
648 let mut offs: Vec<usize> = Vec::new();
649 if ainfo.format.is_planar() || ((ainfo.format.get_bits() % 8) == 0) {
650 let len = nsamples.checked_mul(ainfo.channels as usize);
651 if len == None { return Err(AllocatorError::TooLargeDimensions); }
652 let length = len.unwrap();
653 let stride;
654 let step;
655 if ainfo.format.is_planar() {
656 stride = nsamples;
657 step = 1;
658 for i in 0..ainfo.channels {
659 offs.push((i as usize) * stride);
660 }
661 } else {
662 stride = 1;
663 step = ainfo.channels as usize;
664 for i in 0..ainfo.channels {
665 offs.push(i as usize);
666 }
667 }
668 if ainfo.format.is_float() {
669 if ainfo.format.get_bits() == 32 {
670 let data: Vec<f32> = vec![0.0; length];
671 let buf: NAAudioBuffer<f32> = NAAudioBuffer { data: NABufferRef::new(data), info: ainfo, offs, chmap, len: nsamples, stride, step };
672 Ok(NABufferType::AudioF32(buf))
673 } else {
674 Err(AllocatorError::TooLargeDimensions)
675 }
676 } else {
677 if ainfo.format.get_bits() == 8 && !ainfo.format.is_signed() {
678 let data: Vec<u8> = vec![0; length];
679 let buf: NAAudioBuffer<u8> = NAAudioBuffer { data: NABufferRef::new(data), info: ainfo, offs, chmap, len: nsamples, stride, step };
680 Ok(NABufferType::AudioU8(buf))
681 } else if ainfo.format.get_bits() == 16 && ainfo.format.is_signed() {
682 let data: Vec<i16> = vec![0; length];
683 let buf: NAAudioBuffer<i16> = NAAudioBuffer { data: NABufferRef::new(data), info: ainfo, offs, chmap, len: nsamples, stride, step };
684 Ok(NABufferType::AudioI16(buf))
685 } else if ainfo.format.get_bits() == 32 && ainfo.format.is_signed() {
686 let data: Vec<i32> = vec![0; length];
687 let buf: NAAudioBuffer<i32> = NAAudioBuffer { data: NABufferRef::new(data), info: ainfo, offs, chmap, len: nsamples, stride, step };
688 Ok(NABufferType::AudioI32(buf))
689 } else {
690 Err(AllocatorError::TooLargeDimensions)
691 }
692 }
693 } else {
694 let len = nsamples.checked_mul(ainfo.channels as usize);
695 if len == None { return Err(AllocatorError::TooLargeDimensions); }
696 let length = ainfo.format.get_audio_size(len.unwrap() as u64);
697 let data: Vec<u8> = vec![0; length];
698 let buf: NAAudioBuffer<u8> = NAAudioBuffer { data: NABufferRef::new(data), info: ainfo, offs, chmap, len: nsamples, stride: 0, step: 0 };
699 Ok(NABufferType::AudioPacked(buf))
700 }
701 }
702
703 /// Constructs a new buffer for generic data.
704 pub fn alloc_data_buffer(size: usize) -> Result<NABufferType, AllocatorError> {
705 let data: Vec<u8> = vec![0; size];
706 let buf: NABufferRef<Vec<u8>> = NABufferRef::new(data);
707 Ok(NABufferType::Data(buf))
708 }
709
710 /// Creates a clone of current buffer.
711 pub fn copy_buffer(buf: &NABufferType) -> NABufferType {
712 buf.clone()
713 }
714
/// Video frame pool.
///
/// This structure allows codec to effectively reuse old frames instead of allocating and de-allocating frames every time.
/// Caller can also reserve some frames for its own purposes e.g. display queue.
pub struct NAVideoBufferPool<T:Copy> {
    // allocated frames; a frame is "free" when the pool holds its only reference
    pool:    Vec<NAVideoBufferRef<T>>,
    // base number of frames the pool should provide
    max_len: usize,
    // extra frames reserved for the caller (see set_dec_bufs)
    add_len: usize,
}
724
725 impl<T:Copy> NAVideoBufferPool<T> {
726 /// Constructs a new `NAVideoBufferPool` instance.
727 pub fn new(max_len: usize) -> Self {
728 Self {
729 pool: Vec::with_capacity(max_len),
730 max_len,
731 add_len: 0,
732 }
733 }
734 /// Sets the number of buffers reserved for the user.
735 pub fn set_dec_bufs(&mut self, add_len: usize) {
736 self.add_len = add_len;
737 }
738 /// Returns an unused buffer from the pool.
739 pub fn get_free(&mut self) -> Option<NAVideoBufferRef<T>> {
740 for e in self.pool.iter() {
741 if e.get_num_refs() == 1 {
742 return Some(e.clone());
743 }
744 }
745 None
746 }
747 /// Clones provided frame data into a free pool frame.
748 pub fn get_copy(&mut self, rbuf: &NAVideoBufferRef<T>) -> Option<NAVideoBufferRef<T>> {
749 let mut dbuf = self.get_free()?;
750 dbuf.data.copy_from_slice(&rbuf.data);
751 Some(dbuf)
752 }
753 /// Clears the pool from all frames.
754 pub fn reset(&mut self) {
755 self.pool.truncate(0);
756 }
757 }
758
759 impl NAVideoBufferPool<u8> {
760 /// Allocates the target amount of video frames using [`alloc_video_buffer`].
761 ///
762 /// [`alloc_video_buffer`]: ./fn.alloc_video_buffer.html
763 pub fn prealloc_video(&mut self, vinfo: NAVideoInfo, align: u8) -> Result<(), AllocatorError> {
764 let nbufs = self.max_len + self.add_len - self.pool.len();
765 for _ in 0..nbufs {
766 let vbuf = alloc_video_buffer(vinfo, align)?;
767 if let NABufferType::Video(buf) = vbuf {
768 self.pool.push(buf);
769 } else if let NABufferType::VideoPacked(buf) = vbuf {
770 self.pool.push(buf);
771 } else {
772 return Err(AllocatorError::FormatError);
773 }
774 }
775 Ok(())
776 }
777 }
778
779 impl NAVideoBufferPool<u16> {
780 /// Allocates the target amount of video frames using [`alloc_video_buffer`].
781 ///
782 /// [`alloc_video_buffer`]: ./fn.alloc_video_buffer.html
783 pub fn prealloc_video(&mut self, vinfo: NAVideoInfo, align: u8) -> Result<(), AllocatorError> {
784 let nbufs = self.max_len + self.add_len - self.pool.len();
785 for _ in 0..nbufs {
786 let vbuf = alloc_video_buffer(vinfo, align)?;
787 if let NABufferType::Video16(buf) = vbuf {
788 self.pool.push(buf);
789 } else {
790 return Err(AllocatorError::FormatError);
791 }
792 }
793 Ok(())
794 }
795 }
796
797 impl NAVideoBufferPool<u32> {
798 /// Allocates the target amount of video frames using [`alloc_video_buffer`].
799 ///
800 /// [`alloc_video_buffer`]: ./fn.alloc_video_buffer.html
801 pub fn prealloc_video(&mut self, vinfo: NAVideoInfo, align: u8) -> Result<(), AllocatorError> {
802 let nbufs = self.max_len + self.add_len - self.pool.len();
803 for _ in 0..nbufs {
804 let vbuf = alloc_video_buffer(vinfo, align)?;
805 if let NABufferType::Video32(buf) = vbuf {
806 self.pool.push(buf);
807 } else {
808 return Err(AllocatorError::FormatError);
809 }
810 }
811 Ok(())
812 }
813 }
814
/// Information about codec contained in a stream.
#[allow(dead_code)]
#[derive(Clone)]
pub struct NACodecInfo {
    // codec short name
    name:       &'static str,
    // stream type and parameters
    properties: NACodecTypeInfo,
    // optional out-of-band initialisation data
    extradata:  Option<Arc<Vec<u8>>>,
}

/// A specialised type for reference-counted `NACodecInfo`.
pub type NACodecInfoRef = Arc<NACodecInfo>;
826
827 impl NACodecInfo {
828 /// Constructs a new instance of `NACodecInfo`.
829 pub fn new(name: &'static str, p: NACodecTypeInfo, edata: Option<Vec<u8>>) -> Self {
830 let extradata = match edata {
831 None => None,
832 Some(vec) => Some(Arc::new(vec)),
833 };
834 NACodecInfo { name, properties: p, extradata }
835 }
836 /// Constructs a new reference-counted instance of `NACodecInfo`.
837 pub fn new_ref(name: &'static str, p: NACodecTypeInfo, edata: Option<Arc<Vec<u8>>>) -> Self {
838 NACodecInfo { name, properties: p, extradata: edata }
839 }
840 /// Converts current instance into a reference-counted one.
841 pub fn into_ref(self) -> NACodecInfoRef { Arc::new(self) }
842 /// Returns codec information.
843 pub fn get_properties(&self) -> NACodecTypeInfo { self.properties }
844 /// Returns additional initialisation data required by the codec.
845 pub fn get_extradata(&self) -> Option<Arc<Vec<u8>>> {
846 if let Some(ref vec) = self.extradata { return Some(vec.clone()); }
847 None
848 }
849 /// Returns codec name.
850 pub fn get_name(&self) -> &'static str { self.name }
851 /// Reports whether it is a video codec.
852 pub fn is_video(&self) -> bool {
853 if let NACodecTypeInfo::Video(_) = self.properties { return true; }
854 false
855 }
856 /// Reports whether it is an audio codec.
857 pub fn is_audio(&self) -> bool {
858 if let NACodecTypeInfo::Audio(_) = self.properties { return true; }
859 false
860 }
861 /// Constructs a new empty reference-counted instance of `NACodecInfo`.
862 pub fn new_dummy() -> Arc<Self> {
863 Arc::new(DUMMY_CODEC_INFO)
864 }
865 /// Updates codec infomation.
866 pub fn replace_info(&self, p: NACodecTypeInfo) -> Arc<Self> {
867 Arc::new(NACodecInfo { name: self.name, properties: p, extradata: self.extradata.clone() })
868 }
869 }
870
impl Default for NACodecInfo {
    /// Returns the empty dummy codec information.
    fn default() -> Self { DUMMY_CODEC_INFO }
}
874
875 impl fmt::Display for NACodecInfo {
876 fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
877 let edata = match self.extradata.clone() {
878 None => "no extradata".to_string(),
879 Some(v) => format!("{} byte(s) of extradata", v.len()),
880 };
881 write!(f, "{}: {} {}", self.name, self.properties, edata)
882 }
883 }
884
/// Default empty codec information (no name, no properties, no extradata).
pub const DUMMY_CODEC_INFO: NACodecInfo = NACodecInfo {
    name: "none",
    properties: NACodecTypeInfo::None,
    extradata: None };
890
/// A list of recognized frame types.
#[derive(Debug,Clone,Copy,PartialEq)]
#[allow(dead_code)]
pub enum FrameType {
    /// Intra frame type.
    I,
    /// Inter frame type.
    P,
    /// Bidirectionally predicted frame.
    B,
    /// Skip frame.
    ///
    /// When such frame is encountered then last frame should be used again if it is needed.
    Skip,
    /// Some other frame type (e.g. not yet determined by the decoder).
    Other,
}
908
909 impl fmt::Display for FrameType {
910 fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
911 match *self {
912 FrameType::I => write!(f, "I"),
913 FrameType::P => write!(f, "P"),
914 FrameType::B => write!(f, "B"),
915 FrameType::Skip => write!(f, "skip"),
916 FrameType::Other => write!(f, "x"),
917 }
918 }
919 }
920
/// Timestamp information.
///
/// Timestamps and duration are expressed in `tb_num / tb_den` fractions of a second.
#[derive(Debug,Clone,Copy)]
pub struct NATimeInfo {
    /// Presentation timestamp.
    pub pts: Option<u64>,
    /// Decode timestamp.
    pub dts: Option<u64>,
    /// Duration (in timebase units).
    pub duration: Option<u64>,
    /// Timebase numerator.
    pub tb_num: u32,
    /// Timebase denominator.
    pub tb_den: u32,
}
935
impl NATimeInfo {
    /// Constructs a new `NATimeInfo` instance.
    pub fn new(pts: Option<u64>, dts: Option<u64>, duration: Option<u64>, tb_num: u32, tb_den: u32) -> Self {
        NATimeInfo { pts, dts, duration, tb_num, tb_den }
    }
    /// Returns presentation timestamp.
    pub fn get_pts(&self) -> Option<u64> { self.pts }
    /// Returns decoding timestamp.
    pub fn get_dts(&self) -> Option<u64> { self.dts }
    /// Returns duration.
    pub fn get_duration(&self) -> Option<u64> { self.duration }
    /// Sets new presentation timestamp.
    pub fn set_pts(&mut self, pts: Option<u64>) { self.pts = pts; }
    /// Sets new decoding timestamp.
    pub fn set_dts(&mut self, dts: Option<u64>) { self.dts = dts; }
    /// Sets new duration.
    pub fn set_duration(&mut self, dur: Option<u64>) { self.duration = dur; }

    /// Converts time in given scale into timestamp in given base.
    ///
    /// The ideal result is `time * tb_den / (base * tb_num)`; the fallback
    /// branches below trade precision for avoiding `u64` multiplication overflow.
    #[allow(clippy::collapsible_if)]
    pub fn time_to_ts(time: u64, base: u64, tb_num: u32, tb_den: u32) -> u64 {
        let tb_num = u64::from(tb_num);
        let tb_den = u64::from(tb_den);
        let tmp = time.checked_mul(tb_den);
        if let Some(tmp) = tmp {
            // no overflow — compute the exact value
            tmp / base / tb_num
        } else {
            // `time * tb_den` overflows, so divide first by the larger
            // of `tb_num` and `base` to keep intermediates small
            if tb_num < base {
                let coarse = time / tb_num;
                if let Some(tmp) = coarse.checked_mul(tb_den) {
                    tmp / base
                } else {
                    // still overflows — divide before multiplying (loses precision)
                    (coarse / base) * tb_den
                }
            } else {
                let coarse = time / base;
                if let Some(tmp) = coarse.checked_mul(tb_den) {
                    tmp / tb_num
                } else {
                    (coarse / tb_num) * tb_den
                }
            }
        }
    }
    /// Converts timestamp in given base into time in given scale.
    ///
    /// The ideal result is `ts * base * tb_num / tb_den`; division is moved
    /// earlier when an intermediate product would overflow `u64`.
    pub fn ts_to_time(ts: u64, base: u64, tb_num: u32, tb_den: u32) -> u64 {
        let tb_num = u64::from(tb_num);
        let tb_den = u64::from(tb_den);
        let tmp = ts.checked_mul(base);
        if let Some(tmp) = tmp {
            let tmp2 = tmp.checked_mul(tb_num);
            if let Some(tmp2) = tmp2 {
                // no overflow — compute the exact value
                tmp2 / tb_den
            } else {
                // divide before the second multiplication (loses precision)
                (tmp / tb_den) * tb_num
            }
        } else {
            let tmp = ts.checked_mul(tb_num);
            if let Some(tmp) = tmp {
                (tmp / tb_den) * base
            } else {
                (ts / tb_den) * base * tb_num
            }
        }
    }
    // Picks PTS when present, falling back to DTS and finally to zero.
    fn get_cur_ts(&self) -> u64 { self.pts.unwrap_or_else(|| self.dts.unwrap_or(0)) }
    // Current timestamp converted into milliseconds.
    fn get_cur_millis(&self) -> u64 {
        let ts = self.get_cur_ts();
        Self::ts_to_time(ts, 1000, self.tb_num, self.tb_den)
    }
    /// Checks whether the current time information is earlier than provided reference time.
    ///
    /// Entries without any timestamp are considered earlier than any reference point.
    pub fn less_than(&self, time: NATimePoint) -> bool {
        if self.pts.is_none() && self.dts.is_none() {
            return true;
        }
        match time {
            NATimePoint::PTS(rpts) => self.get_cur_ts() < rpts,
            NATimePoint::Milliseconds(ms) => self.get_cur_millis() < ms,
            NATimePoint::None => false,
        }
    }
    /// Checks whether the current time information is the same as provided reference time.
    pub fn equal(&self, time: NATimePoint) -> bool {
        if self.pts.is_none() && self.dts.is_none() {
            return time == NATimePoint::None;
        }
        match time {
            NATimePoint::PTS(rpts) => self.get_cur_ts() == rpts,
            NATimePoint::Milliseconds(ms) => self.get_cur_millis() == ms,
            NATimePoint::None => false,
        }
    }
}
1029
/// Time information for specifying durations or seek positions.
///
/// Values can be parsed from and printed as strings (see the `FromStr` implementation).
#[derive(Clone,Copy,Debug,PartialEq)]
pub enum NATimePoint {
    /// Time in milliseconds.
    Milliseconds(u64),
    /// Stream timestamp.
    PTS(u64),
    /// No time information present.
    None,
}
1040
impl Default for NATimePoint {
    /// Returns `NATimePoint::None` as the default value.
    fn default() -> Self {
        NATimePoint::None
    }
}
1046
1047 impl fmt::Display for NATimePoint {
1048 fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
1049 match *self {
1050 NATimePoint::Milliseconds(millis) => {
1051 let tot_s = millis / 1000;
1052 let ms = millis % 1000;
1053 if tot_s < 60 {
1054 if ms != 0 {
1055 return write!(f, "{}.{:03}", tot_s, ms);
1056 } else {
1057 return write!(f, "{}", tot_s);
1058 }
1059 }
1060 let tot_m = tot_s / 60;
1061 let s = tot_s % 60;
1062 if tot_m < 60 {
1063 if ms != 0 {
1064 return write!(f, "{}:{:02}.{:03}", tot_m, s, ms);
1065 } else {
1066 return write!(f, "{}:{:02}", tot_m, s);
1067 }
1068 }
1069 let h = tot_m / 60;
1070 let m = tot_m % 60;
1071 if ms != 0 {
1072 write!(f, "{}:{:02}:{:02}.{:03}", h, m, s, ms)
1073 } else {
1074 write!(f, "{}:{:02}:{:02}", h, m, s)
1075 }
1076 },
1077 NATimePoint::PTS(pts) => {
1078 write!(f, "{}pts", pts)
1079 },
1080 NATimePoint::None => {
1081 write!(f, "none")
1082 },
1083 }
1084 }
1085 }
1086
impl FromStr for NATimePoint {
    type Err = FormatParseError;

    /// Parses the string into time information.
    ///
    /// Accepted formats are `<u64>pts`, `<u64>ms` or `[hh:][mm:]ss[.ms]`.
    fn from_str(s: &str) -> Result<Self, Self::Err> {
        if s.is_empty() {
            return Err(FormatParseError {});
        }
        if !s.ends_with("pts") {
            if s.ends_with("ms") {
                // milliseconds form: strip the "ms" suffix and parse the number
                let str_b = s.as_bytes();
                let num = std::str::from_utf8(&str_b[..str_b.len() - 2]).unwrap();
                let ret = num.parse::<u64>();
                if let Ok(val) = ret {
                    return Ok(NATimePoint::Milliseconds(val));
                } else {
                    return Err(FormatParseError {});
                }
            }
            // `[hh:][mm:]ss[.ms]` form: as more colon-separated fields appear,
            // shift already-seen fields towards the more significant slots
            let mut parts = s.split(':');
            let mut hrs = None;
            let mut mins = None;
            let mut secs = parts.next();
            if let Some(part) = parts.next() {
                // two fields so far: minutes and seconds
                std::mem::swap(&mut mins, &mut secs);
                secs = Some(part);
            }
            if let Some(part) = parts.next() {
                // three fields: hours, minutes and seconds
                std::mem::swap(&mut hrs, &mut mins);
                std::mem::swap(&mut mins, &mut secs);
                secs = Some(part);
            }
            if parts.next().is_some() {
                // more than three colon-separated fields is invalid
                return Err(FormatParseError {});
            }
            let hours = if let Some(val) = hrs {
                let ret = val.parse::<u64>();
                if ret.is_err() { return Err(FormatParseError {}); }
                let val = ret.unwrap();
                // sanity limit to reject obviously bogus input
                if val > 1000 { return Err(FormatParseError {}); }
                val
            } else { 0 };
            let minutes = if let Some(val) = mins {
                let ret = val.parse::<u64>();
                if ret.is_err() { return Err(FormatParseError {}); }
                let val = ret.unwrap();
                if val >= 60 { return Err(FormatParseError {}); }
                val
            } else { 0 };
            let (seconds, millis) = if let Some(val) = secs {
                // the seconds field may carry an optional fractional part after '.'
                let mut parts = val.split('.');
                let ret = parts.next().unwrap().parse::<u64>();
                if ret.is_err() { return Err(FormatParseError {}); }
                let seconds = ret.unwrap();
                // seconds are range-checked only when a minutes field is present
                if mins.is_some() && seconds >= 60 { return Err(FormatParseError {}); }
                let millis = if let Some(val) = parts.next() {
                    let mut mval = 0;
                    let mut base = 0;
                    for ch in val.chars() {
                        if ch >= '0' && ch <= '9' {
                            mval = mval * 10 + u64::from((ch as u8) - b'0');
                            base += 1;
                            // ignore fractional digits beyond millisecond precision
                            if base > 3 { break; }
                        } else {
                            return Err(FormatParseError {});
                        }
                    }
                    // scale shorter fractions up, e.g. ".4" becomes 400 ms
                    while base < 3 {
                        mval *= 10;
                        base += 1;
                    }
                    mval
                } else { 0 };
                (seconds, millis)
            } else { unreachable!(); };
            let tot_secs = hours * 60 * 60 + minutes * 60 + seconds;
            Ok(NATimePoint::Milliseconds(tot_secs * 1000 + millis))
        } else {
            // stream timestamp form: strip the "pts" suffix and parse the number
            let str_b = s.as_bytes();
            let num = std::str::from_utf8(&str_b[..str_b.len() - 3]).unwrap();
            let ret = num.parse::<u64>();
            if let Ok(val) = ret {
                Ok(NATimePoint::PTS(val))
            } else {
                Err(FormatParseError {})
            }
        }
    }
}
1178
/// Decoded frame information.
#[allow(dead_code)]
#[derive(Clone)]
pub struct NAFrame {
    /// Frame timestamp.
    pub ts: NATimeInfo,
    /// Frame ID.
    pub id: i64,
    // Decoded frame data (an audio or video buffer).
    buffer: NABufferType,
    // Information about the codec that produced this frame.
    info: NACodecInfoRef,
    /// Frame type.
    pub frame_type: FrameType,
    /// Keyframe flag.
    pub key: bool,
//    options: HashMap<String, NAValue>,
}
1195
/// A specialised type for reference-counted `NAFrame`.
///
/// Cloning it merely bumps the reference count.
pub type NAFrameRef = Arc<NAFrame>;
1198
1199 fn get_plane_size(info: &NAVideoInfo, idx: usize) -> (usize, usize) {
1200 let chromaton = info.get_format().get_chromaton(idx);
1201 if chromaton.is_none() { return (0, 0); }
1202 let (hs, vs) = chromaton.unwrap().get_subsampling();
1203 let w = (info.get_width() + ((1 << hs) - 1)) >> hs;
1204 let h = (info.get_height() + ((1 << vs) - 1)) >> vs;
1205 (w, h)
1206 }
1207
1208 impl NAFrame {
1209 /// Constructs a new `NAFrame` instance.
1210 pub fn new(ts: NATimeInfo,
1211 ftype: FrameType,
1212 keyframe: bool,
1213 info: NACodecInfoRef,
1214 /*options: HashMap<String, NAValue>,*/
1215 buffer: NABufferType) -> Self {
1216 NAFrame { ts, id: 0, buffer, info, frame_type: ftype, key: keyframe/*, options*/ }
1217 }
1218 /// Returns frame format information.
1219 pub fn get_info(&self) -> NACodecInfoRef { self.info.clone() }
1220 /// Returns frame type.
1221 pub fn get_frame_type(&self) -> FrameType { self.frame_type }
1222 /// Reports whether the frame is a keyframe.
1223 pub fn is_keyframe(&self) -> bool { self.key }
1224 /// Sets new frame type.
1225 pub fn set_frame_type(&mut self, ftype: FrameType) { self.frame_type = ftype; }
1226 /// Sets keyframe flag.
1227 pub fn set_keyframe(&mut self, key: bool) { self.key = key; }
1228 /// Returns frame timestamp.
1229 pub fn get_time_information(&self) -> NATimeInfo { self.ts }
1230 /// Returns frame presentation time.
1231 pub fn get_pts(&self) -> Option<u64> { self.ts.get_pts() }
1232 /// Returns frame decoding time.
1233 pub fn get_dts(&self) -> Option<u64> { self.ts.get_dts() }
1234 /// Returns picture ID.
1235 pub fn get_id(&self) -> i64 { self.id }
1236 /// Returns frame display duration.
1237 pub fn get_duration(&self) -> Option<u64> { self.ts.get_duration() }
1238 /// Sets new presentation timestamp.
1239 pub fn set_pts(&mut self, pts: Option<u64>) { self.ts.set_pts(pts); }
1240 /// Sets new decoding timestamp.
1241 pub fn set_dts(&mut self, dts: Option<u64>) { self.ts.set_dts(dts); }
1242 /// Sets new picture ID.
1243 pub fn set_id(&mut self, id: i64) { self.id = id; }
1244 /// Sets new duration.
1245 pub fn set_duration(&mut self, dur: Option<u64>) { self.ts.set_duration(dur); }
1246
1247 /// Returns a reference to the frame data.
1248 pub fn get_buffer(&self) -> NABufferType { self.buffer.clone() }
1249
1250 /// Converts current instance into a reference-counted one.
1251 pub fn into_ref(self) -> NAFrameRef { Arc::new(self) }
1252
1253 /// Creates new frame with metadata from `NAPacket`.
1254 pub fn new_from_pkt(pkt: &NAPacket, info: NACodecInfoRef, buf: NABufferType) -> NAFrame {
1255 NAFrame::new(pkt.ts, FrameType::Other, pkt.keyframe, info, /*HashMap::new(),*/ buf)
1256 }
1257 }
1258
1259 impl fmt::Display for NAFrame {
1260 fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
1261 let mut ostr = format!("frame type {}", self.frame_type);
1262 if let Some(pts) = self.ts.pts { ostr = format!("{} pts {}", ostr, pts); }
1263 if let Some(dts) = self.ts.dts { ostr = format!("{} dts {}", ostr, dts); }
1264 if let Some(dur) = self.ts.duration { ostr = format!("{} duration {}", ostr, dur); }
1265 if self.key { ostr = format!("{} kf", ostr); }
1266 write!(f, "[{}]", ostr)
1267 }
1268 }
1269
/// A list of possible stream types.
#[derive(Debug,Clone,Copy,PartialEq)]
#[allow(dead_code)]
pub enum StreamType {
    /// Video stream.
    Video,
    /// Audio stream.
    Audio,
    /// Subtitles.
    Subtitles,
    /// Any data stream (or might be an unrecognized audio/video stream).
    Data,
    /// Nonexistent stream.
    None,
}
1285
1286 impl fmt::Display for StreamType {
1287 fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
1288 match *self {
1289 StreamType::Video => write!(f, "Video"),
1290 StreamType::Audio => write!(f, "Audio"),
1291 StreamType::Subtitles => write!(f, "Subtitles"),
1292 StreamType::Data => write!(f, "Data"),
1293 StreamType::None => write!(f, "-"),
1294 }
1295 }
1296 }
1297
/// Stream data.
#[allow(dead_code)]
#[derive(Clone)]
pub struct NAStream {
    // Kind of the stream contents (video, audio, ...).
    media_type: StreamType,
    /// Stream ID.
    pub id: u32,
    // Stream number assigned by the demuxer.
    num: usize,
    // Information about the codec used in this stream.
    info: NACodecInfoRef,
    /// Timebase numerator.
    pub tb_num: u32,
    /// Timebase denominator.
    pub tb_den: u32,
    /// Duration in timebase units (zero if not available).
    pub duration: u64,
}
1314
/// A specialised reference-counted `NAStream` type.
///
/// Cloning it merely bumps the reference count.
pub type NAStreamRef = Arc<NAStream>;
1317
/// Downscales the timebase by the greatest common divisor of its components.
///
/// A zero numerator is returned unchanged since the fraction cannot be reduced.
pub fn reduce_timebase(tb_num: u32, tb_den: u32) -> (u32, u32) {
    if tb_num == 0 { return (tb_num, tb_den); }

    // Euclid's algorithm with remainders: O(log n) instead of the
    // subtraction variant which degenerates for skewed inputs like 1/90000.
    let mut a = tb_num;
    let mut b = tb_den;
    while b != 0 {
        let t = a % b;
        a = b;
        b = t;
    }

    (tb_num / a, tb_den / a)
}
1334
1335 impl NAStream {
1336 /// Constructs a new `NAStream` instance.
1337 pub fn new(mt: StreamType, id: u32, info: NACodecInfo, tb_num: u32, tb_den: u32, duration: u64) -> Self {
1338 let (n, d) = reduce_timebase(tb_num, tb_den);
1339 NAStream { media_type: mt, id, num: 0, info: info.into_ref(), tb_num: n, tb_den: d, duration }
1340 }
1341 /// Returns stream id.
1342 pub fn get_id(&self) -> u32 { self.id }
1343 /// Returns stream type.
1344 pub fn get_media_type(&self) -> StreamType { self.media_type }
1345 /// Returns stream number assigned by demuxer.
1346 pub fn get_num(&self) -> usize { self.num }
1347 /// Sets stream number.
1348 pub fn set_num(&mut self, num: usize) { self.num = num; }
1349 /// Returns codec information.
1350 pub fn get_info(&self) -> NACodecInfoRef { self.info.clone() }
1351 /// Returns stream timebase.
1352 pub fn get_timebase(&self) -> (u32, u32) { (self.tb_num, self.tb_den) }
1353 /// Sets new stream timebase.
1354 pub fn set_timebase(&mut self, tb_num: u32, tb_den: u32) {
1355 let (n, d) = reduce_timebase(tb_num, tb_den);
1356 self.tb_num = n;
1357 self.tb_den = d;
1358 }
1359 /// Returns stream duration.
1360 pub fn get_duration(&self) -> usize { self.num }
1361 /// Converts current instance into a reference-counted one.
1362 pub fn into_ref(self) -> NAStreamRef { Arc::new(self) }
1363 }
1364
1365 impl fmt::Display for NAStream {
1366 fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
1367 write!(f, "({}#{} @ {}/{} - {})", self.media_type, self.id, self.tb_num, self.tb_den, self.info.get_properties())
1368 }
1369 }
1370
/// Side data that may accompany demuxed data.
#[derive(Clone)]
pub enum NASideData {
    /// Palette information.
    ///
    /// This side data contains a flag signalling that palette has changed since previous time and a reference to the current palette.
    /// Palette is stored in 8-bit RGBA format (256 entries, 4 bytes each).
    Palette(bool, Arc<[u8; 1024]>),
    /// Generic user data.
    UserData(Arc<Vec<u8>>),
}
1382
/// Packet with compressed data.
#[allow(dead_code)]
pub struct NAPacket {
    // Stream this packet belongs to.
    stream: NAStreamRef,
    /// Packet timestamp.
    pub ts: NATimeInfo,
    // Reference-counted raw packet payload.
    buffer: NABufferRef<Vec<u8>>,
    /// Keyframe flag.
    pub keyframe: bool,
//    options: HashMap<String, NAValue<'a>>,
    /// Packet side data (e.g. palette for paletted formats).
    pub side_data: Vec<NASideData>,
}
1396
1397 impl NAPacket {
1398 /// Constructs a new `NAPacket` instance.
1399 pub fn new(str: NAStreamRef, ts: NATimeInfo, kf: bool, vec: Vec<u8>) -> Self {
1400 // let mut vec: Vec<u8> = Vec::new();
1401 // vec.resize(size, 0);
1402 NAPacket { stream: str, ts, keyframe: kf, buffer: NABufferRef::new(vec), side_data: Vec::new() }
1403 }
1404 /// Constructs a new `NAPacket` instance reusing a buffer reference.
1405 pub fn new_from_refbuf(str: NAStreamRef, ts: NATimeInfo, kf: bool, buffer: NABufferRef<Vec<u8>>) -> Self {
1406 NAPacket { stream: str, ts, keyframe: kf, buffer, side_data: Vec::new() }
1407 }
1408 /// Returns information about the stream packet belongs to.
1409 pub fn get_stream(&self) -> NAStreamRef { self.stream.clone() }
1410 /// Returns packet timestamp.
1411 pub fn get_time_information(&self) -> NATimeInfo { self.ts }
1412 /// Returns packet presentation timestamp.
1413 pub fn get_pts(&self) -> Option<u64> { self.ts.get_pts() }
1414 /// Returns packet decoding timestamp.
1415 pub fn get_dts(&self) -> Option<u64> { self.ts.get_dts() }
1416 /// Returns packet duration.
1417 pub fn get_duration(&self) -> Option<u64> { self.ts.get_duration() }
1418 /// Reports whether this is a keyframe packet.
1419 pub fn is_keyframe(&self) -> bool { self.keyframe }
1420 /// Returns a reference to packet data.
1421 pub fn get_buffer(&self) -> NABufferRef<Vec<u8>> { self.buffer.clone() }
1422 /// Adds side data for a packet.
1423 pub fn add_side_data(&mut self, side_data: NASideData) { self.side_data.push(side_data); }
1424 /// Assigns packet to a new stream.
1425 pub fn reassign(&mut self, str: NAStreamRef, ts: NATimeInfo) {
1426 self.stream = str;
1427 self.ts = ts;
1428 }
1429 }
1430
impl Drop for NAPacket {
    // NOTE(review): intentionally empty — no cleanup is performed here.
    // Presumably a placeholder left for future resource tracking;
    // confirm whether this impl can be removed entirely.
    fn drop(&mut self) {}
}
1434
1435 impl fmt::Display for NAPacket {
1436 fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
1437 let mut ostr = format!("[pkt for {} size {}", self.stream, self.buffer.len());
1438 if let Some(pts) = self.ts.pts { ostr = format!("{} pts {}", ostr, pts); }
1439 if let Some(dts) = self.ts.dts { ostr = format!("{} dts {}", ostr, dts); }
1440 if let Some(dur) = self.ts.duration { ostr = format!("{} duration {}", ostr, dur); }
1441 if self.keyframe { ostr = format!("{} kf", ostr); }
1442 ostr += "]";
1443 write!(f, "{}", ostr)
1444 }
1445 }
1446
#[cfg(test)]
mod test {
    use super::*;

    #[test]
    fn test_time_parse() {
        // Display formatting for the various magnitudes.
        assert_eq!(NATimePoint::PTS(42).to_string(), "42pts");
        assert_eq!(NATimePoint::Milliseconds(4242000).to_string(), "1:10:42");
        assert_eq!(NATimePoint::Milliseconds(42424242).to_string(), "11:47:04.242");
        // Parsing of all accepted input forms.
        let ret = NATimePoint::from_str("42pts");
        assert_eq!(ret.unwrap(), NATimePoint::PTS(42));
        let ret = NATimePoint::from_str("42ms");
        assert_eq!(ret.unwrap(), NATimePoint::Milliseconds(42));
        let ret = NATimePoint::from_str("1:2:3");
        assert_eq!(ret.unwrap(), NATimePoint::Milliseconds(3723000));
        let ret = NATimePoint::from_str("1:2:3.42");
        assert_eq!(ret.unwrap(), NATimePoint::Milliseconds(3723420));
        // Invalid inputs must be rejected.
        assert!(NATimePoint::from_str("").is_err());
        assert!(NATimePoint::from_str("1:60:00").is_err());
    }
}
1463 }