| 1 | //! Packets and decoded frames functionality. |
| 2 | use std::cmp::max; |
| 3 | //use std::collections::HashMap; |
| 4 | use std::fmt; |
| 5 | pub use std::sync::Arc; |
| 6 | pub use crate::formats::*; |
| 7 | pub use crate::refs::*; |
| 8 | use std::str::FromStr; |
| 9 | |
/// Audio stream information.
#[allow(dead_code)]
#[derive(Clone,Copy,PartialEq)]
pub struct NAAudioInfo {
    /// Sample rate in Hertz.
    pub sample_rate: u32,
    /// Number of channels.
    pub channels: u8,
    /// Audio sample format.
    pub format: NASoniton,
    /// Length of one audio block in samples.
    pub block_len: usize,
}
| 23 | |
| 24 | impl NAAudioInfo { |
| 25 | /// Constructs a new `NAAudioInfo` instance. |
| 26 | pub fn new(sr: u32, ch: u8, fmt: NASoniton, bl: usize) -> Self { |
| 27 | NAAudioInfo { sample_rate: sr, channels: ch, format: fmt, block_len: bl } |
| 28 | } |
| 29 | /// Returns audio sample rate. |
| 30 | pub fn get_sample_rate(&self) -> u32 { self.sample_rate } |
| 31 | /// Returns the number of channels. |
| 32 | pub fn get_channels(&self) -> u8 { self.channels } |
| 33 | /// Returns sample format. |
| 34 | pub fn get_format(&self) -> NASoniton { self.format } |
| 35 | /// Returns one audio block duration in samples. |
| 36 | pub fn get_block_len(&self) -> usize { self.block_len } |
| 37 | } |
| 38 | |
impl fmt::Display for NAAudioInfo {
    // Prints sample rate and channel count, e.g. "44100 Hz, 2 ch".
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        write!(f, "{} Hz, {} ch", self.sample_rate, self.channels)
    }
}
| 44 | |
/// Video stream information.
#[allow(dead_code)]
#[derive(Clone,Copy,PartialEq)]
pub struct NAVideoInfo {
    /// Picture width.
    pub width: usize,
    /// Picture height.
    pub height: usize,
    /// Picture is stored downside up.
    pub flipped: bool,
    /// Picture pixel format.
    pub format: NAPixelFormaton,
    /// Declared bits per sample (derived from the pixel format's total depth on construction).
    pub bits: u8,
}
| 60 | |
| 61 | impl NAVideoInfo { |
| 62 | /// Constructs a new `NAVideoInfo` instance. |
| 63 | pub fn new(w: usize, h: usize, flip: bool, fmt: NAPixelFormaton) -> Self { |
| 64 | let bits = fmt.get_total_depth(); |
| 65 | NAVideoInfo { width: w, height: h, flipped: flip, format: fmt, bits } |
| 66 | } |
| 67 | /// Returns picture width. |
| 68 | pub fn get_width(&self) -> usize { self.width as usize } |
| 69 | /// Returns picture height. |
| 70 | pub fn get_height(&self) -> usize { self.height as usize } |
| 71 | /// Returns picture orientation. |
| 72 | pub fn is_flipped(&self) -> bool { self.flipped } |
| 73 | /// Returns picture pixel format. |
| 74 | pub fn get_format(&self) -> NAPixelFormaton { self.format } |
| 75 | /// Sets new picture width. |
| 76 | pub fn set_width(&mut self, w: usize) { self.width = w; } |
| 77 | /// Sets new picture height. |
| 78 | pub fn set_height(&mut self, h: usize) { self.height = h; } |
| 79 | } |
| 80 | |
impl fmt::Display for NAVideoInfo {
    // Prints picture dimensions as "WxH".
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        write!(f, "{}x{}", self.width, self.height)
    }
}
| 86 | |
/// A list of possible stream information types.
#[derive(Clone,Copy,PartialEq)]
pub enum NACodecTypeInfo {
    /// No codec present.
    None,
    /// Audio codec information.
    Audio(NAAudioInfo),
    /// Video codec information.
    Video(NAVideoInfo),
}
| 97 | |
| 98 | impl NACodecTypeInfo { |
| 99 | /// Returns video stream information. |
| 100 | pub fn get_video_info(&self) -> Option<NAVideoInfo> { |
| 101 | match *self { |
| 102 | NACodecTypeInfo::Video(vinfo) => Some(vinfo), |
| 103 | _ => None, |
| 104 | } |
| 105 | } |
| 106 | /// Returns audio stream information. |
| 107 | pub fn get_audio_info(&self) -> Option<NAAudioInfo> { |
| 108 | match *self { |
| 109 | NACodecTypeInfo::Audio(ainfo) => Some(ainfo), |
| 110 | _ => None, |
| 111 | } |
| 112 | } |
| 113 | /// Reports whether the current stream is video stream. |
| 114 | pub fn is_video(&self) -> bool { |
| 115 | match *self { |
| 116 | NACodecTypeInfo::Video(_) => true, |
| 117 | _ => false, |
| 118 | } |
| 119 | } |
| 120 | /// Reports whether the current stream is audio stream. |
| 121 | pub fn is_audio(&self) -> bool { |
| 122 | match *self { |
| 123 | NACodecTypeInfo::Audio(_) => true, |
| 124 | _ => false, |
| 125 | } |
| 126 | } |
| 127 | } |
| 128 | |
| 129 | impl fmt::Display for NACodecTypeInfo { |
| 130 | fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { |
| 131 | let ret = match *self { |
| 132 | NACodecTypeInfo::None => "".to_string(), |
| 133 | NACodecTypeInfo::Audio(fmt) => format!("{}", fmt), |
| 134 | NACodecTypeInfo::Video(fmt) => format!("{}", fmt), |
| 135 | }; |
| 136 | write!(f, "{}", ret) |
| 137 | } |
| 138 | } |
| 139 | |
/// Decoded video frame.
///
/// NihAV frames are stored in native type (8/16/32-bit elements) inside a single buffer.
/// In case of image with several components those components are stored sequentially and can be accessed in the buffer starting at corresponding component offset.
#[derive(Clone)]
pub struct NAVideoBuffer<T> {
    // Picture dimensions and pixel format description.
    info: NAVideoInfo,
    // Reference-counted element storage shared between clones of this buffer.
    data: NABufferRef<Vec<T>>,
    // Start offset of each component inside `data`.
    offs: Vec<usize>,
    // Line stride (in elements) for each component.
    strides: Vec<usize>,
}
| 151 | |
| 152 | impl<T: Clone> NAVideoBuffer<T> { |
| 153 | /// Returns the component offset (0 for all unavailable offsets). |
| 154 | pub fn get_offset(&self, idx: usize) -> usize { |
| 155 | if idx >= self.offs.len() { 0 } |
| 156 | else { self.offs[idx] } |
| 157 | } |
| 158 | /// Returns picture info. |
| 159 | pub fn get_info(&self) -> NAVideoInfo { self.info } |
| 160 | /// Returns an immutable reference to the data. |
| 161 | pub fn get_data(&self) -> &Vec<T> { self.data.as_ref() } |
| 162 | /// Returns a mutable reference to the data. |
| 163 | pub fn get_data_mut(&mut self) -> Option<&mut Vec<T>> { self.data.as_mut() } |
| 164 | /// Returns the number of components in picture format. |
| 165 | pub fn get_num_components(&self) -> usize { self.offs.len() } |
| 166 | /// Creates a copy of current `NAVideoBuffer`. |
| 167 | pub fn copy_buffer(&mut self) -> Self { |
| 168 | let mut data: Vec<T> = Vec::with_capacity(self.data.len()); |
| 169 | data.clone_from(self.data.as_ref()); |
| 170 | let mut offs: Vec<usize> = Vec::with_capacity(self.offs.len()); |
| 171 | offs.clone_from(&self.offs); |
| 172 | let mut strides: Vec<usize> = Vec::with_capacity(self.strides.len()); |
| 173 | strides.clone_from(&self.strides); |
| 174 | NAVideoBuffer { info: self.info, data: NABufferRef::new(data), offs, strides } |
| 175 | } |
| 176 | /// Returns stride (distance between subsequent lines) for the requested component. |
| 177 | pub fn get_stride(&self, idx: usize) -> usize { |
| 178 | if idx >= self.strides.len() { return 0; } |
| 179 | self.strides[idx] |
| 180 | } |
| 181 | /// Returns requested component dimensions. |
| 182 | pub fn get_dimensions(&self, idx: usize) -> (usize, usize) { |
| 183 | get_plane_size(&self.info, idx) |
| 184 | } |
| 185 | /// Converts current instance into buffer reference. |
| 186 | pub fn into_ref(self) -> NABufferRef<Self> { |
| 187 | NABufferRef::new(self) |
| 188 | } |
| 189 | |
| 190 | fn print_contents(&self, datatype: &str) { |
| 191 | println!("{} video buffer size {}", datatype, self.data.len()); |
| 192 | println!(" format {}", self.info); |
| 193 | print!(" offsets:"); |
| 194 | for off in self.offs.iter() { |
| 195 | print!(" {}", *off); |
| 196 | } |
| 197 | println!(); |
| 198 | print!(" strides:"); |
| 199 | for stride in self.strides.iter() { |
| 200 | print!(" {}", *stride); |
| 201 | } |
| 202 | println!(); |
| 203 | } |
| 204 | } |
| 205 | |
/// A specialised type for reference-counted `NAVideoBuffer`.
///
/// Cloning the reference shares the underlying frame instead of copying it.
pub type NAVideoBufferRef<T> = NABufferRef<NAVideoBuffer<T>>;
| 208 | |
/// Decoded audio frame.
///
/// NihAV frames are stored in native type (8/16/32-bit elements) inside a single buffer.
/// In case of planar audio samples for each channel are stored sequentially and can be accessed in the buffer starting at corresponding channel offset.
#[derive(Clone)]
pub struct NAAudioBuffer<T> {
    // Audio format description.
    info: NAAudioInfo,
    // Reference-counted sample storage shared between clones of this buffer.
    data: NABufferRef<Vec<T>>,
    // Start offset of each channel inside `data`.
    offs: Vec<usize>,
    // Distance between the starts of consecutive channels (0 for packed data).
    stride: usize,
    // Distance between consecutive samples of one channel (0 for packed data).
    step: usize,
    // Channel layout description.
    chmap: NAChannelMap,
    // Frame length in samples.
    len: usize,
}
| 223 | |
| 224 | impl<T: Clone> NAAudioBuffer<T> { |
| 225 | /// Returns the start position of requested channel data. |
| 226 | pub fn get_offset(&self, idx: usize) -> usize { |
| 227 | if idx >= self.offs.len() { 0 } |
| 228 | else { self.offs[idx] } |
| 229 | } |
| 230 | /// Returns the distance between the start of one channel and the next one. |
| 231 | pub fn get_stride(&self) -> usize { self.stride } |
| 232 | /// Returns the distance between the samples in one channel. |
| 233 | pub fn get_step(&self) -> usize { self.step } |
| 234 | /// Returns audio format information. |
| 235 | pub fn get_info(&self) -> NAAudioInfo { self.info } |
| 236 | /// Returns channel map. |
| 237 | pub fn get_chmap(&self) -> &NAChannelMap { &self.chmap } |
| 238 | /// Returns an immutable reference to the data. |
| 239 | pub fn get_data(&self) -> &Vec<T> { self.data.as_ref() } |
| 240 | /// Returns reference to the data. |
| 241 | pub fn get_data_ref(&self) -> NABufferRef<Vec<T>> { self.data.clone() } |
| 242 | /// Returns a mutable reference to the data. |
| 243 | pub fn get_data_mut(&mut self) -> Option<&mut Vec<T>> { self.data.as_mut() } |
| 244 | /// Clones current `NAAudioBuffer` into a new one. |
| 245 | pub fn copy_buffer(&mut self) -> Self { |
| 246 | let mut data: Vec<T> = Vec::with_capacity(self.data.len()); |
| 247 | data.clone_from(self.data.as_ref()); |
| 248 | let mut offs: Vec<usize> = Vec::with_capacity(self.offs.len()); |
| 249 | offs.clone_from(&self.offs); |
| 250 | NAAudioBuffer { info: self.info, data: NABufferRef::new(data), offs, chmap: self.get_chmap().clone(), len: self.len, stride: self.stride, step: self.step } |
| 251 | } |
| 252 | /// Return the length of frame in samples. |
| 253 | pub fn get_length(&self) -> usize { self.len } |
| 254 | /// Truncates buffer length if possible. |
| 255 | /// |
| 256 | /// In case when new length is larger than old length nothing is done. |
| 257 | pub fn truncate(&mut self, new_len: usize) { |
| 258 | self.len = self.len.min(new_len); |
| 259 | } |
| 260 | |
| 261 | fn print_contents(&self, datatype: &str) { |
| 262 | println!("Audio buffer with {} data, stride {}, step {}", datatype, self.stride, self.step); |
| 263 | println!(" format {}", self.info); |
| 264 | println!(" channel map {}", self.chmap); |
| 265 | print!(" offsets:"); |
| 266 | for off in self.offs.iter() { |
| 267 | print!(" {}", *off); |
| 268 | } |
| 269 | println!(); |
| 270 | } |
| 271 | } |
| 272 | |
| 273 | impl NAAudioBuffer<u8> { |
| 274 | /// Constructs a new `NAAudioBuffer` instance. |
| 275 | pub fn new_from_buf(info: NAAudioInfo, data: NABufferRef<Vec<u8>>, chmap: NAChannelMap) -> Self { |
| 276 | let len = data.len(); |
| 277 | NAAudioBuffer { info, data, chmap, offs: Vec::new(), len, stride: 0, step: 0 } |
| 278 | } |
| 279 | } |
| 280 | |
/// A list of possible decoded frame types.
#[derive(Clone)]
pub enum NABufferType {
    /// 8-bit video buffer.
    Video      (NAVideoBufferRef<u8>),
    /// 16-bit video buffer (i.e. every component or packed pixel fits into 16 bits).
    Video16    (NAVideoBufferRef<u16>),
    /// 32-bit video buffer (i.e. every component or packed pixel fits into 32 bits).
    Video32    (NAVideoBufferRef<u32>),
    /// Packed video buffer (components stored as raw bytes).
    VideoPacked(NAVideoBufferRef<u8>),
    /// Audio buffer with 8-bit unsigned integer audio.
    AudioU8    (NAAudioBuffer<u8>),
    /// Audio buffer with 16-bit signed integer audio.
    AudioI16   (NAAudioBuffer<i16>),
    /// Audio buffer with 32-bit signed integer audio.
    AudioI32   (NAAudioBuffer<i32>),
    /// Audio buffer with 32-bit floating point audio.
    AudioF32   (NAAudioBuffer<f32>),
    /// Packed audio buffer (samples stored as raw bytes).
    AudioPacked(NAAudioBuffer<u8>),
    /// Buffer with generic data (e.g. subtitles).
    Data       (NABufferRef<Vec<u8>>),
    /// No data present.
    None,
}
| 307 | |
impl NABufferType {
    /// Returns the offset to the requested component or channel (0 when not applicable).
    pub fn get_offset(&self, idx: usize) -> usize {
        match *self {
            NABufferType::Video(ref vb)       => vb.get_offset(idx),
            NABufferType::Video16(ref vb)     => vb.get_offset(idx),
            NABufferType::Video32(ref vb)     => vb.get_offset(idx),
            NABufferType::VideoPacked(ref vb) => vb.get_offset(idx),
            NABufferType::AudioU8(ref ab)     => ab.get_offset(idx),
            NABufferType::AudioI16(ref ab)    => ab.get_offset(idx),
            NABufferType::AudioI32(ref ab)    => ab.get_offset(idx),
            NABufferType::AudioF32(ref ab)    => ab.get_offset(idx),
            NABufferType::AudioPacked(ref ab) => ab.get_offset(idx),
            _ => 0,
        }
    }
    /// Returns information for video frames (`None` for non-video buffers).
    pub fn get_video_info(&self) -> Option<NAVideoInfo> {
        match *self {
            NABufferType::Video(ref vb)       => Some(vb.get_info()),
            NABufferType::Video16(ref vb)     => Some(vb.get_info()),
            NABufferType::Video32(ref vb)     => Some(vb.get_info()),
            NABufferType::VideoPacked(ref vb) => Some(vb.get_info()),
            _ => None,
        }
    }
    /// Returns reference to 8-bit (or packed) video buffer.
    pub fn get_vbuf(&self) -> Option<NAVideoBufferRef<u8>> {
        match *self {
            NABufferType::Video(ref vb)       => Some(vb.clone()),
            NABufferType::VideoPacked(ref vb) => Some(vb.clone()),
            _ => None,
        }
    }
    /// Returns reference to 16-bit video buffer.
    pub fn get_vbuf16(&self) -> Option<NAVideoBufferRef<u16>> {
        match *self {
            NABufferType::Video16(ref vb)     => Some(vb.clone()),
            _ => None,
        }
    }
    /// Returns reference to 32-bit video buffer.
    pub fn get_vbuf32(&self) -> Option<NAVideoBufferRef<u32>> {
        match *self {
            NABufferType::Video32(ref vb)     => Some(vb.clone()),
            _ => None,
        }
    }
    /// Returns information for audio frames (`None` for non-audio buffers).
    pub fn get_audio_info(&self) -> Option<NAAudioInfo> {
        match *self {
            NABufferType::AudioU8(ref ab)     => Some(ab.get_info()),
            NABufferType::AudioI16(ref ab)    => Some(ab.get_info()),
            NABufferType::AudioI32(ref ab)    => Some(ab.get_info()),
            NABufferType::AudioF32(ref ab)    => Some(ab.get_info()),
            NABufferType::AudioPacked(ref ab) => Some(ab.get_info()),
            _ => None,
        }
    }
    /// Returns audio channel map (`None` for non-audio buffers).
    pub fn get_chmap(&self) -> Option<&NAChannelMap> {
        match *self {
            NABufferType::AudioU8(ref ab)     => Some(ab.get_chmap()),
            NABufferType::AudioI16(ref ab)    => Some(ab.get_chmap()),
            NABufferType::AudioI32(ref ab)    => Some(ab.get_chmap()),
            NABufferType::AudioF32(ref ab)    => Some(ab.get_chmap()),
            NABufferType::AudioPacked(ref ab) => Some(ab.get_chmap()),
            _ => None,
        }
    }
    /// Returns audio frame duration in samples (0 for non-audio buffers).
    pub fn get_audio_length(&self) -> usize {
        match *self {
            NABufferType::AudioU8(ref ab)     => ab.get_length(),
            NABufferType::AudioI16(ref ab)    => ab.get_length(),
            NABufferType::AudioI32(ref ab)    => ab.get_length(),
            NABufferType::AudioF32(ref ab)    => ab.get_length(),
            NABufferType::AudioPacked(ref ab) => ab.get_length(),
            _ => 0,
        }
    }
    /// Returns the distance between starts of two channels (0 for non-audio buffers).
    pub fn get_audio_stride(&self) -> usize {
        match *self {
            NABufferType::AudioU8(ref ab)     => ab.get_stride(),
            NABufferType::AudioI16(ref ab)    => ab.get_stride(),
            NABufferType::AudioI32(ref ab)    => ab.get_stride(),
            NABufferType::AudioF32(ref ab)    => ab.get_stride(),
            NABufferType::AudioPacked(ref ab) => ab.get_stride(),
            _ => 0,
        }
    }
    /// Returns the distance between two samples in one channel (0 for non-audio buffers).
    pub fn get_audio_step(&self) -> usize {
        match *self {
            NABufferType::AudioU8(ref ab)     => ab.get_step(),
            NABufferType::AudioI16(ref ab)    => ab.get_step(),
            NABufferType::AudioI32(ref ab)    => ab.get_step(),
            NABufferType::AudioF32(ref ab)    => ab.get_step(),
            NABufferType::AudioPacked(ref ab) => ab.get_step(),
            _ => 0,
        }
    }
    /// Returns reference to 8-bit (or packed) audio buffer.
    pub fn get_abuf_u8(&self) -> Option<NAAudioBuffer<u8>> {
        match *self {
            NABufferType::AudioU8(ref ab) => Some(ab.clone()),
            NABufferType::AudioPacked(ref ab) => Some(ab.clone()),
            _ => None,
        }
    }
    /// Returns reference to 16-bit audio buffer.
    pub fn get_abuf_i16(&self) -> Option<NAAudioBuffer<i16>> {
        match *self {
            NABufferType::AudioI16(ref ab) => Some(ab.clone()),
            _ => None,
        }
    }
    /// Returns reference to 32-bit integer audio buffer.
    pub fn get_abuf_i32(&self) -> Option<NAAudioBuffer<i32>> {
        match *self {
            NABufferType::AudioI32(ref ab) => Some(ab.clone()),
            _ => None,
        }
    }
    /// Returns reference to 32-bit floating point audio buffer.
    pub fn get_abuf_f32(&self) -> Option<NAAudioBuffer<f32>> {
        match *self {
            NABufferType::AudioF32(ref ab) => Some(ab.clone()),
            _ => None,
        }
    }
    /// Prints internal buffer layout.
    pub fn print_buffer_metadata(&self) {
        match *self {
            NABufferType::Video(ref buf)       => buf.print_contents("8-bit"),
            NABufferType::Video16(ref buf)     => buf.print_contents("16-bit"),
            NABufferType::Video32(ref buf)     => buf.print_contents("32-bit"),
            NABufferType::VideoPacked(ref buf) => buf.print_contents("packed"),
            NABufferType::AudioU8(ref buf)     => buf.print_contents("8-bit unsigned integer"),
            NABufferType::AudioI16(ref buf)    => buf.print_contents("16-bit integer"),
            NABufferType::AudioI32(ref buf)    => buf.print_contents("32-bit integer"),
            NABufferType::AudioF32(ref buf)    => buf.print_contents("32-bit float"),
            NABufferType::AudioPacked(ref buf) => buf.print_contents("packed"),
            NABufferType::Data(ref buf)        => { println!("Data buffer, len = {}", buf.len()); },
            NABufferType::None                 => { println!("No buffer"); },
        };
    }
}
| 457 | |
// Maximum number of picture components `NASimpleVideoFrame` can describe.
const NA_SIMPLE_VFRAME_COMPONENTS: usize = 4;
/// Simplified decoded frame data.
pub struct NASimpleVideoFrame<'a, T: Copy> {
    /// Widths of each picture component.
    pub width:  [usize; NA_SIMPLE_VFRAME_COMPONENTS],
    /// Heights of each picture component.
    pub height: [usize; NA_SIMPLE_VFRAME_COMPONENTS],
    /// Orientation (upside-down or downside-up) flag.
    pub flip: bool,
    /// Strides for each component.
    pub stride: [usize; NA_SIMPLE_VFRAME_COMPONENTS],
    /// Start of each component.
    pub offset: [usize; NA_SIMPLE_VFRAME_COMPONENTS],
    /// Number of components.
    pub components: usize,
    /// Pointer to the picture pixel data.
    pub data: &'a mut [T],
}
| 476 | |
| 477 | impl<'a, T:Copy> NASimpleVideoFrame<'a, T> { |
| 478 | /// Constructs a new instance of `NASimpleVideoFrame` from `NAVideoBuffer`. |
| 479 | pub fn from_video_buf(vbuf: &'a mut NAVideoBuffer<T>) -> Option<Self> { |
| 480 | let vinfo = vbuf.get_info(); |
| 481 | let components = vinfo.format.components as usize; |
| 482 | if components > NA_SIMPLE_VFRAME_COMPONENTS { |
| 483 | return None; |
| 484 | } |
| 485 | let mut w: [usize; NA_SIMPLE_VFRAME_COMPONENTS] = [0; NA_SIMPLE_VFRAME_COMPONENTS]; |
| 486 | let mut h: [usize; NA_SIMPLE_VFRAME_COMPONENTS] = [0; NA_SIMPLE_VFRAME_COMPONENTS]; |
| 487 | let mut s: [usize; NA_SIMPLE_VFRAME_COMPONENTS] = [0; NA_SIMPLE_VFRAME_COMPONENTS]; |
| 488 | let mut o: [usize; NA_SIMPLE_VFRAME_COMPONENTS] = [0; NA_SIMPLE_VFRAME_COMPONENTS]; |
| 489 | for comp in 0..components { |
| 490 | let (width, height) = vbuf.get_dimensions(comp); |
| 491 | w[comp] = width; |
| 492 | h[comp] = height; |
| 493 | s[comp] = vbuf.get_stride(comp); |
| 494 | o[comp] = vbuf.get_offset(comp); |
| 495 | } |
| 496 | let flip = vinfo.flipped; |
| 497 | Some(NASimpleVideoFrame { |
| 498 | width: w, |
| 499 | height: h, |
| 500 | flip, |
| 501 | stride: s, |
| 502 | offset: o, |
| 503 | components, |
| 504 | data: vbuf.data.as_mut_slice(), |
| 505 | }) |
| 506 | } |
| 507 | } |
| 508 | |
/// A list of possible frame allocator errors.
#[derive(Debug,Clone,Copy,PartialEq)]
pub enum AllocatorError {
    /// Requested picture dimensions are too large (size computation overflowed).
    TooLargeDimensions,
    /// Invalid input format.
    FormatError,
}
| 517 | |
| 518 | /// Constructs a new video buffer with requested format. |
| 519 | /// |
| 520 | /// `align` is power of two alignment for image. E.g. the value of 5 means that frame dimensions will be padded to be multiple of 32. |
| 521 | pub fn alloc_video_buffer(vinfo: NAVideoInfo, align: u8) -> Result<NABufferType, AllocatorError> { |
| 522 | let fmt = &vinfo.format; |
| 523 | let mut new_size: usize = 0; |
| 524 | let mut offs: Vec<usize> = Vec::new(); |
| 525 | let mut strides: Vec<usize> = Vec::new(); |
| 526 | |
| 527 | for i in 0..fmt.get_num_comp() { |
| 528 | if fmt.get_chromaton(i) == None { return Err(AllocatorError::FormatError); } |
| 529 | } |
| 530 | |
| 531 | let align_mod = ((1 << align) as usize) - 1; |
| 532 | let width = ((vinfo.width as usize) + align_mod) & !align_mod; |
| 533 | let height = ((vinfo.height as usize) + align_mod) & !align_mod; |
| 534 | let mut max_depth = 0; |
| 535 | let mut all_packed = true; |
| 536 | let mut all_bytealigned = true; |
| 537 | for i in 0..fmt.get_num_comp() { |
| 538 | let ochr = fmt.get_chromaton(i); |
| 539 | if ochr.is_none() { continue; } |
| 540 | let chr = ochr.unwrap(); |
| 541 | if !chr.is_packed() { |
| 542 | all_packed = false; |
| 543 | } else if ((chr.get_shift() + chr.get_depth()) & 7) != 0 { |
| 544 | all_bytealigned = false; |
| 545 | } |
| 546 | max_depth = max(max_depth, chr.get_depth()); |
| 547 | } |
| 548 | let unfit_elem_size = match fmt.get_elem_size() { |
| 549 | 2 | 4 => false, |
| 550 | _ => true, |
| 551 | }; |
| 552 | |
| 553 | //todo semi-packed like NV12 |
| 554 | if fmt.is_paletted() { |
| 555 | //todo various-sized palettes? |
| 556 | let stride = vinfo.get_format().get_chromaton(0).unwrap().get_linesize(width); |
| 557 | let pic_sz = stride.checked_mul(height); |
| 558 | if pic_sz == None { return Err(AllocatorError::TooLargeDimensions); } |
| 559 | let pal_size = 256 * (fmt.get_elem_size() as usize); |
| 560 | let new_size = pic_sz.unwrap().checked_add(pal_size); |
| 561 | if new_size == None { return Err(AllocatorError::TooLargeDimensions); } |
| 562 | offs.push(0); |
| 563 | offs.push(stride * height); |
| 564 | strides.push(stride); |
| 565 | let data: Vec<u8> = vec![0; new_size.unwrap()]; |
| 566 | let buf: NAVideoBuffer<u8> = NAVideoBuffer { data: NABufferRef::new(data), info: vinfo, offs, strides }; |
| 567 | Ok(NABufferType::Video(buf.into_ref())) |
| 568 | } else if !all_packed { |
| 569 | for i in 0..fmt.get_num_comp() { |
| 570 | let ochr = fmt.get_chromaton(i); |
| 571 | if ochr.is_none() { continue; } |
| 572 | let chr = ochr.unwrap(); |
| 573 | offs.push(new_size as usize); |
| 574 | let stride = chr.get_linesize(width); |
| 575 | let cur_h = chr.get_height(height); |
| 576 | let cur_sz = stride.checked_mul(cur_h); |
| 577 | if cur_sz == None { return Err(AllocatorError::TooLargeDimensions); } |
| 578 | let new_sz = new_size.checked_add(cur_sz.unwrap()); |
| 579 | if new_sz == None { return Err(AllocatorError::TooLargeDimensions); } |
| 580 | new_size = new_sz.unwrap(); |
| 581 | strides.push(stride); |
| 582 | } |
| 583 | if max_depth <= 8 { |
| 584 | let data: Vec<u8> = vec![0; new_size]; |
| 585 | let buf: NAVideoBuffer<u8> = NAVideoBuffer { data: NABufferRef::new(data), info: vinfo, offs, strides }; |
| 586 | Ok(NABufferType::Video(buf.into_ref())) |
| 587 | } else if max_depth <= 16 { |
| 588 | let data: Vec<u16> = vec![0; new_size]; |
| 589 | let buf: NAVideoBuffer<u16> = NAVideoBuffer { data: NABufferRef::new(data), info: vinfo, offs, strides }; |
| 590 | Ok(NABufferType::Video16(buf.into_ref())) |
| 591 | } else { |
| 592 | let data: Vec<u32> = vec![0; new_size]; |
| 593 | let buf: NAVideoBuffer<u32> = NAVideoBuffer { data: NABufferRef::new(data), info: vinfo, offs, strides }; |
| 594 | Ok(NABufferType::Video32(buf.into_ref())) |
| 595 | } |
| 596 | } else if all_bytealigned || unfit_elem_size { |
| 597 | let elem_sz = fmt.get_elem_size(); |
| 598 | let line_sz = width.checked_mul(elem_sz as usize); |
| 599 | if line_sz == None { return Err(AllocatorError::TooLargeDimensions); } |
| 600 | let new_sz = line_sz.unwrap().checked_mul(height); |
| 601 | if new_sz == None { return Err(AllocatorError::TooLargeDimensions); } |
| 602 | new_size = new_sz.unwrap(); |
| 603 | let data: Vec<u8> = vec![0; new_size]; |
| 604 | strides.push(line_sz.unwrap()); |
| 605 | let buf: NAVideoBuffer<u8> = NAVideoBuffer { data: NABufferRef::new(data), info: vinfo, offs, strides }; |
| 606 | Ok(NABufferType::VideoPacked(buf.into_ref())) |
| 607 | } else { |
| 608 | let elem_sz = fmt.get_elem_size(); |
| 609 | let new_sz = width.checked_mul(height); |
| 610 | if new_sz == None { return Err(AllocatorError::TooLargeDimensions); } |
| 611 | new_size = new_sz.unwrap(); |
| 612 | match elem_sz { |
| 613 | 2 => { |
| 614 | let data: Vec<u16> = vec![0; new_size]; |
| 615 | strides.push(width); |
| 616 | let buf: NAVideoBuffer<u16> = NAVideoBuffer { data: NABufferRef::new(data), info: vinfo, offs, strides }; |
| 617 | Ok(NABufferType::Video16(buf.into_ref())) |
| 618 | }, |
| 619 | 4 => { |
| 620 | let data: Vec<u32> = vec![0; new_size]; |
| 621 | strides.push(width); |
| 622 | let buf: NAVideoBuffer<u32> = NAVideoBuffer { data: NABufferRef::new(data), info: vinfo, offs, strides }; |
| 623 | Ok(NABufferType::Video32(buf.into_ref())) |
| 624 | }, |
| 625 | _ => unreachable!(), |
| 626 | } |
| 627 | } |
| 628 | } |
| 629 | |
| 630 | /// Constructs a new audio buffer for the requested format and length. |
| 631 | #[allow(clippy::collapsible_if)] |
| 632 | pub fn alloc_audio_buffer(ainfo: NAAudioInfo, nsamples: usize, chmap: NAChannelMap) -> Result<NABufferType, AllocatorError> { |
| 633 | let mut offs: Vec<usize> = Vec::new(); |
| 634 | if ainfo.format.is_planar() || ((ainfo.format.get_bits() % 8) == 0) { |
| 635 | let len = nsamples.checked_mul(ainfo.channels as usize); |
| 636 | if len == None { return Err(AllocatorError::TooLargeDimensions); } |
| 637 | let length = len.unwrap(); |
| 638 | let stride; |
| 639 | let step; |
| 640 | if ainfo.format.is_planar() { |
| 641 | stride = nsamples; |
| 642 | step = 1; |
| 643 | for i in 0..ainfo.channels { |
| 644 | offs.push((i as usize) * stride); |
| 645 | } |
| 646 | } else { |
| 647 | stride = 1; |
| 648 | step = ainfo.channels as usize; |
| 649 | for i in 0..ainfo.channels { |
| 650 | offs.push(i as usize); |
| 651 | } |
| 652 | } |
| 653 | if ainfo.format.is_float() { |
| 654 | if ainfo.format.get_bits() == 32 { |
| 655 | let data: Vec<f32> = vec![0.0; length]; |
| 656 | let buf: NAAudioBuffer<f32> = NAAudioBuffer { data: NABufferRef::new(data), info: ainfo, offs, chmap, len: nsamples, stride, step }; |
| 657 | Ok(NABufferType::AudioF32(buf)) |
| 658 | } else { |
| 659 | Err(AllocatorError::TooLargeDimensions) |
| 660 | } |
| 661 | } else { |
| 662 | if ainfo.format.get_bits() == 8 && !ainfo.format.is_signed() { |
| 663 | let data: Vec<u8> = vec![0; length]; |
| 664 | let buf: NAAudioBuffer<u8> = NAAudioBuffer { data: NABufferRef::new(data), info: ainfo, offs, chmap, len: nsamples, stride, step }; |
| 665 | Ok(NABufferType::AudioU8(buf)) |
| 666 | } else if ainfo.format.get_bits() == 16 && ainfo.format.is_signed() { |
| 667 | let data: Vec<i16> = vec![0; length]; |
| 668 | let buf: NAAudioBuffer<i16> = NAAudioBuffer { data: NABufferRef::new(data), info: ainfo, offs, chmap, len: nsamples, stride, step }; |
| 669 | Ok(NABufferType::AudioI16(buf)) |
| 670 | } else if ainfo.format.get_bits() == 32 && ainfo.format.is_signed() { |
| 671 | let data: Vec<i32> = vec![0; length]; |
| 672 | let buf: NAAudioBuffer<i32> = NAAudioBuffer { data: NABufferRef::new(data), info: ainfo, offs, chmap, len: nsamples, stride, step }; |
| 673 | Ok(NABufferType::AudioI32(buf)) |
| 674 | } else { |
| 675 | Err(AllocatorError::TooLargeDimensions) |
| 676 | } |
| 677 | } |
| 678 | } else { |
| 679 | let len = nsamples.checked_mul(ainfo.channels as usize); |
| 680 | if len == None { return Err(AllocatorError::TooLargeDimensions); } |
| 681 | let length = ainfo.format.get_audio_size(len.unwrap() as u64); |
| 682 | let data: Vec<u8> = vec![0; length]; |
| 683 | let buf: NAAudioBuffer<u8> = NAAudioBuffer { data: NABufferRef::new(data), info: ainfo, offs, chmap, len: nsamples, stride: 0, step: 0 }; |
| 684 | Ok(NABufferType::AudioPacked(buf)) |
| 685 | } |
| 686 | } |
| 687 | |
| 688 | /// Constructs a new buffer for generic data. |
| 689 | pub fn alloc_data_buffer(size: usize) -> Result<NABufferType, AllocatorError> { |
| 690 | let data: Vec<u8> = vec![0; size]; |
| 691 | let buf: NABufferRef<Vec<u8>> = NABufferRef::new(data); |
| 692 | Ok(NABufferType::Data(buf)) |
| 693 | } |
| 694 | |
/// Creates a clone of current buffer.
///
/// `NABufferType` derives `Clone`, so any reference-counted storage inside the
/// buffer is shared with the clone rather than copied.
pub fn copy_buffer(buf: &NABufferType) -> NABufferType {
    buf.clone()
}
| 699 | |
/// Video frame pool.
///
/// This structure allows codec to effectively reuse old frames instead of allocating and de-allocating frames every time.
/// Caller can also reserve some frames for its own purposes e.g. display queue.
pub struct NAVideoBufferPool<T:Copy> {
    // Allocated frames; an entry whose reference count is 1 is free for reuse.
    pool: Vec<NAVideoBufferRef<T>>,
    // Number of buffers requested at construction time.
    max_len: usize,
    // Extra buffers reserved for the caller (see `set_dec_bufs()`).
    add_len: usize,
}
| 709 | |
| 710 | impl<T:Copy> NAVideoBufferPool<T> { |
| 711 | /// Constructs a new `NAVideoBufferPool` instance. |
| 712 | pub fn new(max_len: usize) -> Self { |
| 713 | Self { |
| 714 | pool: Vec::with_capacity(max_len), |
| 715 | max_len, |
| 716 | add_len: 0, |
| 717 | } |
| 718 | } |
| 719 | /// Sets the number of buffers reserved for the user. |
| 720 | pub fn set_dec_bufs(&mut self, add_len: usize) { |
| 721 | self.add_len = add_len; |
| 722 | } |
| 723 | /// Returns an unused buffer from the pool. |
| 724 | pub fn get_free(&mut self) -> Option<NAVideoBufferRef<T>> { |
| 725 | for e in self.pool.iter() { |
| 726 | if e.get_num_refs() == 1 { |
| 727 | return Some(e.clone()); |
| 728 | } |
| 729 | } |
| 730 | None |
| 731 | } |
| 732 | /// Clones provided frame data into a free pool frame. |
| 733 | pub fn get_copy(&mut self, rbuf: &NAVideoBufferRef<T>) -> Option<NAVideoBufferRef<T>> { |
| 734 | let mut dbuf = self.get_free()?; |
| 735 | dbuf.data.copy_from_slice(&rbuf.data); |
| 736 | Some(dbuf) |
| 737 | } |
| 738 | /// Clears the pool from all frames. |
| 739 | pub fn reset(&mut self) { |
| 740 | self.pool.truncate(0); |
| 741 | } |
| 742 | } |
| 743 | |
| 744 | impl NAVideoBufferPool<u8> { |
| 745 | /// Allocates the target amount of video frames using [`alloc_video_buffer`]. |
| 746 | /// |
| 747 | /// [`alloc_video_buffer`]: ./fn.alloc_video_buffer.html |
| 748 | pub fn prealloc_video(&mut self, vinfo: NAVideoInfo, align: u8) -> Result<(), AllocatorError> { |
| 749 | let nbufs = self.max_len + self.add_len - self.pool.len(); |
| 750 | for _ in 0..nbufs { |
| 751 | let vbuf = alloc_video_buffer(vinfo, align)?; |
| 752 | if let NABufferType::Video(buf) = vbuf { |
| 753 | self.pool.push(buf); |
| 754 | } else if let NABufferType::VideoPacked(buf) = vbuf { |
| 755 | self.pool.push(buf); |
| 756 | } else { |
| 757 | return Err(AllocatorError::FormatError); |
| 758 | } |
| 759 | } |
| 760 | Ok(()) |
| 761 | } |
| 762 | } |
| 763 | |
| 764 | impl NAVideoBufferPool<u16> { |
| 765 | /// Allocates the target amount of video frames using [`alloc_video_buffer`]. |
| 766 | /// |
| 767 | /// [`alloc_video_buffer`]: ./fn.alloc_video_buffer.html |
| 768 | pub fn prealloc_video(&mut self, vinfo: NAVideoInfo, align: u8) -> Result<(), AllocatorError> { |
| 769 | let nbufs = self.max_len + self.add_len - self.pool.len(); |
| 770 | for _ in 0..nbufs { |
| 771 | let vbuf = alloc_video_buffer(vinfo, align)?; |
| 772 | if let NABufferType::Video16(buf) = vbuf { |
| 773 | self.pool.push(buf); |
| 774 | } else { |
| 775 | return Err(AllocatorError::FormatError); |
| 776 | } |
| 777 | } |
| 778 | Ok(()) |
| 779 | } |
| 780 | } |
| 781 | |
| 782 | impl NAVideoBufferPool<u32> { |
| 783 | /// Allocates the target amount of video frames using [`alloc_video_buffer`]. |
| 784 | /// |
| 785 | /// [`alloc_video_buffer`]: ./fn.alloc_video_buffer.html |
| 786 | pub fn prealloc_video(&mut self, vinfo: NAVideoInfo, align: u8) -> Result<(), AllocatorError> { |
| 787 | let nbufs = self.max_len + self.add_len - self.pool.len(); |
| 788 | for _ in 0..nbufs { |
| 789 | let vbuf = alloc_video_buffer(vinfo, align)?; |
| 790 | if let NABufferType::Video32(buf) = vbuf { |
| 791 | self.pool.push(buf); |
| 792 | } else { |
| 793 | return Err(AllocatorError::FormatError); |
| 794 | } |
| 795 | } |
| 796 | Ok(()) |
| 797 | } |
| 798 | } |
| 799 | |
/// Information about codec contained in a stream.
#[allow(dead_code)]
#[derive(Clone)]
pub struct NACodecInfo {
    // Short codec name.
    name: &'static str,
    // Codec type (audio/video/none) with the corresponding stream parameters.
    properties: NACodecTypeInfo,
    // Optional codec-specific initialisation data.
    extradata: Option<Arc<Vec<u8>>>,
}

/// A specialised type for reference-counted `NACodecInfo`.
pub type NACodecInfoRef = Arc<NACodecInfo>;
| 811 | |
| 812 | impl NACodecInfo { |
| 813 | /// Constructs a new instance of `NACodecInfo`. |
| 814 | pub fn new(name: &'static str, p: NACodecTypeInfo, edata: Option<Vec<u8>>) -> Self { |
| 815 | let extradata = match edata { |
| 816 | None => None, |
| 817 | Some(vec) => Some(Arc::new(vec)), |
| 818 | }; |
| 819 | NACodecInfo { name, properties: p, extradata } |
| 820 | } |
| 821 | /// Constructs a new reference-counted instance of `NACodecInfo`. |
| 822 | pub fn new_ref(name: &'static str, p: NACodecTypeInfo, edata: Option<Arc<Vec<u8>>>) -> Self { |
| 823 | NACodecInfo { name, properties: p, extradata: edata } |
| 824 | } |
| 825 | /// Converts current instance into a reference-counted one. |
| 826 | pub fn into_ref(self) -> NACodecInfoRef { Arc::new(self) } |
| 827 | /// Returns codec information. |
| 828 | pub fn get_properties(&self) -> NACodecTypeInfo { self.properties } |
| 829 | /// Returns additional initialisation data required by the codec. |
| 830 | pub fn get_extradata(&self) -> Option<Arc<Vec<u8>>> { |
| 831 | if let Some(ref vec) = self.extradata { return Some(vec.clone()); } |
| 832 | None |
| 833 | } |
| 834 | /// Returns codec name. |
| 835 | pub fn get_name(&self) -> &'static str { self.name } |
| 836 | /// Reports whether it is a video codec. |
| 837 | pub fn is_video(&self) -> bool { |
| 838 | if let NACodecTypeInfo::Video(_) = self.properties { return true; } |
| 839 | false |
| 840 | } |
| 841 | /// Reports whether it is an audio codec. |
| 842 | pub fn is_audio(&self) -> bool { |
| 843 | if let NACodecTypeInfo::Audio(_) = self.properties { return true; } |
| 844 | false |
| 845 | } |
| 846 | /// Constructs a new empty reference-counted instance of `NACodecInfo`. |
| 847 | pub fn new_dummy() -> Arc<Self> { |
| 848 | Arc::new(DUMMY_CODEC_INFO) |
| 849 | } |
| 850 | /// Updates codec infomation. |
| 851 | pub fn replace_info(&self, p: NACodecTypeInfo) -> Arc<Self> { |
| 852 | Arc::new(NACodecInfo { name: self.name, properties: p, extradata: self.extradata.clone() }) |
| 853 | } |
| 854 | } |
| 855 | |
impl Default for NACodecInfo {
    /// Returns the placeholder codec information (`DUMMY_CODEC_INFO`).
    fn default() -> Self { DUMMY_CODEC_INFO }
}
| 859 | |
| 860 | impl fmt::Display for NACodecInfo { |
| 861 | fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { |
| 862 | let edata = match self.extradata.clone() { |
| 863 | None => "no extradata".to_string(), |
| 864 | Some(v) => format!("{} byte(s) of extradata", v.len()), |
| 865 | }; |
| 866 | write!(f, "{}: {} {}", self.name, self.properties, edata) |
| 867 | } |
| 868 | } |
| 869 | |
/// Default empty codec information.
///
/// Used as a placeholder when no real codec information is available yet.
pub const DUMMY_CODEC_INFO: NACodecInfo = NACodecInfo {
    name: "none",
    properties: NACodecTypeInfo::None,
    extradata: None };
| 875 | |
/// A list of recognized frame types.
#[derive(Debug,Clone,Copy,PartialEq)]
#[allow(dead_code)]
pub enum FrameType {
    /// Intra frame type.
    I,
    /// Inter frame type.
    P,
    /// Bidirectionally predicted frame.
    B,
    /// Skip frame.
    ///
    /// When such frame is encountered then last frame should be used again if it is needed.
    Skip,
    /// Some other frame type (or a type that could not be determined).
    Other,
}
| 893 | |
| 894 | impl fmt::Display for FrameType { |
| 895 | fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { |
| 896 | match *self { |
| 897 | FrameType::I => write!(f, "I"), |
| 898 | FrameType::P => write!(f, "P"), |
| 899 | FrameType::B => write!(f, "B"), |
| 900 | FrameType::Skip => write!(f, "skip"), |
| 901 | FrameType::Other => write!(f, "x"), |
| 902 | } |
| 903 | } |
| 904 | } |
| 905 | |
/// Timestamp information.
///
/// Timestamps and duration are expressed in `tb_num / tb_den` units.
#[derive(Debug,Clone,Copy)]
pub struct NATimeInfo {
    /// Presentation timestamp.
    pub pts: Option<u64>,
    /// Decode timestamp.
    pub dts: Option<u64>,
    /// Duration (in timebase units).
    pub duration: Option<u64>,
    /// Timebase numerator.
    pub tb_num: u32,
    /// Timebase denominator.
    pub tb_den: u32,
}
| 920 | |
impl NATimeInfo {
    /// Constructs a new `NATimeInfo` instance.
    pub fn new(pts: Option<u64>, dts: Option<u64>, duration: Option<u64>, tb_num: u32, tb_den: u32) -> Self {
        NATimeInfo { pts, dts, duration, tb_num, tb_den }
    }
    /// Returns presentation timestamp.
    pub fn get_pts(&self) -> Option<u64> { self.pts }
    /// Returns decoding timestamp.
    pub fn get_dts(&self) -> Option<u64> { self.dts }
    /// Returns duration.
    pub fn get_duration(&self) -> Option<u64> { self.duration }
    /// Sets new presentation timestamp.
    pub fn set_pts(&mut self, pts: Option<u64>) { self.pts = pts; }
    /// Sets new decoding timestamp.
    pub fn set_dts(&mut self, dts: Option<u64>) { self.dts = dts; }
    /// Sets new duration.
    pub fn set_duration(&mut self, dur: Option<u64>) { self.duration = dur; }

    /// Converts time in given scale into timestamp in given base.
    ///
    /// Conceptually this computes `time * tb_den / (base * tb_num)`.
    /// The branches only reorder the operations so the intermediate
    /// product cannot overflow `u64`, trading some precision for safety.
    #[allow(clippy::collapsible_if)]
    pub fn time_to_ts(time: u64, base: u64, tb_num: u32, tb_den: u32) -> u64 {
        let tb_num = u64::from(tb_num);
        let tb_den = u64::from(tb_den);
        let tmp = time.checked_mul(tb_den);
        if let Some(tmp) = tmp {
            // Fast path: the multiplication did not overflow.
            tmp / base / tb_num
        } else {
            // Overflow: pre-divide by one of the divisors first to shrink
            // the operand, then retry the multiplication.
            if tb_num < base {
                let coarse = time / tb_num;
                if let Some(tmp) = coarse.checked_mul(tb_den) {
                    tmp / base
                } else {
                    // Still overflows: divide fully before multiplying
                    // (coarsest, least precise fallback).
                    (coarse / base) * tb_den
                }
            } else {
                let coarse = time / base;
                if let Some(tmp) = coarse.checked_mul(tb_den) {
                    tmp / tb_num
                } else {
                    (coarse / tb_num) * tb_den
                }
            }
        }
    }
    /// Converts timestamp in given base into time in given scale.
    ///
    /// Conceptually `ts * base * tb_num / tb_den`, again reordered to
    /// avoid `u64` overflow with graceful precision fallbacks.
    pub fn ts_to_time(ts: u64, base: u64, tb_num: u32, tb_den: u32) -> u64 {
        let tb_num = u64::from(tb_num);
        let tb_den = u64::from(tb_den);
        let tmp = ts.checked_mul(base);
        if let Some(tmp) = tmp {
            let tmp2 = tmp.checked_mul(tb_num);
            if let Some(tmp2) = tmp2 {
                // Fast path: both multiplications fit.
                tmp2 / tb_den
            } else {
                // Divide before the second multiplication.
                (tmp / tb_den) * tb_num
            }
        } else {
            // First multiplication overflowed: try the other factor order.
            let tmp = ts.checked_mul(tb_num);
            if let Some(tmp) = tmp {
                (tmp / tb_den) * base
            } else {
                // Coarsest fallback: divide first, multiply last.
                (ts / tb_den) * base * tb_num
            }
        }
    }
    // Returns pts when set, otherwise dts, otherwise zero.
    fn get_cur_ts(&self) -> u64 { self.pts.unwrap_or_else(|| self.dts.unwrap_or(0)) }
    // Current timestamp converted to milliseconds.
    fn get_cur_millis(&self) -> u64 {
        let ts = self.get_cur_ts();
        Self::ts_to_time(ts, 1000, self.tb_num, self.tb_den)
    }
    /// Checks whether the current time information is earler than provided reference time.
    ///
    /// Entries with no timestamps at all always compare as earlier.
    pub fn less_than(&self, time: NATimePoint) -> bool {
        if self.pts.is_none() && self.dts.is_none() {
            return true;
        }
        match time {
            NATimePoint::PTS(rpts) => self.get_cur_ts() < rpts,
            NATimePoint::Milliseconds(ms) => self.get_cur_millis() < ms,
            NATimePoint::None => false,
        }
    }
    /// Checks whether the current time information is the same as provided reference time.
    pub fn equal(&self, time: NATimePoint) -> bool {
        if self.pts.is_none() && self.dts.is_none() {
            // No timestamps present: equal only to the "no time" point.
            return time == NATimePoint::None;
        }
        match time {
            NATimePoint::PTS(rpts) => self.get_cur_ts() == rpts,
            NATimePoint::Milliseconds(ms) => self.get_cur_millis() == ms,
            NATimePoint::None => false,
        }
    }
}
| 1014 | |
/// Time information for specifying durations or seek positions.
#[derive(Clone,Copy,Debug,PartialEq)]
pub enum NATimePoint {
    /// Time in milliseconds.
    Milliseconds(u64),
    /// Stream timestamp (in the stream's own timebase units).
    PTS(u64),
    /// No time information present.
    None,
}
| 1025 | |
| 1026 | impl Default for NATimePoint { |
| 1027 | fn default() -> Self { |
| 1028 | NATimePoint::None |
| 1029 | } |
| 1030 | } |
| 1031 | |
| 1032 | impl fmt::Display for NATimePoint { |
| 1033 | fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { |
| 1034 | match *self { |
| 1035 | NATimePoint::Milliseconds(millis) => { |
| 1036 | let tot_s = millis / 1000; |
| 1037 | let ms = millis % 1000; |
| 1038 | if tot_s < 60 { |
| 1039 | if ms != 0 { |
| 1040 | return write!(f, "{}.{:03}", tot_s, ms); |
| 1041 | } else { |
| 1042 | return write!(f, "{}", tot_s); |
| 1043 | } |
| 1044 | } |
| 1045 | let tot_m = tot_s / 60; |
| 1046 | let s = tot_s % 60; |
| 1047 | if tot_m < 60 { |
| 1048 | if ms != 0 { |
| 1049 | return write!(f, "{}:{:02}.{:03}", tot_m, s, ms); |
| 1050 | } else { |
| 1051 | return write!(f, "{}:{:02}", tot_m, s); |
| 1052 | } |
| 1053 | } |
| 1054 | let h = tot_m / 60; |
| 1055 | let m = tot_m % 60; |
| 1056 | if ms != 0 { |
| 1057 | write!(f, "{}:{:02}:{:02}.{:03}", h, m, s, ms) |
| 1058 | } else { |
| 1059 | write!(f, "{}:{:02}:{:02}", h, m, s) |
| 1060 | } |
| 1061 | }, |
| 1062 | NATimePoint::PTS(pts) => { |
| 1063 | write!(f, "{}pts", pts) |
| 1064 | }, |
| 1065 | NATimePoint::None => { |
| 1066 | write!(f, "none") |
| 1067 | }, |
| 1068 | } |
| 1069 | } |
| 1070 | } |
| 1071 | |
impl FromStr for NATimePoint {
    type Err = FormatParseError;

    /// Parses the string into time information.
    ///
    /// Accepted formats are `<u64>pts`, `<u64>ms` or `[hh:][mm:]ss[.ms]`.
    fn from_str(s: &str) -> Result<Self, Self::Err> {
        if s.is_empty() {
            return Err(FormatParseError {});
        }
        if !s.ends_with("pts") {
            if s.ends_with("ms") {
                // Plain milliseconds: strip the "ms" suffix and parse the rest.
                let str_b = s.as_bytes();
                let num = std::str::from_utf8(&str_b[..str_b.len() - 2]).unwrap();
                let ret = num.parse::<u64>();
                if let Ok(val) = ret {
                    return Ok(NATimePoint::Milliseconds(val));
                } else {
                    return Err(FormatParseError {});
                }
            }
            // `[hh:][mm:]ss[.ms]` form. Collect up to three colon-separated
            // parts; each newly seen part shifts the earlier ones one slot
            // towards hours (ss -> mm:ss -> hh:mm:ss).
            let mut parts = s.split(':');
            let mut hrs = None;
            let mut mins = None;
            let mut secs = parts.next();
            if let Some(part) = parts.next() {
                std::mem::swap(&mut mins, &mut secs);
                secs = Some(part);
            }
            if let Some(part) = parts.next() {
                std::mem::swap(&mut hrs, &mut mins);
                std::mem::swap(&mut mins, &mut secs);
                secs = Some(part);
            }
            if parts.next().is_some() {
                // More than three colon-separated components is invalid.
                return Err(FormatParseError {});
            }
            let hours = if let Some(val) = hrs {
                let ret = val.parse::<u64>();
                if ret.is_err() { return Err(FormatParseError {}); }
                let val = ret.unwrap();
                // Sanity cap on the hour count.
                if val > 1000 { return Err(FormatParseError {}); }
                val
            } else { 0 };
            let minutes = if let Some(val) = mins {
                let ret = val.parse::<u64>();
                if ret.is_err() { return Err(FormatParseError {}); }
                let val = ret.unwrap();
                if val >= 60 { return Err(FormatParseError {}); }
                val
            } else { 0 };
            let (seconds, millis) = if let Some(val) = secs {
                // Seconds may carry a fractional part after a dot.
                let mut parts = val.split('.');
                let ret = parts.next().unwrap().parse::<u64>();
                if ret.is_err() { return Err(FormatParseError {}); }
                let seconds = ret.unwrap();
                // Seconds must be < 60 only when minutes were also given.
                if mins.is_some() && seconds >= 60 { return Err(FormatParseError {}); }
                let millis = if let Some(val) = parts.next() {
                    // Read at most three fraction digits...
                    let mut mval = 0;
                    let mut base = 0;
                    for ch in val.chars() {
                        if ch >= '0' && ch <= '9' {
                            mval = mval * 10 + u64::from((ch as u8) - b'0');
                            base += 1;
                            if base > 3 { break; }
                        } else {
                            return Err(FormatParseError {});
                        }
                    }
                    // ...and pad shorter fractions up to milliseconds.
                    while base < 3 {
                        mval *= 10;
                        base += 1;
                    }
                    mval
                } else { 0 };
                (seconds, millis)
            } else { unreachable!(); };
            let tot_secs = hours * 60 * 60 + minutes * 60 + seconds;
            Ok(NATimePoint::Milliseconds(tot_secs * 1000 + millis))
        } else {
            // `<u64>pts` form: strip the suffix and parse the number.
            let str_b = s.as_bytes();
            let num = std::str::from_utf8(&str_b[..str_b.len() - 3]).unwrap();
            let ret = num.parse::<u64>();
            if let Ok(val) = ret {
                Ok(NATimePoint::PTS(val))
            } else {
                Err(FormatParseError {})
            }
        }
    }
}
| 1163 | |
/// Decoded frame information.
#[allow(dead_code)]
#[derive(Clone)]
pub struct NAFrame {
    /// Frame timestamp.
    pub ts: NATimeInfo,
    /// Frame ID.
    pub id: i64,
    // Decoded frame data.
    buffer: NABufferType,
    // Codec information for the stream this frame was decoded from.
    info: NACodecInfoRef,
    /// Frame type.
    pub frame_type: FrameType,
    /// Keyframe flag.
    pub key: bool,
//    options: HashMap<String, NAValue>,
}

/// A specialised type for reference-counted `NAFrame`.
pub type NAFrameRef = Arc<NAFrame>;
| 1183 | |
| 1184 | fn get_plane_size(info: &NAVideoInfo, idx: usize) -> (usize, usize) { |
| 1185 | let chromaton = info.get_format().get_chromaton(idx); |
| 1186 | if chromaton.is_none() { return (0, 0); } |
| 1187 | let (hs, vs) = chromaton.unwrap().get_subsampling(); |
| 1188 | let w = (info.get_width() + ((1 << hs) - 1)) >> hs; |
| 1189 | let h = (info.get_height() + ((1 << vs) - 1)) >> vs; |
| 1190 | (w, h) |
| 1191 | } |
| 1192 | |
| 1193 | impl NAFrame { |
| 1194 | /// Constructs a new `NAFrame` instance. |
| 1195 | pub fn new(ts: NATimeInfo, |
| 1196 | ftype: FrameType, |
| 1197 | keyframe: bool, |
| 1198 | info: NACodecInfoRef, |
| 1199 | /*options: HashMap<String, NAValue>,*/ |
| 1200 | buffer: NABufferType) -> Self { |
| 1201 | NAFrame { ts, id: 0, buffer, info, frame_type: ftype, key: keyframe/*, options*/ } |
| 1202 | } |
| 1203 | /// Returns frame format information. |
| 1204 | pub fn get_info(&self) -> NACodecInfoRef { self.info.clone() } |
| 1205 | /// Returns frame type. |
| 1206 | pub fn get_frame_type(&self) -> FrameType { self.frame_type } |
| 1207 | /// Reports whether the frame is a keyframe. |
| 1208 | pub fn is_keyframe(&self) -> bool { self.key } |
| 1209 | /// Sets new frame type. |
| 1210 | pub fn set_frame_type(&mut self, ftype: FrameType) { self.frame_type = ftype; } |
| 1211 | /// Sets keyframe flag. |
| 1212 | pub fn set_keyframe(&mut self, key: bool) { self.key = key; } |
| 1213 | /// Returns frame timestamp. |
| 1214 | pub fn get_time_information(&self) -> NATimeInfo { self.ts } |
| 1215 | /// Returns frame presentation time. |
| 1216 | pub fn get_pts(&self) -> Option<u64> { self.ts.get_pts() } |
| 1217 | /// Returns frame decoding time. |
| 1218 | pub fn get_dts(&self) -> Option<u64> { self.ts.get_dts() } |
| 1219 | /// Returns picture ID. |
| 1220 | pub fn get_id(&self) -> i64 { self.id } |
| 1221 | /// Returns frame display duration. |
| 1222 | pub fn get_duration(&self) -> Option<u64> { self.ts.get_duration() } |
| 1223 | /// Sets new presentation timestamp. |
| 1224 | pub fn set_pts(&mut self, pts: Option<u64>) { self.ts.set_pts(pts); } |
| 1225 | /// Sets new decoding timestamp. |
| 1226 | pub fn set_dts(&mut self, dts: Option<u64>) { self.ts.set_dts(dts); } |
| 1227 | /// Sets new picture ID. |
| 1228 | pub fn set_id(&mut self, id: i64) { self.id = id; } |
| 1229 | /// Sets new duration. |
| 1230 | pub fn set_duration(&mut self, dur: Option<u64>) { self.ts.set_duration(dur); } |
| 1231 | |
| 1232 | /// Returns a reference to the frame data. |
| 1233 | pub fn get_buffer(&self) -> NABufferType { self.buffer.clone() } |
| 1234 | |
| 1235 | /// Converts current instance into a reference-counted one. |
| 1236 | pub fn into_ref(self) -> NAFrameRef { Arc::new(self) } |
| 1237 | |
| 1238 | /// Creates new frame with metadata from `NAPacket`. |
| 1239 | pub fn new_from_pkt(pkt: &NAPacket, info: NACodecInfoRef, buf: NABufferType) -> NAFrame { |
| 1240 | NAFrame::new(pkt.ts, FrameType::Other, pkt.keyframe, info, /*HashMap::new(),*/ buf) |
| 1241 | } |
| 1242 | } |
| 1243 | |
| 1244 | impl fmt::Display for NAFrame { |
| 1245 | fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { |
| 1246 | let mut ostr = format!("frame type {}", self.frame_type); |
| 1247 | if let Some(pts) = self.ts.pts { ostr = format!("{} pts {}", ostr, pts); } |
| 1248 | if let Some(dts) = self.ts.dts { ostr = format!("{} dts {}", ostr, dts); } |
| 1249 | if let Some(dur) = self.ts.duration { ostr = format!("{} duration {}", ostr, dur); } |
| 1250 | if self.key { ostr = format!("{} kf", ostr); } |
| 1251 | write!(f, "[{}]", ostr) |
| 1252 | } |
| 1253 | } |
| 1254 | |
/// A list of possible stream types.
#[derive(Debug,Clone,Copy,PartialEq)]
#[allow(dead_code)]
pub enum StreamType {
    /// Video stream.
    Video,
    /// Audio stream.
    Audio,
    /// Subtitles.
    Subtitles,
    /// Any data stream (or might be an unrecognized audio/video stream).
    Data,
    /// Nonexistent stream.
    None,
}
| 1270 | |
| 1271 | impl fmt::Display for StreamType { |
| 1272 | fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { |
| 1273 | match *self { |
| 1274 | StreamType::Video => write!(f, "Video"), |
| 1275 | StreamType::Audio => write!(f, "Audio"), |
| 1276 | StreamType::Subtitles => write!(f, "Subtitles"), |
| 1277 | StreamType::Data => write!(f, "Data"), |
| 1278 | StreamType::None => write!(f, "-"), |
| 1279 | } |
| 1280 | } |
| 1281 | } |
| 1282 | |
/// Stream data.
#[allow(dead_code)]
#[derive(Clone)]
pub struct NAStream {
    // Stream content type.
    media_type: StreamType,
    /// Stream ID.
    pub id: u32,
    // Stream number as assigned by the demuxer.
    num: usize,
    // Codec information for this stream.
    info: NACodecInfoRef,
    /// Timebase numerator.
    pub tb_num: u32,
    /// Timebase denominator.
    pub tb_den: u32,
    /// Duration in timebase units (zero if not available).
    pub duration: u64,
}

/// A specialised reference-counted `NAStream` type.
pub type NAStreamRef = Arc<NAStream>;
| 1302 | |
/// Downscales the timebase by its greatest common denominator.
pub fn reduce_timebase(tb_num: u32, tb_den: u32) -> (u32, u32) {
    // A zero numerator cannot be reduced.
    if tb_num == 0 { return (tb_num, tb_den); }
    if (tb_den % tb_num) == 0 { return (1, tb_den / tb_num); }

    // Euclid's algorithm in its modulo form: O(log(min(a, b))) iterations,
    // unlike the subtraction variant which needs O(max/min) steps for
    // skewed inputs such as (1, 90000).
    let mut a = tb_num;
    let mut b = tb_den;
    while b != 0 {
        let r = a % b;
        a = b;
        b = r;
    }

    (tb_num / a, tb_den / a)
}
| 1319 | |
| 1320 | impl NAStream { |
| 1321 | /// Constructs a new `NAStream` instance. |
| 1322 | pub fn new(mt: StreamType, id: u32, info: NACodecInfo, tb_num: u32, tb_den: u32, duration: u64) -> Self { |
| 1323 | let (n, d) = reduce_timebase(tb_num, tb_den); |
| 1324 | NAStream { media_type: mt, id, num: 0, info: info.into_ref(), tb_num: n, tb_den: d, duration } |
| 1325 | } |
| 1326 | /// Returns stream id. |
| 1327 | pub fn get_id(&self) -> u32 { self.id } |
| 1328 | /// Returns stream type. |
| 1329 | pub fn get_media_type(&self) -> StreamType { self.media_type } |
| 1330 | /// Returns stream number assigned by demuxer. |
| 1331 | pub fn get_num(&self) -> usize { self.num } |
| 1332 | /// Sets stream number. |
| 1333 | pub fn set_num(&mut self, num: usize) { self.num = num; } |
| 1334 | /// Returns codec information. |
| 1335 | pub fn get_info(&self) -> NACodecInfoRef { self.info.clone() } |
| 1336 | /// Returns stream timebase. |
| 1337 | pub fn get_timebase(&self) -> (u32, u32) { (self.tb_num, self.tb_den) } |
| 1338 | /// Sets new stream timebase. |
| 1339 | pub fn set_timebase(&mut self, tb_num: u32, tb_den: u32) { |
| 1340 | let (n, d) = reduce_timebase(tb_num, tb_den); |
| 1341 | self.tb_num = n; |
| 1342 | self.tb_den = d; |
| 1343 | } |
| 1344 | /// Returns stream duration. |
| 1345 | pub fn get_duration(&self) -> usize { self.num } |
| 1346 | /// Converts current instance into a reference-counted one. |
| 1347 | pub fn into_ref(self) -> NAStreamRef { Arc::new(self) } |
| 1348 | } |
| 1349 | |
| 1350 | impl fmt::Display for NAStream { |
| 1351 | fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { |
| 1352 | write!(f, "({}#{} @ {}/{} - {})", self.media_type, self.id, self.tb_num, self.tb_den, self.info.get_properties()) |
| 1353 | } |
| 1354 | } |
| 1355 | |
/// Side data that may accompany demuxed data.
#[derive(Clone)]
pub enum NASideData {
    /// Palette information.
    ///
    /// This side data contains a flag signalling that palette has changed since previous time and a reference to the current palette.
    /// Palette is stored in 8-bit RGBA format (256 entries of 4 bytes each).
    Palette(bool, Arc<[u8; 1024]>),
    /// Generic user data.
    UserData(Arc<Vec<u8>>),
}
| 1367 | |
/// Packet with compressed data.
#[allow(dead_code)]
pub struct NAPacket {
    // Stream this packet belongs to.
    stream: NAStreamRef,
    /// Packet timestamp.
    pub ts: NATimeInfo,
    // Raw compressed payload.
    buffer: NABufferRef<Vec<u8>>,
    /// Keyframe flag.
    pub keyframe: bool,
//    options: HashMap<String, NAValue<'a>>,
    /// Packet side data (e.g. palette for paletted formats).
    pub side_data: Vec<NASideData>,
}
| 1381 | |
| 1382 | impl NAPacket { |
| 1383 | /// Constructs a new `NAPacket` instance. |
| 1384 | pub fn new(str: NAStreamRef, ts: NATimeInfo, kf: bool, vec: Vec<u8>) -> Self { |
| 1385 | // let mut vec: Vec<u8> = Vec::new(); |
| 1386 | // vec.resize(size, 0); |
| 1387 | NAPacket { stream: str, ts, keyframe: kf, buffer: NABufferRef::new(vec), side_data: Vec::new() } |
| 1388 | } |
| 1389 | /// Constructs a new `NAPacket` instance reusing a buffer reference. |
| 1390 | pub fn new_from_refbuf(str: NAStreamRef, ts: NATimeInfo, kf: bool, buffer: NABufferRef<Vec<u8>>) -> Self { |
| 1391 | NAPacket { stream: str, ts, keyframe: kf, buffer, side_data: Vec::new() } |
| 1392 | } |
| 1393 | /// Returns information about the stream packet belongs to. |
| 1394 | pub fn get_stream(&self) -> NAStreamRef { self.stream.clone() } |
| 1395 | /// Returns packet timestamp. |
| 1396 | pub fn get_time_information(&self) -> NATimeInfo { self.ts } |
| 1397 | /// Returns packet presentation timestamp. |
| 1398 | pub fn get_pts(&self) -> Option<u64> { self.ts.get_pts() } |
| 1399 | /// Returns packet decoding timestamp. |
| 1400 | pub fn get_dts(&self) -> Option<u64> { self.ts.get_dts() } |
| 1401 | /// Returns packet duration. |
| 1402 | pub fn get_duration(&self) -> Option<u64> { self.ts.get_duration() } |
| 1403 | /// Reports whether this is a keyframe packet. |
| 1404 | pub fn is_keyframe(&self) -> bool { self.keyframe } |
| 1405 | /// Returns a reference to packet data. |
| 1406 | pub fn get_buffer(&self) -> NABufferRef<Vec<u8>> { self.buffer.clone() } |
| 1407 | /// Adds side data for a packet. |
| 1408 | pub fn add_side_data(&mut self, side_data: NASideData) { self.side_data.push(side_data); } |
| 1409 | /// Assigns packet to a new stream. |
| 1410 | pub fn reassign(&mut self, str: NAStreamRef, ts: NATimeInfo) { |
| 1411 | self.stream = str; |
| 1412 | self.ts = ts; |
| 1413 | } |
| 1414 | } |
| 1415 | |
impl Drop for NAPacket {
    // NOTE(review): intentionally empty Drop implementation — presumably a
    // leftover placeholder from when packets needed custom teardown; confirm
    // whether it can be removed entirely.
    fn drop(&mut self) {}
}
| 1419 | |
| 1420 | impl fmt::Display for NAPacket { |
| 1421 | fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { |
| 1422 | let mut ostr = format!("[pkt for {} size {}", self.stream, self.buffer.len()); |
| 1423 | if let Some(pts) = self.ts.pts { ostr = format!("{} pts {}", ostr, pts); } |
| 1424 | if let Some(dts) = self.ts.dts { ostr = format!("{} dts {}", ostr, dts); } |
| 1425 | if let Some(dur) = self.ts.duration { ostr = format!("{} duration {}", ostr, dur); } |
| 1426 | if self.keyframe { ostr = format!("{} kf", ostr); } |
| 1427 | ostr += "]"; |
| 1428 | write!(f, "{}", ostr) |
| 1429 | } |
| 1430 | } |
| 1431 | |
#[cfg(test)]
mod test {
    use super::*;

    #[test]
    fn test_time_parse() {
        // Display formatting of the NATimePoint variants.
        assert_eq!(NATimePoint::PTS(42).to_string(), "42pts");
        assert_eq!(NATimePoint::Milliseconds(4242000).to_string(), "1:10:42");
        assert_eq!(NATimePoint::Milliseconds(42424242).to_string(), "11:47:04.242");
        // Parsing goes through the FromStr implementation via str::parse.
        assert_eq!("42pts".parse::<NATimePoint>().unwrap(), NATimePoint::PTS(42));
        assert_eq!("1:2:3".parse::<NATimePoint>().unwrap(), NATimePoint::Milliseconds(3723000));
        assert_eq!("1:2:3.42".parse::<NATimePoint>().unwrap(), NATimePoint::Milliseconds(3723420));
    }
}