X-Git-Url: https://git.nihav.org/?a=blobdiff_plain;f=src%2Fframe.rs;h=e739559a2e3f69adc0736d67ee2ecdf085398047;hb=f6fa6f3404393e399a05c5d5cda7a35c4e41fb4a;hp=6431bc2e944cf5eb13eca58f5a9d5b2bead3f9a0;hpb=34e4b6177aee170ab36ba9a91185cab1d51266ff;p=nihav.git

diff --git a/src/frame.rs b/src/frame.rs
dissimilarity index 69%
index 6431bc2..e739559 100644
--- a/src/frame.rs
+++ b/src/frame.rs
@@ -1,174 +1,664 @@
-use std::collections::HashMap;
-use std::rc::Rc;
-
-#[allow(dead_code)]
-#[derive(Copy,Clone)]
-pub struct NASoniton {
-    bits:   u8,
-    is_be:  bool,
-    packed: bool,
-    planar: bool,
-    float:  bool,
-    signed: bool,
-}
-
-#[allow(dead_code)]
-pub const SND_U8_FORMAT: NASoniton = NASoniton { bits: 8, is_be: false, packed: false, planar: false, float: false, signed: false };
-#[allow(dead_code)]
-pub const SND_S16_FORMAT: NASoniton = NASoniton { bits: 16, is_be: false, packed: false, planar: false, float: false, signed: true };
-
-#[allow(dead_code)]
-#[derive(Clone,Copy)]
-pub struct NAAudioInfo {
-    sample_rate: u32,
-    channels:    u8,
-    format:      NASoniton,
-    block_len:   usize,
-}
-
-impl NAAudioInfo {
-    pub fn new(sr: u32, ch: u8, fmt: NASoniton, bl: usize) -> Self {
-        NAAudioInfo { sample_rate: sr, channels: ch, format: fmt, block_len: bl }
-    }
-}
-
-#[derive(Debug,Clone,Copy)]
-pub enum ColorModel {
-    RGB,
-    YUV,
-    CMYK,
-    HSV,
-    LAB,
-}
-
-#[allow(dead_code)]
-#[derive(Clone,Copy)]
-pub struct NAPixelChromaton {
-    h_ss:      u8,
-    v_ss:      u8,
-    is_packed: bool,
-    depth:     u8,
-    shift:     u8,
-    comp_offs: u8,
-    next_elem: u8,
-}
-
-#[allow(dead_code)]
-#[derive(Clone,Copy)]
-pub struct NAPixelFormaton {
-    model:      ColorModel,
-    components: u8,
-    comp_info:  [Option<NAPixelChromaton>; 5],
-    elem_size:  u8,
-    has_alpha:  bool,
-    is_palette: bool,
-}
-
-macro_rules! chromaton {
-    ($hs: expr, $vs: expr, $pck: expr, $d: expr, $sh: expr, $co: expr, $ne: expr) => ({
-        Some(NAPixelChromaton{ h_ss: $hs, v_ss: $vs, is_packed: $pck, depth: $d, shift: $sh, comp_offs: $co, next_elem: $ne })
-    });
-    (yuv8; $hs: expr, $vs: expr, $co: expr) => ({
-        Some(NAPixelChromaton{ h_ss: $hs, v_ss: $vs, is_packed: false, depth: 8, shift: 0, comp_offs: $co, next_elem: 1 })
-    });
-    (pal8; $co: expr) => ({
-        Some(NAPixelChromaton{ h_ss: 0, v_ss: 0, is_packed: true, depth: 8, shift: 0, comp_offs: $co, next_elem: 3 })
-    });
-}
-
-#[allow(dead_code)]
-pub const YUV420_FORMAT: NAPixelFormaton = NAPixelFormaton { model: ColorModel::YUV, components: 3,
-                                        comp_info: [
-                                            chromaton!(0, 0, false, 8, 0, 0, 1),
-                                            chromaton!(yuv8; 1, 1, 1),
-                                            chromaton!(yuv8; 1, 1, 2),
-                                            None, None],
-                                        elem_size: 0, has_alpha: false, is_palette: false };
-
-#[allow(dead_code)]
-pub const PAL8_FORMAT: NAPixelFormaton = NAPixelFormaton { model: ColorModel::RGB, components: 3,
-                                        comp_info: [
-                                            chromaton!(pal8; 0),
-                                            chromaton!(pal8; 1),
-                                            chromaton!(pal8; 2),
-                                            None, None],
-                                        elem_size: 1, has_alpha: false, is_palette: true };
-
-
-#[allow(dead_code)]
-#[derive(Clone,Copy)]
-pub struct NAVideoInfo {
-    width:   u32,
-    height:  u32,
-    flipped: bool,
-    format:  NAPixelFormaton,
-}
-
-impl NAVideoInfo {
-    pub fn new(w: u32, h: u32, flip: bool, fmt: NAPixelFormaton) -> Self {
-        NAVideoInfo { width: w, height: h, flipped: flip, format: fmt }
-    }
-}
-
-#[derive(Clone,Copy)]
-pub enum NACodecTypeInfo {
-    None,
-    Audio(NAAudioInfo),
-    Video(NAVideoInfo),
-}
-
-#[allow(dead_code)]
-pub struct NABuffer<'a> {
-    id:   u64,
-    data: &'a mut [u8],
-}
-
-#[allow(dead_code)]
-#[derive(Clone)]
-pub struct NACodecInfo {
-    properties: NACodecTypeInfo,
-    extradata:  Option<Rc<Vec<u8>>>,
-}
-
-impl NACodecInfo {
-    pub fn new(p: NACodecTypeInfo, edata: Option<Vec<u8>>) -> Self {
-        let extradata = match edata {
-            None => None,
-            Some(vec) => Some(Rc::new(vec)),
-        };
-        NACodecInfo { properties: p, extradata: extradata }
-    }
-    pub fn get_properties(&self) -> NACodecTypeInfo { self.properties }
-    pub fn get_extradata(&self) -> Option<Rc<Vec<u8>>> {
-        if let Some(ref vec) = self.extradata { return Some(vec.clone()); }
-        None
-    }
-}
-
-pub trait NABufferAllocator {
-    fn alloc_buf(info: &NACodecInfo) -> NABuffer<'static>;
-}
-
-#[derive(Debug)]
-pub enum NAValue<'a> {
-    None,
-    Int(i32),
-    Long(i64),
-    String(String),
-    Data(&'a [u8]),
-}
-
-#[allow(dead_code)]
-pub struct NAFrame<'a> {
-    pts:      Option<u64>,
-    dts:      Option<u64>,
-    duration: Option<u64>,
-    buffer:   &'a mut NABuffer<'a>,
-    info:     &'a NACodecInfo,
-    options:  HashMap<String, NAValue<'a>>,
-}
-
-#[allow(dead_code)]
-pub struct NACodecContext<'a> {
-    info: &'a NACodecInfo,
-}
+use std::cmp::max;
+use std::collections::HashMap;
+use std::fmt;
+use std::rc::Rc;
+use std::cell::*;
+use formats::*;
+
+#[allow(dead_code)]
+#[derive(Clone,Copy,PartialEq)]
+pub struct NAAudioInfo {
+    sample_rate: u32,
+    channels:    u8,
+    format:      NASoniton,
+    block_len:   usize,
+}
+
+impl NAAudioInfo {
+    pub fn new(sr: u32, ch: u8, fmt: NASoniton, bl: usize) -> Self {
+        NAAudioInfo { sample_rate: sr, channels: ch, format: fmt, block_len: bl }
+    }
+    pub fn get_sample_rate(&self) -> u32 { self.sample_rate }
+    pub fn get_channels(&self) -> u8 { self.channels }
+    pub fn get_format(&self) -> NASoniton { self.format }
+    pub fn get_block_len(&self) -> usize { self.block_len }
+}
+
+impl fmt::Display for NAAudioInfo {
+    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
+        write!(f, "{} Hz, {} ch", self.sample_rate, self.channels)
+    }
+}
+
+#[allow(dead_code)]
+#[derive(Clone,Copy,PartialEq)]
+pub struct NAVideoInfo {
+    width:   usize,
+    height:  usize,
+    flipped: bool,
+    format:  NAPixelFormaton,
+}
+
+impl NAVideoInfo {
+    pub fn new(w: usize, h: usize, flip: bool, fmt: NAPixelFormaton) -> Self {
+        NAVideoInfo { width: w, height: h, flipped: flip, format: fmt }
+    }
+    pub fn get_width(&self)  -> usize { self.width as usize }
+    pub fn get_height(&self) -> usize { self.height as usize }
+    pub fn is_flipped(&self) -> bool { self.flipped }
+    pub fn get_format(&self) -> NAPixelFormaton { self.format }
+}
+
+impl fmt::Display for NAVideoInfo {
+    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
+        write!(f, "{}x{}", self.width, self.height)
+    }
+}
+
+#[derive(Clone,Copy,PartialEq)]
+pub enum NACodecTypeInfo {
+    None,
+    Audio(NAAudioInfo),
+    Video(NAVideoInfo),
+}
+
+impl NACodecTypeInfo {
+    pub fn get_video_info(&self) -> Option<NAVideoInfo> {
+        match *self {
+            NACodecTypeInfo::Video(vinfo) => Some(vinfo),
+            _ => None,
+        }
+    }
+    pub fn get_audio_info(&self) -> Option<NAAudioInfo> {
+        match *self {
+            NACodecTypeInfo::Audio(ainfo) => Some(ainfo),
+            _ => None,
+        }
+    }
+}
+
+impl fmt::Display for NACodecTypeInfo {
+    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
+        let ret = match *self {
+            NACodecTypeInfo::None       => format!(""),
+            NACodecTypeInfo::Audio(fmt) => format!("{}", fmt),
+            NACodecTypeInfo::Video(fmt) => format!("{}", fmt),
+        };
+        write!(f, "{}", ret)
+    }
+}
+
+pub type NABufferRefT<T> = Rc<RefCell<Vec<T>>>;
+
+#[derive(Clone)]
+pub struct NAVideoBuffer<T> {
+    info:    NAVideoInfo,
+    data:    NABufferRefT<T>,
+    offs:    Vec<usize>,
+    strides: Vec<usize>,
+}
+
+impl<T: Clone> NAVideoBuffer<T> {
+    pub fn get_offset(&self, idx: usize) -> usize {
+        if idx >= self.offs.len() { 0 }
+        else { self.offs[idx] }
+    }
+    pub fn get_info(&self) -> NAVideoInfo { self.info }
+    pub fn get_data(&self) -> Ref<Vec<T>> { self.data.borrow() }
+    pub fn get_data_mut(&mut self) -> RefMut<Vec<T>> { self.data.borrow_mut() }
+    pub fn copy_buffer(&mut self) -> Self {
+        let mut data: Vec<T> = Vec::with_capacity(self.data.borrow().len());
+        data.clone_from(self.data.borrow().as_ref());
+        let mut offs: Vec<usize> = Vec::with_capacity(self.offs.len());
+        offs.clone_from(&self.offs);
+        let mut strides: Vec<usize> = Vec::with_capacity(self.strides.len());
+        strides.clone_from(&self.strides);
+        NAVideoBuffer { info: self.info, data: Rc::new(RefCell::new(data)), offs: offs, strides: strides }
+    }
+    pub fn get_stride(&self, idx: usize) -> usize {
+        if idx >= self.strides.len() { return 0; }
+        self.strides[idx]
+    }
+    pub fn get_dimensions(&self, idx: usize) -> (usize, usize) {
+        get_plane_size(&self.info, idx)
+    }
+}
+
+#[derive(Clone)]
+pub struct NAAudioBuffer<T> {
+    info:  NAAudioInfo,
+    data:  NABufferRefT<T>,
+    offs:  Vec<usize>,
+    chmap: NAChannelMap,
+}
+
+impl<T: Clone> NAAudioBuffer<T> {
+    pub fn get_offset(&self, idx: usize) -> usize {
+        if idx >= self.offs.len() { 0 }
+        else { self.offs[idx] }
+    }
+    pub fn get_info(&self) -> NAAudioInfo { self.info }
+    pub fn get_chmap(&self) -> NAChannelMap { self.chmap.clone() }
+    pub fn get_data(&self) -> Ref<Vec<T>> { self.data.borrow() }
+    pub fn get_data_mut(&mut self) -> RefMut<Vec<T>> { self.data.borrow_mut() }
+    pub fn copy_buffer(&mut self) -> Self {
+        let mut data: Vec<T> = Vec::with_capacity(self.data.borrow().len());
+        data.clone_from(self.data.borrow().as_ref());
+        let mut offs: Vec<usize> = Vec::with_capacity(self.offs.len());
+        offs.clone_from(&self.offs);
+        NAAudioBuffer { info: self.info, data: Rc::new(RefCell::new(data)), offs: offs, chmap: self.get_chmap() }
+    }
+}
+
+impl NAAudioBuffer<u8> {
+    pub fn new_from_buf(info: NAAudioInfo, data: NABufferRefT<u8>, chmap: NAChannelMap) -> Self {
+        NAAudioBuffer { info: info, data: data, chmap: chmap, offs: Vec::new() }
+    }
+}
+
+#[derive(Clone)]
+pub enum NABufferType {
+    Video      (NAVideoBuffer<u8>),
+    Video16    (NAVideoBuffer<u16>),
+    VideoPacked(NAVideoBuffer<u8>),
+    AudioU8    (NAAudioBuffer<u8>),
+    AudioI16   (NAAudioBuffer<i16>),
+    AudioI32   (NAAudioBuffer<i32>),
+    AudioF32   (NAAudioBuffer<f32>),
+    AudioPacked(NAAudioBuffer<u8>),
+    Data       (NABufferRefT<u8>),
+    None,
+}
+
+impl NABufferType {
+    pub fn get_offset(&self, idx: usize) -> usize {
+        match *self {
+            NABufferType::Video(ref vb)       => vb.get_offset(idx),
+            NABufferType::Video16(ref vb)     => vb.get_offset(idx),
+            NABufferType::VideoPacked(ref vb) => vb.get_offset(idx),
+            NABufferType::AudioU8(ref ab)     => ab.get_offset(idx),
+            NABufferType::AudioI16(ref ab)    => ab.get_offset(idx),
+            NABufferType::AudioF32(ref ab)    => ab.get_offset(idx),
+            NABufferType::AudioPacked(ref ab) => ab.get_offset(idx),
+            _ => 0,
+        }
+    }
+    pub fn get_vbuf(&mut self) -> Option<NAVideoBuffer<u8>> {
+        match *self {
+            NABufferType::Video(ref vb)       => Some(vb.clone()),
+            NABufferType::VideoPacked(ref vb) => Some(vb.clone()),
+            _ => None,
+        }
+    }
+    pub fn get_vbuf16(&mut self) -> Option<NAVideoBuffer<u16>> {
+        match *self {
+            NABufferType::Video16(ref vb)     => Some(vb.clone()),
+            _ => None,
+        }
+    }
+    pub fn get_abuf_u8(&mut self) -> Option<NAAudioBuffer<u8>> {
+        match *self {
+            NABufferType::AudioU8(ref ab)     => Some(ab.clone()),
+            NABufferType::AudioPacked(ref ab) => Some(ab.clone()),
+            _ => None,
+        }
+    }
+    pub fn get_abuf_i16(&mut self) -> Option<NAAudioBuffer<i16>> {
+        match *self {
+            NABufferType::AudioI16(ref ab)    => Some(ab.clone()),
+            _ => None,
+        }
+    }
+    pub fn get_abuf_i32(&mut self) -> Option<NAAudioBuffer<i32>> {
+        match *self {
+            NABufferType::AudioI32(ref ab)    => Some(ab.clone()),
+            _ => None,
+        }
+    }
+    pub fn get_abuf_f32(&mut self) -> Option<NAAudioBuffer<f32>> {
+        match *self {
+            NABufferType::AudioF32(ref ab)    => Some(ab.clone()),
+            _ => None,
+        }
+    }
+}
+
+#[derive(Debug,Clone,Copy,PartialEq)]
+pub enum AllocatorError {
+    TooLargeDimensions,
+    FormatError,
+}
+
+pub fn alloc_video_buffer(vinfo: NAVideoInfo, align: u8) -> Result<NABufferType, AllocatorError> {
+    let fmt = &vinfo.format;
+    let mut new_size: usize = 0;
+    let mut offs:    Vec<usize> = Vec::new();
+    let mut strides: Vec<usize> = Vec::new();
+
+    for i in 0..fmt.get_num_comp() {
+        if fmt.get_chromaton(i) == None { return Err(AllocatorError::FormatError); }
+    }
+
+    let align_mod = ((1 << align) as usize) - 1;
+    let width  = ((vinfo.width  as usize) + align_mod) & !align_mod;
+    let height = ((vinfo.height as usize) + align_mod) & !align_mod;
+    let mut max_depth = 0;
+    let mut all_packed = true;
+    for i in 0..fmt.get_num_comp() {
+        let ochr = fmt.get_chromaton(i);
+        if let None = ochr { continue; }
+        let chr = ochr.unwrap();
+        if !chr.is_packed() {
+            all_packed = false;
+            break;
+        }
+        max_depth = max(max_depth, chr.get_depth());
+    }
+
+//todo semi-packed like NV12
+    if fmt.is_paletted() {
+//todo various-sized palettes?
+        let stride = vinfo.get_format().get_chromaton(0).unwrap().get_linesize(width);
+        let pic_sz = stride.checked_mul(height);
+        if pic_sz == None { return Err(AllocatorError::TooLargeDimensions); }
+        let pal_size = 256 * (fmt.get_elem_size() as usize);
+        let new_size = pic_sz.unwrap().checked_add(pal_size);
+        if new_size == None { return Err(AllocatorError::TooLargeDimensions); }
+        offs.push(0);
+        offs.push(stride * height);
+        strides.push(stride);
+        let mut data: Vec<u8> = Vec::with_capacity(new_size.unwrap());
+        data.resize(new_size.unwrap(), 0);
+        let buf: NAVideoBuffer<u8> = NAVideoBuffer { data: Rc::new(RefCell::new(data)), info: vinfo, offs: offs, strides: strides };
+        Ok(NABufferType::Video(buf))
+    } else if !all_packed {
+        for i in 0..fmt.get_num_comp() {
+            let ochr = fmt.get_chromaton(i);
+            if let None = ochr { continue; }
+            let chr = ochr.unwrap();
+            if !vinfo.is_flipped() {
+                offs.push(new_size as usize);
+            }
+            let stride = chr.get_linesize(width);
+            let cur_h = chr.get_height(height);
+            let cur_sz = stride.checked_mul(cur_h);
+            if cur_sz == None { return Err(AllocatorError::TooLargeDimensions); }
+            let new_sz = new_size.checked_add(cur_sz.unwrap());
+            if new_sz == None { return Err(AllocatorError::TooLargeDimensions); }
+            new_size = new_sz.unwrap();
+            if vinfo.is_flipped() {
+                offs.push(new_size as usize);
+            }
+            strides.push(stride);
+        }
+        if max_depth <= 8 {
+            let mut data: Vec<u8> = Vec::with_capacity(new_size);
+            data.resize(new_size, 0);
+            let buf: NAVideoBuffer<u8> = NAVideoBuffer { data: Rc::new(RefCell::new(data)), info: vinfo, offs: offs, strides: strides };
+            Ok(NABufferType::Video(buf))
+        } else {
+            let mut data: Vec<u16> = Vec::with_capacity(new_size);
+            data.resize(new_size, 0);
+            let buf: NAVideoBuffer<u16> = NAVideoBuffer { data: Rc::new(RefCell::new(data)), info: vinfo, offs: offs, strides: strides };
+            Ok(NABufferType::Video16(buf))
+        }
+    } else {
+        let elem_sz = fmt.get_elem_size();
+        let line_sz = width.checked_mul(elem_sz as usize);
+        if line_sz == None { return Err(AllocatorError::TooLargeDimensions); }
+        let new_sz = line_sz.unwrap().checked_mul(height);
+        if new_sz == None { return Err(AllocatorError::TooLargeDimensions); }
+        new_size = new_sz.unwrap();
+        let mut data: Vec<u8> = Vec::with_capacity(new_size);
+        data.resize(new_size, 0);
+        strides.push(line_sz.unwrap());
+        let buf: NAVideoBuffer<u8> = NAVideoBuffer { data: Rc::new(RefCell::new(data)), info: vinfo, offs: offs, strides: strides };
+        Ok(NABufferType::VideoPacked(buf))
+    }
+}
+
+pub fn alloc_audio_buffer(ainfo: NAAudioInfo, nsamples: usize, chmap: NAChannelMap) -> Result<NABufferType, AllocatorError> {
+    let mut offs: Vec<usize> = Vec::new();
+    if ainfo.format.is_planar() {
+        let len = nsamples.checked_mul(ainfo.channels as usize);
+        if len == None { return Err(AllocatorError::TooLargeDimensions); }
+        let length = len.unwrap();
+        for i in 0..ainfo.channels {
+            offs.push((i as usize) * nsamples);
+        }
+        if ainfo.format.is_float() {
+            if ainfo.format.get_bits() == 32 {
+                let mut data: Vec<f32> = Vec::with_capacity(length);
+                data.resize(length, 0.0);
+                let buf: NAAudioBuffer<f32> = NAAudioBuffer { data: Rc::new(RefCell::new(data)), info: ainfo, offs: offs, chmap: chmap };
+                Ok(NABufferType::AudioF32(buf))
+            } else {
+                Err(AllocatorError::TooLargeDimensions)
+            }
+        } else {
+            if ainfo.format.get_bits() == 8 && !ainfo.format.is_signed() {
+                let mut data: Vec<u8> = Vec::with_capacity(length);
+                data.resize(length, 0);
+                let buf: NAAudioBuffer<u8> = NAAudioBuffer { data: Rc::new(RefCell::new(data)), info: ainfo, offs: offs, chmap: chmap };
+                Ok(NABufferType::AudioU8(buf))
+            } else if ainfo.format.get_bits() == 16 && ainfo.format.is_signed() {
+                let mut data: Vec<i16> = Vec::with_capacity(length);
+                data.resize(length, 0);
+                let buf: NAAudioBuffer<i16> = NAAudioBuffer { data: Rc::new(RefCell::new(data)), info: ainfo, offs: offs, chmap: chmap };
+                Ok(NABufferType::AudioI16(buf))
+            } else {
+                Err(AllocatorError::TooLargeDimensions)
+            }
+        }
+    } else {
+        let len = nsamples.checked_mul(ainfo.channels as usize);
+        if len == None { return Err(AllocatorError::TooLargeDimensions); }
+        let length = ainfo.format.get_audio_size(len.unwrap() as u64);
+        let mut data: Vec<u8> = Vec::with_capacity(length);
+        data.resize(length, 0);
+        let buf: NAAudioBuffer<u8> = NAAudioBuffer { data: Rc::new(RefCell::new(data)), info: ainfo, offs: offs, chmap: chmap };
+        Ok(NABufferType::AudioPacked(buf))
+    }
+}
+
+pub fn alloc_data_buffer(size: usize) -> Result<NABufferType, AllocatorError> {
+    let mut data: Vec<u8> = Vec::with_capacity(size);
+    data.resize(size, 0);
+    let buf: NABufferRefT<u8> = Rc::new(RefCell::new(data));
+    Ok(NABufferType::Data(buf))
+}
+
+pub fn copy_buffer(buf: NABufferType) -> NABufferType {
+    buf.clone()
+}
+
+#[allow(dead_code)]
+#[derive(Clone)]
+pub struct NACodecInfo {
+    name:       &'static str,
+    properties: NACodecTypeInfo,
+    extradata:  Option<Rc<Vec<u8>>>,
+}
+
+impl NACodecInfo {
+    pub fn new(name: &'static str, p: NACodecTypeInfo, edata: Option<Vec<u8>>) -> Self {
+        let extradata = match edata {
+            None => None,
+            Some(vec) => Some(Rc::new(vec)),
+        };
+        NACodecInfo { name: name, properties: p, extradata: extradata }
+    }
+    pub fn new_ref(name: &'static str, p: NACodecTypeInfo, edata: Option<Rc<Vec<u8>>>) -> Self {
+        NACodecInfo { name: name, properties: p, extradata: edata }
+    }
+    pub fn get_properties(&self) -> NACodecTypeInfo { self.properties }
+    pub fn get_extradata(&self) -> Option<Rc<Vec<u8>>> {
+        if let Some(ref vec) = self.extradata { return Some(vec.clone()); }
+        None
+    }
+    pub fn get_name(&self) -> &'static str { self.name }
+    pub fn is_video(&self) -> bool {
+        if let NACodecTypeInfo::Video(_) = self.properties { return true; }
+        false
+    }
+    pub fn is_audio(&self) -> bool {
+        if let NACodecTypeInfo::Audio(_) = self.properties { return true; }
+        false
+    }
+}
+
+impl fmt::Display for NACodecInfo {
+    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
+        let edata = match self.extradata.clone() {
+            None => format!("no extradata"),
+            Some(v) => format!("{} byte(s) of extradata", v.len()),
+        };
+        write!(f, "{}: {} {}", self.name, self.properties, edata)
+    }
+}
+
+pub const DUMMY_CODEC_INFO: NACodecInfo = NACodecInfo {
+                                name: "none",
+                                properties: NACodecTypeInfo::None,
+                                extradata: None };
+
+#[derive(Debug,Clone)]
+pub enum NAValue {
+    None,
+    Int(i32),
+    Long(i64),
+    String(String),
+    Data(Rc<Vec<u8>>),
+}
+
+#[derive(Debug,Clone,Copy,PartialEq)]
+#[allow(dead_code)]
+pub enum FrameType {
+    I,
+    P,
+    B,
+    Skip,
+    Other,
+}
+
+impl fmt::Display for FrameType {
+    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
+        match *self {
+            FrameType::I => write!(f, "I"),
+            FrameType::P => write!(f, "P"),
+            FrameType::B => write!(f, "B"),
+            FrameType::Skip => write!(f, "skip"),
+            FrameType::Other => write!(f, "x"),
+        }
+    }
+}
+
+#[derive(Debug,Clone,Copy)]
+pub struct NATimeInfo {
+    pts:      Option<u64>,
+    dts:      Option<u64>,
+    duration: Option<u64>,
+    tb_num:   u32,
+    tb_den:   u32,
+}
+
+impl NATimeInfo {
+    pub fn new(pts: Option<u64>, dts: Option<u64>, duration: Option<u64>, tb_num: u32, tb_den: u32) -> Self {
+        NATimeInfo { pts: pts, dts: dts, duration: duration, tb_num: tb_num, tb_den: tb_den }
+    }
+    pub fn get_pts(&self) -> Option<u64> { self.pts }
+    pub fn get_dts(&self) -> Option<u64> { self.dts }
+    pub fn get_duration(&self) -> Option<u64> { self.duration }
+    pub fn set_pts(&mut self, pts: Option<u64>) { self.pts = pts; }
+    pub fn set_dts(&mut self, dts: Option<u64>) { self.dts = dts; }
+    pub fn set_duration(&mut self, dur: Option<u64>) { self.duration = dur; }
+}
+
+#[allow(dead_code)]
+#[derive(Clone)]
+pub struct NAFrame {
+    ts:      NATimeInfo,
+    buffer:  NABufferType,
+    info:    Rc<NACodecInfo>,
+    ftype:   FrameType,
+    key:     bool,
+    options: HashMap<String, NAValue>,
+}
+
+pub type NAFrameRef = Rc<RefCell<NAFrame>>;
+
+fn get_plane_size(info: &NAVideoInfo, idx: usize) -> (usize, usize) {
+    let chromaton = info.get_format().get_chromaton(idx);
+    if let None = chromaton { return (0, 0); }
+    let (hs, vs) = chromaton.unwrap().get_subsampling();
+    let w = (info.get_width()  + ((1 << hs) - 1)) >> hs;
+    let h = (info.get_height() + ((1 << vs) - 1)) >> vs;
+    (w, h)
+}
+
+impl NAFrame {
+    pub fn new(ts:       NATimeInfo,
+               ftype:    FrameType,
+               keyframe: bool,
+               info:     Rc<NACodecInfo>,
+               options:  HashMap<String, NAValue>,
+               buffer:   NABufferType) -> Self {
+        NAFrame { ts: ts, buffer: buffer, info: info, ftype: ftype, key: keyframe, options: options }
+    }
+    pub fn get_frame_type(&self) -> FrameType { self.ftype }
+    pub fn is_keyframe(&self) -> bool { self.key }
+    pub fn set_frame_type(&mut self, ftype: FrameType) { self.ftype = ftype; }
+    pub fn set_keyframe(&mut self, key: bool) { self.key = key; }
+    pub fn get_time_information(&self) -> NATimeInfo { self.ts }
+    pub fn get_pts(&self) -> Option<u64> { self.ts.get_pts() }
+    pub fn get_dts(&self) -> Option<u64> { self.ts.get_dts() }
+    pub fn get_duration(&self) -> Option<u64> { self.ts.get_duration() }
+    pub fn set_pts(&mut self, pts: Option<u64>) { self.ts.set_pts(pts); }
+    pub fn set_dts(&mut self, dts: Option<u64>) { self.ts.set_dts(dts); }
+    pub fn set_duration(&mut self, dur: Option<u64>) { self.ts.set_duration(dur); }
+
+    pub fn get_buffer(&self) -> NABufferType { self.buffer.clone() }
+}
+
+impl fmt::Display for NAFrame {
+    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
+        let mut foo = format!("frame type {}", self.ftype);
+        if let Some(pts) = self.ts.pts { foo = format!("{} pts {}", foo, pts); }
+        if let Some(dts) = self.ts.dts { foo = format!("{} dts {}", foo, dts); }
+        if let Some(dur) = self.ts.duration { foo = format!("{} duration {}", foo, dur); }
+        if self.key { foo = format!("{} kf", foo); }
+        write!(f, "[{}]", foo)
+    }
+}
+
+/// Possible stream types.
+#[derive(Debug,Clone,Copy)]
+#[allow(dead_code)]
+pub enum StreamType {
+    /// video stream
+    Video,
+    /// audio stream
+    Audio,
+    /// subtitles
+    Subtitles,
+    /// any data stream (or might be an unrecognized audio/video stream)
+    Data,
+    /// nonexistent stream
+    None,
+}
+
+impl fmt::Display for StreamType {
+    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
+        match *self {
+            StreamType::Video => write!(f, "Video"),
+            StreamType::Audio => write!(f, "Audio"),
+            StreamType::Subtitles => write!(f, "Subtitles"),
+            StreamType::Data => write!(f, "Data"),
+            StreamType::None => write!(f, "-"),
+        }
+    }
+}
+
+#[allow(dead_code)]
+#[derive(Clone)]
+pub struct NAStream {
+    media_type: StreamType,
+    id:         u32,
+    num:        usize,
+    info:       Rc<NACodecInfo>,
+    tb_num:     u32,
+    tb_den:     u32,
+}
+
+pub fn reduce_timebase(tb_num: u32, tb_den: u32) -> (u32, u32) {
+    if tb_num == 0 { return (tb_num, tb_den); }
+    if (tb_den % tb_num) == 0 { return (1, tb_den / tb_num); }
+
+    let mut a = tb_num;
+    let mut b = tb_den;
+
+    while a != b {
+        if a > b { a -= b; }
+        else if b > a { b -= a; }
+    }
+
+    (tb_num / a, tb_den / a)
+}
+
+impl NAStream {
+    pub fn new(mt: StreamType, id: u32, info: NACodecInfo, tb_num: u32, tb_den: u32) -> Self {
+        let (n, d) = reduce_timebase(tb_num, tb_den);
+        NAStream { media_type: mt, id: id, num: 0, info: Rc::new(info), tb_num: n, tb_den: d }
+    }
+    pub fn get_id(&self) -> u32 { self.id }
+    pub fn get_num(&self) -> usize { self.num }
+    pub fn set_num(&mut self, num: usize) { self.num = num; }
+    pub fn get_info(&self) -> Rc<NACodecInfo> { self.info.clone() }
+    pub fn get_timebase(&self) -> (u32, u32) { (self.tb_num, self.tb_den) }
+    pub fn set_timebase(&mut self, tb_num: u32, tb_den: u32) {
+        let (n, d) = reduce_timebase(tb_num, tb_den);
+        self.tb_num = n;
+        self.tb_den = d;
+    }
+}
+
+impl fmt::Display for NAStream {
+    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
+        write!(f, "({}#{} @ {}/{} - {})", self.media_type, self.id, self.tb_num, self.tb_den, self.info.get_properties())
+    }
+}
+
+#[allow(dead_code)]
+pub struct NAPacket {
+    stream:   Rc<NAStream>,
+    ts:       NATimeInfo,
+    buffer:   Rc<Vec<u8>>,
+    keyframe: bool,
+//    options:  HashMap<String, NAValue<'a>>,
+}
+
+impl NAPacket {
+    pub fn new(str: Rc<NAStream>, ts: NATimeInfo, kf: bool, vec: Vec<u8>) -> Self {
+//        let mut vec: Vec<u8> = Vec::new();
+//        vec.resize(size, 0);
+        NAPacket { stream: str, ts: ts, keyframe: kf, buffer: Rc::new(vec) }
+    }
+    pub fn get_stream(&self) -> Rc<NAStream> { self.stream.clone() }
+    pub fn get_time_information(&self) -> NATimeInfo { self.ts }
+    pub fn get_pts(&self) -> Option<u64> { self.ts.get_pts() }
+    pub fn get_dts(&self) -> Option<u64> { self.ts.get_dts() }
+    pub fn get_duration(&self) -> Option<u64> { self.ts.get_duration() }
+    pub fn is_keyframe(&self) -> bool { self.keyframe }
+    pub fn get_buffer(&self) -> Rc<Vec<u8>> { self.buffer.clone() }
+}
+
+impl Drop for NAPacket {
+    fn drop(&mut self) {}
+}
+
+impl fmt::Display for NAPacket {
+    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
+        let mut foo = format!("[pkt for {} size {}", self.stream, self.buffer.len());
+        if let Some(pts) = self.ts.pts { foo = format!("{} pts {}", foo, pts); }
+        if let Some(dts) = self.ts.dts { foo = format!("{} dts {}", foo, dts); }
+        if let Some(dur) = self.ts.duration { foo = format!("{} duration {}", foo, dur); }
+        if self.keyframe { foo = format!("{} kf", foo); }
+        foo = foo + "]";
+        write!(f, "{}", foo)
+    }
+}
+
+pub trait FrameFromPacket {
+    fn new_from_pkt(pkt: &NAPacket, info: Rc<NACodecInfo>, buf: NABufferType) -> NAFrame;
+    fn fill_timestamps(&mut self, pkt: &NAPacket);
+}
+
+impl FrameFromPacket for NAFrame {
+    fn new_from_pkt(pkt: &NAPacket, info: Rc<NACodecInfo>, buf: NABufferType) -> NAFrame {
+        NAFrame::new(pkt.ts, FrameType::Other, pkt.keyframe, info, HashMap::new(), buf)
+    }
+    fn fill_timestamps(&mut self, pkt: &NAPacket) {
+        self.ts = pkt.get_time_information();
+    }
+}
+
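
A minimal usage sketch of the buffer and frame API introduced by this commit (not part of the diff itself). It assumes the code lives in the same crate, that YUV420_FORMAT is now provided by the formats module (as the new `use formats::*;` suggests), and that the codec name "rawvideo", the function name demo and the timestamps are made-up illustration values.

// Hypothetical sketch, not from the commit: allocate a 320x240 YUV 4:2:0 picture,
// wrap it into a keyframe and query the luma plane geometry.
use std::collections::HashMap;
use std::rc::Rc;
use formats::*; // assumed: YUV420_FORMAT and friends moved here by this commit

fn demo() -> Result<(), AllocatorError> {
    let vinfo = NAVideoInfo::new(320, 240, false, YUV420_FORMAT);
    let mut buf = alloc_video_buffer(vinfo, 2)?;          // align = 2 -> 4-pixel alignment
    let ts = NATimeInfo::new(Some(0), None, None, 1, 25); // pts 0 in a 1/25 timebase
    let info = Rc::new(NACodecInfo::new("rawvideo", NACodecTypeInfo::Video(vinfo), None));
    let frame = NAFrame::new(ts, FrameType::I, true, info, HashMap::new(), buf.clone());
    if let Some(vbuf) = buf.get_vbuf() {
        let (w, h) = vbuf.get_dimensions(0);
        println!("{} luma {}x{} stride {}", frame, w, h, vbuf.get_stride(0));
    }
    Ok(())
}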