--- /dev/null
+[package]
+name = "nihav-player"
+version = "0.1.0"
+authors = ["Kostya Shishkov <kostya.shishkov@gmail.com>"]
+edition = "2018"
+
+[dependencies]
+nihav_core = { path="../../nihav-core" }
+nihav_registry = { path="../../nihav-registry" }
+nihav_allstuff = { path="../../nihav-allstuff" }
+
+sdl2 = "^0.33"
+
+[features]
+default = []
+debug = []
\ No newline at end of file
--- /dev/null
+# nihav-player
+
+nihav-player is a simple video player based on NihAV.
+
+## Getting Started
+
+In order to build the player, put it into directory with other NihAV crates and invoke `cargo build`.
+
+Usage: `nihav-player [options] inputfile1 inputfile2 ...`. Known options are:
+- `-an` tells player to ignore audio stream
+- `-ae` tells player to play audio stream (re-enables it after a previous `-an`)
+- `-vn` tells player to ignore video stream
+- `-ve` tells player to play video stream (re-enables it after a previous `-vn`)
+- `-seek` tells player to start playing file from the specified time
+- `-vol` tells player to set playback volume (as a percentage of the normal level)
+
+Recognized commands:
+- escape key or `q` - quit player
+- left/right arrows - tell player to skip to 10 seconds earlier or later
+- up/down arrows - tell player to skip to 1 minute earlier or later
+- pageup/pagedown - tell player to skip to 10 minutes earlier or later
+- spacebar - pause/unpause playback
+- plus/minus - increase/decrease volume by ten percent
+- `m` - mute or unmute audio
+- `h` - cycle through frame skip modes (none/reference frames only/intra frames only)
+
+## Contributing
+
+You're not supposed to. Even I hardly do that so why should you?
+
+## License
+
+NihAV is licensed under GNU Affero General Public License - see [COPYING] for details.
+
+Parts of the project can be relicensed to other free licenses like LGPLv2 on request.
--- /dev/null
+use std::time::Duration;
+use std::thread::JoinHandle;
+use std::sync::{Arc, Mutex};
+use std::sync::atomic::{AtomicBool, AtomicUsize, Ordering};
+use std::sync::mpsc::{Receiver, SyncSender, TrySendError};
+use std::str::FromStr;
+
+use sdl2::AudioSubsystem;
+use sdl2::audio::{AudioDevice, AudioCallback, AudioSpecDesired};
+
+use nihav_core::formats::*;
+use nihav_core::codecs::*;
+use nihav_core::soundcvt::*;
+
+use super::{DecoderStuff, PktSendEvent};
+
+static SKIP_ADECODING: AtomicBool = AtomicBool::new(false);
+static AUDIO_VOLUME: AtomicUsize = AtomicUsize::new(100);
+static AUDIO_END: AtomicBool = AtomicBool::new(false);
+static CUR_QUEUE_FILL: AtomicUsize = AtomicUsize::new(0);
+static SAMPLE_RATE: AtomicUsize = AtomicUsize::new(0);
+static CHANNELS: AtomicUsize = AtomicUsize::new(0);
+
+static CURRENT_TIME: AtomicUsize = AtomicUsize::new(0);
+static CURRENT_TIME_SET: AtomicBool = AtomicBool::new(false);
+
+const QUEUE_INITIAL_SIZE: usize = 16384;
+const QUEUE_REFILL_LIMIT: usize = 262144;
+
+/// Compacting FIFO of interleaved signed 16-bit samples awaiting playback.
+/// Only `queue[start..end]` holds valid data.
+struct AudioQueue {
+    queue: Vec<i16>,  // sample storage; grown on demand, compacted by home()
+    start: usize,     // index of the first queued sample
+    end: usize,       // index one past the last queued sample
+    srate: usize,     // sample rate in Hz
+    chans: usize,     // number of interleaved channels
+    spos: usize,      // stream position (in samples) just past the newest queued data
+}
+
+impl AudioQueue {
+    /// Creates an empty queue and publishes its parameters through the global
+    /// atomics so other threads can compute timing without taking the lock.
+    fn new(srate: usize, chans: usize) -> Self {
+        SAMPLE_RATE.store(srate, Ordering::Relaxed);
+        CHANNELS.store(chans, Ordering::Relaxed);
+        CUR_QUEUE_FILL.store(0, Ordering::Relaxed);
+        Self {
+            queue: Vec::with_capacity(QUEUE_INITIAL_SIZE),
+            start: 0,
+            end: 0,
+            srate, chans,
+            spos: 0,
+        }
+    }
+    /// Moves the pending samples back to the start of the buffer so new data
+    /// can be appended without growing the vector.
+    fn home(&mut self) {
+        if self.start == 0 { return; }
+        let fill = self.fill();
+        if fill > 0 {
+            if fill < self.start {
+                // regions do not overlap — a single slice copy suffices
+                let (dst, src) = self.queue.split_at_mut(self.start);
+                dst[..fill].copy_from_slice(&src[..fill]);
+            } else {
+                // overlapping regions — copy element by element, front to back
+                for i in 0..fill {
+                    self.queue[i] = self.queue[self.start + i];
+                }
+            }
+        }
+        self.start = 0;
+        self.end = fill;
+    }
+    /// Publishes the playback time (ms) of the queue head and the current
+    /// fill level via the global atomics.
+    fn set_time(&mut self) {
+        let fill = self.fill();
+        // spos points past the newest queued sample; subtract what is still
+        // queued to estimate the sample position currently being played
+        let samp_time = self.spos.saturating_sub(fill / self.chans);
+        CURRENT_TIME.store(samp_time * 1000 / self.srate, Ordering::Relaxed);
+        CURRENT_TIME_SET.store(true, Ordering::Relaxed);
+        CUR_QUEUE_FILL.store(self.fill(), Ordering::Relaxed);
+    }
+    /// Appends decoded i16 samples; `samplepos` is the stream position just
+    /// past these samples.
+    fn add(&mut self, src: &[i16], samplepos: usize) {
+        if self.end + src.len() > self.queue.len() {
+            self.home();
+        }
+        if self.end + src.len() > self.queue.len() {
+            self.queue.resize(self.end + src.len(), 0);
+        }
+        self.queue[self.end..][..src.len()].copy_from_slice(&src);
+        self.end += src.len();
+        self.spos = samplepos;
+        self.set_time();
+    }
+    /// Appends packed audio data, interpreting the bytes as little-endian i16
+    /// samples (a trailing odd byte is ignored by chunks_exact).
+    fn add_bytes(&mut self, src: &[u8], samplepos: usize) {
+        let srclen = src.len() / 2;
+        if self.end + srclen > self.queue.len() {
+            self.home();
+        }
+        if self.end + srclen > self.queue.len() {
+            self.queue.resize(self.end + srclen, 0);
+        }
+        for (dst, src) in self.queue[self.end..][..srclen].iter_mut().zip(src.chunks_exact(2)) {
+            // assemble a little-endian 16-bit sample from two bytes
+            *dst = (u16::from(src[0]) + u16::from(src[1]) * 256) as i16;
+        }
+        self.end += srclen;
+        self.spos = samplepos;
+        self.set_time();
+    }
+    /// Removes `size` samples from the head after they have been played.
+    fn drain(&mut self, size: usize) {
+        let fill = self.fill();
+        if size >= fill {
+            self.flush();
+        } else {
+            self.start += size;
+        }
+        self.set_time();
+    }
+    /// Number of samples currently queued (across all channels).
+    fn fill(&self) -> usize { self.end - self.start }
+    /// Discards all queued samples (storage is kept for reuse).
+    fn flush(&mut self) {
+        self.start = 0;
+        self.end = 0;
+    }
+}
+
+/// SDL audio callback state: a shared handle to the sample queue that the
+/// decoding thread fills.
+pub struct AudioOutput {
+    queue: Arc<Mutex<AudioQueue>>,
+}
+
+impl AudioCallback for AudioOutput {
+    type Channel = i16;
+
+    /// Runs on the SDL audio thread: copies queued samples into `out`,
+    /// applying the global volume, and zero-fills any shortfall.
+    fn callback(&mut self, out: &mut [Self::Channel]) {
+        let mut queue = self.queue.lock().unwrap();
+        let dstlen = out.len();
+        let copylen = queue.fill().min(dstlen);
+        let volume = AUDIO_VOLUME.load(Ordering::Relaxed) as i32;
+        if volume == 100 {
+            // unity volume: plain copy
+            out[..copylen].copy_from_slice(&queue.queue[queue.start..][..copylen]);
+        } else {
+            // scale each sample and clamp to the i16 range
+            for (dst, &src) in out[..copylen].iter_mut().zip(queue.queue[queue.start..].iter()) {
+                *dst = (i32::from(src) * volume / 100).max(-32768).min(32767) as i16;
+            }
+        }
+        queue.drain(copylen);
+        // underrun or end of stream: output silence for the remainder
+        for el in out[copylen..].iter_mut() { *el = 0; }
+    }
+}
+
+/// Spawns a thread that merely consumes packet events when no audio decoder
+/// or audio device is available, so the sending side never blocks forever.
+fn dummy_audio_thread(aprecv: Receiver<PktSendEvent>) -> JoinHandle<()> {
+    std::thread::Builder::new().name("acontrol-dummy".to_string()).spawn(move ||{
+        loop {
+            match aprecv.recv() {
+                Ok(PktSendEvent::End) => break,
+                Ok(PktSendEvent::ImmediateEnd) => break,
+                Err(_) => {
+                    // channel closed by the sender side
+                    break;
+                },
+                _ => {},
+            };
+        }
+    }).unwrap()
+}
+
+/// An opened SDL audio device, or `None` when audio output is unavailable.
+type AudioPlaybackType = Option<AudioDevice<AudioOutput>>;
+
+/// Opens an SDL playback device matching the stream parameters and spawns
+/// the audio decoding thread.  If the device cannot be opened, falls back to
+/// a dummy consumer thread so the packet channel is still drained.
+fn start_audio_decoding(asystem: &AudioSubsystem, ainfo: NAAudioInfo, mut audio_dec: DecoderStuff, aprecv: Receiver<PktSendEvent>) -> (AudioPlaybackType, JoinHandle<()>) {
+    // output at least stereo; everything is converted to S16
+    // NOTE(review): for streams with more than two channels `ch` > 2 but the
+    // channel map below only covers stereo ("L,R") or mono ("C") — confirm
+    // multichannel inputs are handled upstream
+    let ch = ainfo.channels.max(2);
+    let desired_spec = AudioSpecDesired {
+        freq: Some(ainfo.sample_rate as i32),
+        channels: Some(ch),
+        samples: None
+    };
+    let dst_info = NAAudioInfo {
+        sample_rate: ainfo.sample_rate,
+        channels: ch,
+        format: SND_S16_FORMAT,
+        block_len: 0,
+    };
+    let queue = Arc::new(Mutex::new(AudioQueue::new(ainfo.sample_rate as usize, ch as usize)));
+    let qclone = queue.clone();
+    let ret = asystem.open_playback(None, &desired_spec, |_spec| {
+        AudioOutput {
+            queue: qclone,
+        }
+    });
+    if ret.is_err() {
+        // no audio device: keep draining packets so the demuxer side won't stall
+        return (None, dummy_audio_thread(aprecv))
+    }
+    let adevice = ret.unwrap();
+    (Some(adevice), std::thread::Builder::new().name("acontrol".to_string()).spawn(move ||{
+        let mut samplepos = 0usize;
+        let dst_chmap = if dst_info.channels == 2 {
+                NAChannelMap::from_str("L,R").unwrap()
+            } else {
+                NAChannelMap::from_str("C").unwrap()
+            };
+        SKIP_ADECODING.store(false, Ordering::Relaxed);
+        loop {
+            match aprecv.recv() {
+                Ok(PktSendEvent::Packet(pkt)) => {
+                    // throttle decoding while the queue is sufficiently full,
+                    // unless a seek asked us to drop data
+                    loop {
+                        if CUR_QUEUE_FILL.load(Ordering::Relaxed)
+                                < QUEUE_REFILL_LIMIT || SKIP_ADECODING.load(Ordering::Relaxed) {
+                            break;
+                        }
+                        std::thread::sleep(Duration::from_millis(100));
+                    }
+                    if !SKIP_ADECODING.load(Ordering::Relaxed) {
+                        if let Ok(frm) = audio_dec.dec.decode(&mut audio_dec.dsupp, &pkt) {
+                            let buf = frm.get_buffer();
+                            // prefer the frame's own timestamp; otherwise keep
+                            // accumulating from the previous position
+                            if let Some(pts) = frm.get_pts() {
+                                samplepos = NATimeInfo::ts_to_time(pts, u64::from(dst_info.sample_rate), frm.ts.tb_num, frm.ts.tb_den) as usize;
+                            }
+                            samplepos += buf.get_audio_length();
+                            if let Ok(out_buf) = convert_audio_frame(&buf, &dst_info, &dst_chmap) {
+                                match out_buf {
+                                    NABufferType::AudioI16(abuf) => {
+                                        let mut qdata = queue.lock().unwrap();
+                                        qdata.add(abuf.get_data(), samplepos);
+                                        drop(qdata);
+                                    },
+                                    NABufferType::AudioPacked(abuf) => {
+                                        let mut qdata = queue.lock().unwrap();
+                                        qdata.add_bytes(abuf.get_data(), samplepos);
+                                        drop(qdata);
+                                    },
+                                    _ => {},
+                                };
+                            }
+                        }
+                    }
+                },
+                Ok(PktSendEvent::Flush) => {
+                    // seek: reset decoder state and drop queued samples
+                    audio_dec.dec.flush();
+                    let mut qdata = queue.lock().unwrap();
+                    qdata.flush();
+                    SKIP_ADECODING.store(false, Ordering::Relaxed);
+                },
+                Ok(PktSendEvent::End) => break,
+                Ok(PktSendEvent::ImmediateEnd) => {
+                    let mut qdata = queue.lock().unwrap();
+                    qdata.flush();
+                    break;
+                },
+                Ok(PktSendEvent::HurryUp) => {},
+                Err(_) => {
+                    break;
+                },
+            };
+        }
+        // let the queued audio play out before signalling the end
+        // NOTE(review): this loop spins on the mutex without sleeping,
+        // busy-waiting a core until the callback drains the queue
+        loop {
+            let qdata = queue.lock().unwrap();
+            if qdata.fill() == 0 || SKIP_ADECODING.load(Ordering::Relaxed) {
+                break;
+            }
+        }
+        AUDIO_END.store(true, Ordering::Relaxed);
+    }).unwrap())
+}
+
+/// Main-thread handle to the audio pipeline: the packet channel to the
+/// decoding thread, a local backlog for when the channel is full, and the
+/// SDL device used for pause/resume.
+pub struct AudioControl {
+    aqueue: Vec<PktSendEvent>,        // events waiting because the channel was full
+    apsend: SyncSender<PktSendEvent>, // bounded channel to the decoding thread
+    adevice: AudioPlaybackType,       // None when no audio device is open
+    athread: JoinHandle<()>,          // decoding (or dummy) thread handle
+}
+
+impl AudioControl {
+    /// Creates the audio pipeline: a bounded packet channel plus either a
+    /// real decoding thread with an SDL device or a dummy consumer thread.
+    pub fn new(audio_dec: Option<DecoderStuff>, ainfo: Option<NAAudioInfo>, asystem: &AudioSubsystem) -> Self {
+        let (apsend, aprecv) = std::sync::mpsc::sync_channel::<PktSendEvent>(20);
+        let (adevice, athread) = if let Some(audio_dec) = audio_dec {
+            start_audio_decoding(asystem, ainfo.unwrap(), audio_dec, aprecv)
+        } else {
+            (None, dummy_audio_thread(aprecv))
+        };
+        AUDIO_END.store(false, Ordering::Relaxed);
+
+        Self {
+            aqueue: Vec::new(),
+            apsend,
+            adevice,
+            athread,
+        }
+    }
+    pub fn has_audio(&self) -> bool { self.adevice.is_some() }
+    pub fn pause(&mut self) {
+        if let Some(ref device) = self.adevice {
+            device.pause();
+        }
+    }
+    pub fn resume(&mut self) {
+        if let Some(ref device) = self.adevice {
+            device.resume();
+        }
+    }
+    /// Sets playback volume in percent (100 = normal).
+    pub fn set_volume(&mut self, volume: usize) {
+        AUDIO_VOLUME.store(volume, Ordering::Relaxed);
+    }
+    pub fn get_volume(&self) -> usize {
+        AUDIO_VOLUME.load(Ordering::Relaxed)
+    }
+    /// True once the decoding thread has drained its queue after End.
+    pub fn is_audio_end(&self) -> bool {
+        AUDIO_END.load(Ordering::Relaxed)
+    }
+    /// Number of queued samples (all channels interleaved).
+    pub fn get_fill(&self) -> usize { CUR_QUEUE_FILL.load(Ordering::Relaxed) }
+    /// Current audio playback time in ms, if the decoder has reported one.
+    pub fn get_time(&self) -> Option<u64> {
+        if CURRENT_TIME_SET.load(Ordering::Relaxed) {
+            Some(CURRENT_TIME.load(Ordering::Relaxed) as u64)
+        } else {
+            None
+        }
+    }
+    /// Milliseconds of audio still queued for playback.
+    pub fn get_time_left(&self) -> u64 {
+        let srate = SAMPLE_RATE.load(Ordering::Relaxed);
+        let chans = CHANNELS.load(Ordering::Relaxed);
+        if srate != 0 && chans != 0{
+            let fill = self.get_fill();
+            (fill * 1000 / srate / chans) as u64
+        } else {
+            0
+        }
+    }
+
+    /// Number of events waiting in the local backlog.
+    pub fn get_queue_size(&self) -> usize { self.aqueue.len() }
+    /// Queues an event for the decoding thread, preserving order with any
+    /// backlogged events; returns false if the event had to be buffered.
+    pub fn try_send_audio(&mut self, evt: PktSendEvent) -> bool {
+        if self.aqueue.len() > 0 {
+            self.aqueue.push(evt);
+            false
+        } else {
+            self.try_send_event(evt)
+        }
+    }
+    fn try_send_event(&mut self, evt: PktSendEvent) -> bool {
+        if let Err(TrySendError::Full(evt)) = self.apsend.try_send(evt) {
+            // channel full: put the event back at the head of the backlog
+            self.aqueue.insert(0, evt);
+            false
+        } else {
+            true
+        }
+    }
+    /// Pushes the backlog into the channel; false if it filled up again.
+    pub fn try_send_queued(&mut self) -> bool {
+        while !self.aqueue.is_empty() {
+            let pkt = self.aqueue.remove(0);
+            if !self.try_send_event(pkt) {
+                return false;
+            }
+        }
+        true
+    }
+
+    /// Drops all pending audio (used on seek): pauses the device, clears the
+    /// backlog and tells the decoding thread to skip and flush.
+    pub fn flush(&mut self) {
+        self.pause();
+        self.aqueue.truncate(0);
+        SKIP_ADECODING.store(true, Ordering::Release);
+        CURRENT_TIME_SET.store(false, Ordering::Release);
+        let _ = self.apsend.send(PktSendEvent::Flush);
+    }
+    /// Stops playback and joins the decoding thread.
+    pub fn finish(self) {
+        SKIP_ADECODING.store(true, Ordering::Release);
+        let _ = self.apsend.send(PktSendEvent::ImmediateEnd);
+        self.athread.join().unwrap();
+    }
+}
--- /dev/null
+extern crate sdl2;
+extern crate nihav_core;
+extern crate nihav_registry;
+extern crate nihav_allstuff;
+
+use std::env;
+use std::fs::File;
+use std::io::Write;
+use std::path::Path;
+use std::time::{Duration, Instant};
+use std::thread;
+
+use sdl2::event::{Event, WindowEvent};
+use sdl2::keyboard::Keycode;
+use sdl2::render::{Canvas, Texture, TextureCreator};
+use sdl2::pixels::PixelFormatEnum;
+use sdl2::video::{Window, WindowContext};
+
+use nihav_registry::detect;
+use nihav_core::frame::*;
+use nihav_core::io::byteio::{FileReader, ByteReader};
+use nihav_core::reorder::*;
+use nihav_core::codecs::*;
+use nihav_core::demuxers::*;
+use nihav_registry::register::*;
+use nihav_allstuff::*;
+
+mod audiodec;
+use audiodec::*;
+mod videodec;
+use videodec::*;
+
+// Writes the string produced by evaluating $blk to the player's log file,
+// followed by a newline.  Compiled to a no-op unless the "debug" feature is
+// enabled, so log-formatting costs nothing in normal builds.
+#[cfg(feature="debug")]
+macro_rules! debug_log {
+    ($log: expr; $blk: block) => {
+        $log.logfile.write($blk.as_bytes()).unwrap();
+        $log.logfile.write(b"\n").unwrap();
+    };
+}
+// No-op counterpart used when the "debug" feature is disabled.
+#[cfg(not(feature="debug"))]
+macro_rules! debug_log {
+    ($log: expr; $blk: block) => {};
+}
+
+/// Events sent from the demuxing loop to the audio/video decoding threads.
+pub enum PktSendEvent {
+    Packet(NAPacket), // a demuxed packet to decode
+    Flush,            // drop queued data and reset decoder state (after a seek)
+    End,              // no more packets; finish whatever is queued
+    ImmediateEnd,     // stop as soon as possible, discarding queued data
+    HurryUp,          // cycle frame-skip mode (video only)
+}
+
+/// Everything needed to decode one stream: decoder support data, the decoder
+/// itself, and the reorderer restoring frames to presentation order.
+pub struct DecoderStuff {
+    pub dsupp: Box<NADecoderSupport>,
+    pub dec: Box<dyn NADecoder + Send>,
+    pub reord: Box<dyn FrameReorderer + Send>,
+}
+
+/// Formats a millisecond timestamp as "[[h:]mm:]ss.d", omitting leading zero
+/// hour/minute fields and keeping one decisecond digit.
+fn format_time(ms: u64) -> String {
+    let s = ms / 1000;
+    let ds = (ms % 1000) / 100;
+    let (min, s) = (s / 60, s % 60);
+    let (h, min) = (min / 60, min % 60);
+    if h == 0 {
+        if min == 0 {
+            format!("{}.{}", s, ds)
+        } else {
+            format!("{}:{:02}.{}", min, s, ds)
+        }
+    } else {
+        format!("{}:{:02}:{:02}.{}", h, min, s, ds)
+    }
+}
+
+const FRAME_QUEUE_LEN: usize = 25;
+const MAX_VOLUME: usize = 200;
+
+pub type FrameRecord = (NABufferType, u64);
+
+/// Playback clock: a reference wall-clock instant plus the stream timestamp
+/// (in ms) that instant corresponds to.
+pub struct TimeKeep {
+    ref_time: Instant, // wall-clock moment matching ref_ts
+    ref_ts: u64,       // stream time in milliseconds at ref_time
+}
+
+impl TimeKeep {
+    fn new() -> Self {
+        Self {
+            ref_time: Instant::now(),
+            ref_ts: 0,
+        }
+    }
+    /// Current playback position in ms: the reference timestamp plus wall
+    /// time elapsed since it was anchored.
+    pub fn get_cur_time(&self) -> u64 {
+        let add = self.ref_time.elapsed().as_millis() as u64;
+        self.ref_ts + add
+    }
+    /// Forgets the stream position (used right after requesting a seek).
+    fn reset_ts(&mut self) {
+        self.ref_ts = 0;
+    }
+    /// Re-anchors the clock: "now" corresponds to stream time `ts`.
+    fn reset_all(&mut self, ts: u64) {
+        self.ref_time = Instant::now();
+        self.ref_ts = ts;
+    }
+    /// Freezes the current position into ref_ts (called when pausing).
+    fn set_ts(&mut self) {
+        self.ref_ts = self.get_cur_time();
+    }
+    /// Restarts the wall clock from the frozen ref_ts (called when unpausing).
+    fn set_time(&mut self) {
+        self.ref_time = Instant::now();
+    }
+}
+
+/// One display slot: a presentation timestamp plus both an RGB and a YUV
+/// texture — `is_yuv` selects which one actually holds the frame.
+pub struct DispFrame<'a> {
+    pub ts: u64,
+    pub is_yuv: bool,
+    pub valid: bool,
+    pub rgb_tex: Texture<'a>,
+    pub yuv_tex: Texture<'a>,
+}
+
+/// Fixed-size circular queue of decoded frames ready for display.
+/// `pool` holds `len + 1` entries: the extra slot keeps the most recently
+/// shown frame so expose events can repaint the window.
+pub struct DispQueue<'a> {
+    pub pool: Vec<DispFrame<'a>>,
+    pub first_ts: u64, // timestamp of the frame at `start`
+    pub last_ts: u64,  // timestamp of the newest queued frame
+    pub start: usize,  // read position
+    pub end: usize,    // write position
+    pub len: usize,    // ring capacity (0 for audio-only playback)
+    pub width: usize,
+    pub height: usize,
+}
+
+impl<'a> DispQueue<'a> {
+    /// Allocates `len + 1` frame slots (the extra one holds the last shown
+    /// frame for expose-event redraws), each with RGB24 and IYUV textures.
+    fn new(texture_creator: &'a TextureCreator<WindowContext>, width: usize, height: usize, len: usize) -> Self {
+        let mut pool = Vec::with_capacity(len);
+        for _ in 0..len + 1 {
+            let rgb_tex = texture_creator.create_texture_streaming(PixelFormatEnum::RGB24, width as u32, height as u32).unwrap();
+            // IYUV needs even dimensions, hence rounding up to the next even value
+            let yuv_tex = texture_creator.create_texture_streaming(PixelFormatEnum::IYUV, ((width + 1) & !1) as u32, ((height + 1) & !1) as u32).unwrap();
+            pool.push(DispFrame{ ts: 0, is_yuv: false, valid: false, rgb_tex, yuv_tex });
+        }
+        // pre-fill the spare slot with black so the first expose shows something
+        pool[len].is_yuv = false;
+        pool[len].rgb_tex.with_lock(None, |buffer: &mut [u8], _pitch: usize| {
+            for el in buffer.iter_mut() { *el = 0; }
+        }).unwrap();
+
+        Self { pool, first_ts: 0, last_ts: 0, start: 0, end: 0, len, width, height }
+    }
+
+    /// Empties the queue and invalidates all frames (e.g. after a seek).
+    fn flush(&mut self) {
+        self.start = 0;
+        self.end = 0;
+        self.first_ts = 0;
+        self.last_ts = 0;
+        for frm in self.pool.iter_mut() {
+            frm.valid = false;
+        }
+    }
+
+    /// Texture of the most recently displayed frame (the spare slot).
+    fn get_last_texture(&self) -> &Texture<'a> {
+        if self.pool[self.len].is_yuv {
+            &self.pool[self.len].yuv_tex
+        } else {
+            &self.pool[self.len].rgb_tex
+        }
+    }
+    pub fn is_empty(&self) -> bool { self.start == self.end }
+    // one slot is kept free to distinguish "full" from "empty"
+    pub fn is_full(&self) -> bool { self.len == 0 || self.start == (self.end + 1) % self.len }
+    /// Advances the write position, wrapping around the ring.
+    pub fn move_end(&mut self) {
+        self.end += 1;
+        if self.end >= self.len {
+            self.end -= self.len;
+        }
+    }
+    /// Pops the head frame, stashing it in the spare slot for redraws.
+    pub fn move_start(&mut self) {
+        self.pool.swap(self.start, self.len);
+        self.start += 1;
+        if self.start >= self.len {
+            self.start -= self.len;
+        }
+        if !self.is_empty() {
+            self.first_ts = self.pool[self.start].ts;
+        }
+    }
+}
+
+/// Presents due frames from the queue.  A frame within ±10 ms of the current
+/// time is shown; frames already more than 10 ms late are dropped.  Returns
+/// how long the caller may sleep until the next frame is due, or None when
+/// the queue ran empty.
+fn try_display(disp_queue: &mut DispQueue, canvas: &mut Canvas<Window>, ctime: &TimeKeep) -> Option<u64> {
+    while !disp_queue.is_empty() {
+        let disp_time = disp_queue.first_ts;
+        let ctime = ctime.get_cur_time();
+        if disp_time > ctime + 10 {
+            // head frame is not due yet — tell the caller how long to wait
+            return Some(disp_time - ctime);
+        } else if disp_time + 10 < ctime {
+            // too late: drop the frame and try the next one
+            disp_queue.move_start();
+        } else {
+            let frm = &disp_queue.pool[disp_queue.start];
+            let texture = if frm.is_yuv { &frm.yuv_tex } else { &frm.rgb_tex };
+            canvas.clear();
+            canvas.copy(texture, None, None).unwrap();
+            canvas.present();
+
+            disp_queue.move_start();
+            if !disp_queue.is_empty() {
+                // wake slightly early so the next frame is not missed
+                return Some((disp_queue.first_ts - ctime).saturating_sub(2));
+            } else {
+                return None;
+            }
+        }
+    }
+    None
+}
+
+/// Top-level player state: SDL contexts, the per-file audio/video controllers
+/// and user-facing playback flags.
+struct Player {
+    sdl_context: sdl2::Sdl,
+    vsystem: sdl2::VideoSubsystem,
+    asystem: sdl2::AudioSubsystem,
+
+    acontrol: AudioControl,
+    vcontrol: VideoControl,
+
+    play_video: bool, // user allows video decoding
+    play_audio: bool, // user allows audio decoding
+    has_video: bool,  // current file has a playable video stream
+    has_audio: bool,  // current file has a playable audio stream
+    video_str: u32,   // id of the selected video stream
+    audio_str: u32,   // id of the selected audio stream
+
+    paused: bool,
+    mute: bool,
+    volume: usize,    // percent of normal volume, capped at MAX_VOLUME
+    end: bool,        // user requested quitting the whole player
+
+    tkeep: TimeKeep,
+
+    debug: bool,      // show the extended status line
+
+    #[cfg(feature="debug")]
+    logfile: File,
+}
+
+impl Player {
+    /// Initialises SDL subsystems and creates inactive audio/video
+    /// controllers; real ones are swapped in per-file by `play`.
+    fn new() -> Self {
+        let sdl_context = sdl2::init().unwrap();
+        let vsystem = sdl_context.video().unwrap();
+        let asystem = sdl_context.audio().unwrap();
+        // keep the screen on during playback
+        vsystem.disable_screen_saver();
+        let acontrol = AudioControl::new(None, None, &asystem);
+        let vcontrol = VideoControl::new(None, 0, 0, 0, 0);
+        Self {
+            sdl_context, asystem, vsystem,
+
+            acontrol, vcontrol,
+
+            play_video: true,
+            play_audio: true,
+            has_video: false,
+            has_audio: false,
+            video_str: 0,
+            audio_str: 0,
+
+            paused: false,
+            mute: false,
+            volume: 100,
+            end: false,
+
+            tkeep: TimeKeep::new(),
+
+            debug: false,
+
+            #[cfg(feature="debug")]
+            logfile: File::create("debug.log").unwrap(),
+        }
+    }
+    /// Seeks `off` seconds forward or backward from the current position,
+    /// then flushes both pipelines, refills the queues and re-anchors the clock.
+    fn seek(&mut self, off: u64, fwd: bool, dmx: &mut Demuxer, disp_queue: &mut DispQueue) {
+        let cur_time = self.tkeep.get_cur_time();
+        let seektime = if fwd { cur_time + off * 1000 } else {
+            cur_time.saturating_sub(off * 1000) };
+        debug_log!(self; {format!(" seek to {}", seektime)});
+
+        let ret = dmx.seek(NATimePoint::Milliseconds(seektime));
+        if ret.is_err() {
+            println!(" seek error");
+            return;
+        }
+
+        self.acontrol.flush();
+        self.vcontrol.flush();
+        disp_queue.flush();
+
+        self.tkeep.reset_ts();
+        self.prefill(dmx, disp_queue);
+        if !disp_queue.is_empty() {
+            // video present: anchor the clock to the first queued frame
+            self.tkeep.reset_all(disp_queue.first_ts);
+        } else {
+            // audio-only: spin (yielding) until the audio thread reports a time
+            let mut iterations = 0;
+            let mut time = self.acontrol.get_time();
+            while time.is_none() {
+                iterations += 1;
+                std::thread::yield_now();
+                if iterations > 1000000 { println!(" still no time set?!"); break; }
+                time = self.acontrol.get_time();
+            }
+            if let Some(time) = time {
+                self.tkeep.reset_all(time);
+            }
+        }
+        if !self.paused {
+            self.acontrol.resume();
+        }
+    }
+    /// Demuxes and queues packets until the decoder queues (and, for video,
+    /// the display queue) hold a comfortable backlog before playback starts.
+    fn prefill(&mut self, dmx: &mut Demuxer, disp_queue: &mut DispQueue) {
+        debug_log!(self; {" prefilling"});
+        while self.vcontrol.get_queue_size() < FRAME_QUEUE_LEN {
+            let mut try_send = self.acontrol.get_queue_size() < FRAME_QUEUE_LEN && (!self.has_video || (!self.vcontrol.is_filled(FRAME_QUEUE_LEN) && !disp_queue.is_full()));
+
+            // push local backlogs first; stop feeding while they stay half-full
+            if !self.vcontrol.try_send_queued() && self.vcontrol.get_queue_size() > FRAME_QUEUE_LEN / 2 {
+                try_send = false;
+            }
+            if !self.acontrol.try_send_queued() && self.acontrol.get_queue_size() > FRAME_QUEUE_LEN / 2 {
+                try_send = false;
+            }
+            if try_send {
+                match dmx.get_frame() {
+                    Err(DemuxerError::EOF) => break,
+                    Err(_) => break,
+                    Ok(pkt) => {
+                        // packets from unselected streams are silently dropped
+                        let streamno = pkt.get_stream().get_id();
+                        if self.has_video && streamno == self.video_str {
+                            self.vcontrol.try_send_video(PktSendEvent::Packet(pkt));
+                        } else if self.has_audio && streamno == self.audio_str {
+                            self.acontrol.try_send_audio(PktSendEvent::Packet(pkt));
+                        }
+                    }
+                };
+            }
+            self.vcontrol.fill(disp_queue);
+
+            if !try_send {
+                break;
+            }
+        }
+        if self.has_video {
+            // drain the decoded-frame backlog into the display queue
+            while self.vcontrol.get_queue_size() > 0 && !disp_queue.is_full() {
+                self.vcontrol.try_send_queued();
+                self.vcontrol.fill(disp_queue);
+                std::thread::sleep(Duration::from_millis(10));
+            }
+            self.vcontrol.fill(disp_queue);
+        }
+        debug_log!(self; {format!(" prefilling done, frames {}-{} audio {}", disp_queue.start, disp_queue.end, self.acontrol.get_fill())});
+    }
+    /// Processes pending SDL events; returns true when playback of the
+    /// current file should stop (quit request, `q`/Escape, or Return to
+    /// skip to the next file).
+    fn handle_events(&mut self, event_pump: &mut sdl2::EventPump, canvas: &mut Canvas<Window>, dmx: &mut Demuxer, disp_queue: &mut DispQueue) -> bool {
+        for event in event_pump.poll_iter() {
+            if let Event::Quit {..} = event {
+                self.end = true;
+                println!();
+                return true;
+            }
+            if let Event::Window {win_event: WindowEvent::Exposed, ..} = event {
+                // repaint the window with the last shown frame
+                canvas.clear();
+                canvas.copy(disp_queue.get_last_texture(), None, None).unwrap();
+                canvas.present();
+            }
+            if let Event::KeyDown {keycode: Some(keycode), ..} = event {
+                match keycode {
+                    Keycode::Escape | Keycode::Q => {
+                        self.end = true;
+                        println!();
+                        return true;
+                    },
+                    Keycode::Return => return true,
+                    Keycode::Right => { self.seek(10, true, dmx, disp_queue); },
+                    Keycode::Left => { self.seek(10, false, dmx, disp_queue); },
+                    Keycode::Up => { self.seek(60, true, dmx, disp_queue); },
+                    Keycode::Down => { self.seek(60, false, dmx, disp_queue); },
+                    Keycode::PageUp => { self.seek(600, true, dmx, disp_queue); },
+                    Keycode::PageDown => { self.seek(600, false, dmx, disp_queue); },
+                    Keycode::Space => {
+                        self.paused = !self.paused;
+                        // freeze/unfreeze the clock and allow the screen saver
+                        // while paused
+                        if self.paused {
+                            self.vsystem.enable_screen_saver();
+                            self.tkeep.set_ts();
+                        } else {
+                            self.vsystem.disable_screen_saver();
+                            self.tkeep.set_time();
+                        }
+                        if self.paused {
+                            self.acontrol.pause();
+                        } else {
+                            self.acontrol.resume();
+                        }
+                    },
+                    Keycode::Plus | Keycode::KpPlus => {
+                        self.volume = (self.volume + 10).min(MAX_VOLUME);
+                        if !self.mute {
+                            self.acontrol.set_volume(self.volume);
+                        }
+                    },
+                    Keycode::Minus | Keycode::KpMinus => {
+                        self.volume = self.volume.saturating_sub(10);
+                        if !self.mute {
+                            self.acontrol.set_volume(self.volume);
+                        }
+                    },
+                    Keycode::D => {
+                        // toggle the extended status line
+                        self.debug = !self.debug;
+                    },
+                    Keycode::M => {
+                        self.mute = !self.mute;
+                        if self.mute {
+                            self.acontrol.set_volume(0);
+                        } else {
+                            self.acontrol.set_volume(self.volume);
+                        }
+                    },
+                    Keycode::H => {
+                        // cycle frame-skip modes in the video decoder
+                        self.vcontrol.try_send_video(PktSendEvent::HurryUp);
+                    },
+                    _ => {},
+                };
+                if !self.paused {
+                    // wipe the status line so it is redrawn cleanly
+                    print!("{:60}\r", ' ');
+                    std::io::stdout().flush().unwrap();
+                }
+            }
+        }
+        false
+    }
+    /// Plays one file: detects the container, sets up decoders for the first
+    /// usable video and audio streams, opens a window and runs the
+    /// demux/display loop until EOF or a user command stops it.
+    fn play(&mut self, name: &str, start_time: NATimePoint) {
+        debug_log!(self; {format!("Playing {}", name)});
+
+        // prepare data source
+        let path = Path::new(name);
+        let mut file = File::open(path).unwrap();
+        let dmx_fact;
+        let mut fr = FileReader::new_read(&mut file);
+        let mut br = ByteReader::new(&mut fr);
+        let res = detect::detect_format(name, &mut br);
+        if res.is_none() {
+            println!("cannot detect format for {}", name);
+            return;
+        }
+        let (dmx_name, _score) = res.unwrap();
+        debug_log!(self; {format!(" found demuxer {} with score {:?}", dmx_name, _score)});
+        println!("trying demuxer {} on {}", dmx_name, name);
+
+        let mut dmx_reg = RegisteredDemuxers::new();
+        nihav_register_all_demuxers(&mut dmx_reg);
+        let mut dec_reg = RegisteredDecoders::new();
+        nihav_register_all_decoders(&mut dec_reg);
+
+        let ret = dmx_reg.find_demuxer(dmx_name);
+        if ret.is_none() {
+            println!("error finding {} demuxer", dmx_name);
+            return;
+        }
+        dmx_fact = ret.unwrap();
+        // rewind after probing before handing the stream to the demuxer
+        br.seek(SeekFrom::Start(0)).unwrap();
+        let ret = create_demuxer(dmx_fact, &mut br);
+        if ret.is_err() {
+            println!("error creating demuxer");
+            return;
+        }
+        let mut dmx = ret.unwrap();
+        if start_time != NATimePoint::None {
+            debug_log!(self; {format!(" start seek to {}", start_time)});
+            if dmx.seek(start_time).is_err() {
+                println!("initial seek failed");
+            }
+        }
+
+        // defaults used when the stream does not report its dimensions
+        let mut width = 640;
+        let mut height = 480;
+        let mut tb_num = 0;
+        let mut tb_den = 0;
+        let mut ainfo: Option<NAAudioInfo> = None;
+
+        let mut video_dec: Option<DecoderStuff> = None;
+        let mut audio_dec: Option<DecoderStuff> = None;
+
+        let duration = dmx.get_duration();
+        if duration != 0 {
+            println!(" total duration {}", format_time(duration));
+        }
+        // pick the first decodable video and audio streams
+        self.has_video = false;
+        self.has_audio = false;
+        for i in 0..dmx.get_num_streams() {
+            let s = dmx.get_stream(i).unwrap();
+            let info = s.get_info();
+            let decfunc = dec_reg.find_decoder(info.get_name());
+            println!("stream {} - {} {}", i, s, info.get_name());
+            debug_log!(self; {format!(" stream {} - {} {}", i, s, info.get_name())});
+            let str_id = s.get_id();
+            if info.is_video() {
+                if video_dec.is_none() && self.play_video {
+                    if let Some(decfunc) = decfunc {
+                        let mut dec = (decfunc)();
+                        let mut dsupp = Box::new(NADecoderSupport::new());
+                        let props = info.get_properties().get_video_info().unwrap();
+                        if props.get_width() != 0 {
+                            width = props.get_width();
+                            height = props.get_height();
+                        }
+                        // codecs with complex reordering need a deeper
+                        // reorder buffer than plain I/P/B streams
+                        let desc = get_codec_description(info.get_name());
+                        let (reorder_depth, reord) = if desc.is_none() || (desc.unwrap().caps & CODEC_CAP_COMPLEX_REORDER) == 0 {
+                                let reord: Box<dyn FrameReorderer + Send> = Box::new(IPBReorderer::new());
+                                (3, reord)
+                            } else {
+                                let reord: Box<dyn FrameReorderer + Send> = Box::new(ComplexReorderer::new());
+                                (16, reord)
+                            };
+                        dsupp.pool_u8 = NAVideoBufferPool::new(reorder_depth);
+                        dsupp.pool_u16 = NAVideoBufferPool::new(reorder_depth);
+                        dsupp.pool_u32 = NAVideoBufferPool::new(reorder_depth);
+                        dec.init(&mut dsupp, info).unwrap();
+                        video_dec = Some(DecoderStuff{ dsupp, dec, reord });
+                        self.video_str = str_id;
+                        let (tbn, tbd) = s.get_timebase();
+                        tb_num = tbn;
+                        tb_den = tbd;
+                        self.has_video = true;
+                    } else {
+                        println!("no video decoder for {} found!", info.get_name());
+                    }
+                }
+            } else if info.is_audio() {
+                if audio_dec.is_none() && self.play_audio {
+                    if let Some(decfunc) = decfunc {
+                        let mut dec = (decfunc)();
+                        let mut dsupp = Box::new(NADecoderSupport::new());
+                        ainfo = info.get_properties().get_audio_info();
+                        dec.init(&mut dsupp, info).unwrap();
+                        let reord = Box::new(NoReorderer::new());
+                        audio_dec = Some(DecoderStuff{ dsupp, dec, reord });
+                        self.audio_str = str_id;
+                        self.has_audio = true;
+                    } else {
+                        println!("no audio decoder for {} found!", info.get_name());
+                    }
+                }
+            } else {
+                println!("decoder {} not found", info.get_name());
+            }
+        }
+        if !self.has_video && !self.has_audio {
+            println!("No playable streams found.");
+            return;
+        }
+
+        // upscale small videos by powers of two to a reasonable window size
+        while (width <= 384) && (height <= 288) {
+            width <<= 1;
+            height <<= 1;
+        }
+
+        // prepare playback structure
+        let mut new_vcontrol = VideoControl::new(video_dec, width, height, tb_num, tb_den);
+        std::mem::swap(&mut self.vcontrol, &mut new_vcontrol);
+
+        let mut new_acontrol = AudioControl::new(audio_dec, ainfo, &self.asystem);
+        std::mem::swap(&mut self.acontrol, &mut new_acontrol);
+
+        if self.mute {
+            self.acontrol.set_volume(0);
+        } else {
+            self.acontrol.set_volume(self.volume);
+        }
+
+        let fname = path.file_name();
+        let wname = if let Some(fname) = fname {
+                "NihAV player - ".to_owned() + fname.to_str().unwrap()
+            } else {
+                "NihAV player".to_owned()
+            };
+        let window = self.vsystem.window(&wname, width as u32, height as u32)
+            .position_centered().build().unwrap();
+        let mut canvas = window.into_canvas().build().unwrap();
+        let texture_creator = canvas.texture_creator();
+        let mut disp_q = DispQueue::new(&texture_creator, width, height, if self.has_video { FRAME_QUEUE_LEN } else { 0 });
+        if !self.has_video {
+            // audio-only: paint the window black once
+            canvas.clear();
+            canvas.copy(disp_q.get_last_texture(), None, None).unwrap();
+            canvas.present();
+        }
+
+        // audio may have been dropped if the device failed to open
+        self.has_audio = self.acontrol.has_audio();
+        if !self.has_video && !self.has_audio {
+            println!("No playable streams.");
+            return;
+        }
+
+        // play
+        self.prefill(&mut dmx, &mut disp_q);
+        self.tkeep.reset_all(0);
+        if !self.paused {
+            self.acontrol.resume();
+        }
+        let mut event_pump = self.sdl_context.event_pump().unwrap();
+        let mut last_disp = Instant::now();
+        let mut has_data = true;
+        'main: loop {
+            if self.handle_events(&mut event_pump, &mut canvas, &mut dmx, &mut disp_q) {
+                println!();
+                break 'main;
+            }
+            if !self.paused {
+                // feed the decoder threads while there is room
+                let mut try_send = self.acontrol.get_queue_size() < FRAME_QUEUE_LEN && self.vcontrol.get_queue_size() < FRAME_QUEUE_LEN;
+                if !self.vcontrol.try_send_queued() && self.vcontrol.is_filled(FRAME_QUEUE_LEN) {
+                    try_send = false;
+                }
+                if !self.acontrol.try_send_queued() {
+                    try_send = false;
+                }
+                while has_data && try_send {
+                    match dmx.get_frame() {
+                        Err(DemuxerError::EOF) => {
+                            self.vcontrol.try_send_video(PktSendEvent::End);
+                            self.acontrol.try_send_audio(PktSendEvent::End);
+                            has_data = false;
+                        },
+                        Err(err) => { println!("demuxer error {:?}", err); },
+                        Ok(pkt) => {
+                            let streamno = pkt.get_stream().get_id();
+                            if self.has_video && streamno == self.video_str {
+                                debug_log!(self; {" sending video packet"});
+                                self.vcontrol.try_send_video(PktSendEvent::Packet(pkt));
+                                if self.vcontrol.is_filled(FRAME_QUEUE_LEN) {
+                                    try_send = false;
+                                }
+                            } else if self.has_audio && streamno == self.audio_str {
+                                debug_log!(self; {" sending audio packet"});
+                                self.acontrol.try_send_audio(PktSendEvent::Packet(pkt));
+                                if self.acontrol.get_queue_size() >= FRAME_QUEUE_LEN {
+                                    try_send = false;
+                                }
+                            }
+                        }
+                    };
+                }
+                self.vcontrol.fill(&mut disp_q);
+                let mut sleep_time = 25;
+                debug_log!(self; {format!(" time {}", self.tkeep.get_cur_time())});
+                if self.has_video {
+                    debug_log!(self; {format!(" disp queue {}-{}, {}-{} vqueue fill {}", disp_q.first_ts, disp_q.last_ts, disp_q.start, disp_q.end, self.vcontrol.get_queue_size())});
+                    let ret = try_display(&mut disp_q, &mut canvas, &self.tkeep);
+                    if let Some(next_time) = ret {
+                        sleep_time = sleep_time.min(next_time);
+                    }
+                }
+                if self.has_audio {
+                    let time_left = self.acontrol.get_time_left();
+                    debug_log!(self; {format!(" audio left {}", time_left)});
+                    sleep_time = sleep_time.min(time_left);
+                }
+                debug_log!(self; {format!(" sleep {}ms", sleep_time)});
+                if last_disp.elapsed().as_millis() >= 10 {
+                    let c_time = self.tkeep.get_cur_time();
+
+                    if !self.debug {
+                        print!(" {} {}%  \r", format_time(c_time), self.acontrol.get_volume());
+                    } else {
+                        // NOTE(review): disp_q.len is 0 for audio-only files,
+                        // so `% disp_q.len` below would panic if the `d` key
+                        // is pressed then — confirm it is video-only in practice
+                        print!(" {} {} {}% {:3} {:6}\r", format_time(c_time), if self.vcontrol.is_yuv() { 'Y' } else { 'R' }, self.acontrol.get_volume(), (disp_q.end + disp_q.len - disp_q.start) % disp_q.len, self.acontrol.get_fill());
+                    }
+                    std::io::stdout().flush().unwrap();
+                    last_disp = Instant::now();
+                }
+                // stop once every active stream has signalled its end
+                let mut end = true;
+                if self.has_video && !self.vcontrol.is_video_end() {
+                    end = false;
+                }
+                if self.has_audio && !self.acontrol.is_audio_end() {
+                    end = false;
+                }
+                if end {
+                    break;
+                }
+                thread::sleep(Duration::from_millis(sleep_time));
+            } else {
+                thread::sleep(Duration::from_millis(20));
+            }
+        }
+        println!();
+        // restore the idle controllers and shut down the per-file threads
+        std::mem::swap(&mut self.vcontrol, &mut new_vcontrol);
+        new_vcontrol.finish();
+        std::mem::swap(&mut self.acontrol, &mut new_acontrol);
+        new_acontrol.finish();
+    }
+}
+
+/// Parses command-line options and plays each file argument in turn.
+fn main() {
+    let args: Vec<String> = env::args().collect();
+
+    if args.len() == 1 {
+        println!("usage: nihav-player file1 file2 ...");
+        return;
+    }
+
+    let mut player = Player::new();
+
+    let mut aiter = args.iter().skip(1);
+    let mut seek_time = NATimePoint::None;
+    while let Some(arg) = aiter.next() {
+        match arg.as_str() {
+            "-an" => { player.play_audio = false; },
+            "-ae" => { player.play_audio = true; },
+            "-vn" => { player.play_video = false; },
+            "-ve" => { player.play_video = true; },
+            "-seek" => {
+                // consumes the next argument as the seek target
+                if let Some(arg) = aiter.next() {
+                    if let Ok(time) = arg.parse::<NATimePoint>() {
+                        seek_time = time;
+                    } else {
+                        println!("wrong seek time");
+                        seek_time = NATimePoint::None;
+                    }
+                }
+            },
+            "-vol" => {
+                // consumes the next argument as a volume percentage
+                if let Some(arg) = aiter.next() {
+                    if let Ok(vol) = arg.parse::<usize>() {
+                        player.volume = vol.min(MAX_VOLUME);
+                    } else {
+                        println!("wrong volume");
+                    }
+                }
+            },
+            "-debug" => {
+                player.debug = true;
+            },
+            "-nodebug" => {
+                player.debug = false;
+            },
+            _ => {
+                // anything else is a file name; -seek applies only to the next file
+                player.play(arg, seek_time);
+                if player.end { break; }
+                seek_time = NATimePoint::None;
+            },
+        };
+    }
+}
--- /dev/null
+use std::thread::JoinHandle;
+use std::sync::atomic::{AtomicBool, Ordering};
+use std::sync::mpsc::{Receiver, SyncSender, TrySendError};
+use std::thread;
+
+use sdl2::render::Texture;
+
+use nihav_core::frame::{NABufferType, NAVideoBuffer};
+use nihav_core::formats::*;
+use nihav_core::codecs::*;
+use nihav_core::scale::*;
+
+use super::{DecoderStuff, DispQueue, FrameRecord, PktSendEvent, FRAME_QUEUE_LEN};
+
// Set when the decoding thread should abandon in-progress work (seek flush or
// shutdown); cleared again when decoding (re)starts or a Flush is handled.
static SKIP_VDECODING: AtomicBool = AtomicBool::new(false);
// Set by the decoding thread right before it exits (End/ImmediateEnd).
static VIDEO_END: AtomicBool = AtomicBool::new(false);

/// Number of pictures preallocated in each video buffer pool.
pub const FRAME_QUEUE_SIZE: usize = 25;

/// Packed 24-bit RGB format descriptor used for the SDL RGB texture path:
/// three interleaved 8-bit components per pixel, no subsampling, no alpha,
/// no palette.
pub const SDL_RGB_FMT: NAPixelFormaton = NAPixelFormaton { model: ColorModel::RGB(RGBSubmodel::RGB), components: 3,
        comp_info: [
            // R, G and B share one packed 3-byte element at offsets 0/1/2
            Some(NAPixelChromaton { h_ss: 0, v_ss: 0, packed: true, depth: 8, shift: 0, comp_offs: 0, next_elem: 3 }),
            Some(NAPixelChromaton { h_ss: 0, v_ss: 0, packed: true, depth: 8, shift: 0, comp_offs: 1, next_elem: 3 }),
            Some(NAPixelChromaton { h_ss: 0, v_ss: 0, packed: true, depth: 8, shift: 0, comp_offs: 2, next_elem: 3 }),
            None, None
        ], elem_size: 3, be: false, alpha: false, palette: false };
+
/// Video decoding front-end: decodes packets, reorders frames and converts
/// them into either YUV 4:2:0 or packed RGB output buffers for display.
pub struct VideoDecoder {
    yuv_pool: NAVideoBufferPool<u8>, // recycled output buffers, YUV path
    rgb_pool: NAVideoBufferPool<u8>, // recycled output buffers, RGB path
    tb_num: u32,                     // stream timebase numerator
    tb_den: u32,                     // stream timebase denominator
    dec: DecoderStuff,               // decoder, its support data and frame reorderer
    ifmt: NAVideoInfo,               // video info of the last input frame seen
    scaler: NAScale,                 // converter from input format to output format
    ofmt_rgb: ScaleInfo,             // scaler target description, RGB path
    ofmt_yuv: ScaleInfo,             // scaler target description, YUV path
    oinfo_yuv: NAVideoInfo,          // output buffer allocation info, YUV path
    oinfo_rgb: NAVideoInfo,          // output buffer allocation info, RGB path
}
+
+impl VideoDecoder {
+ pub fn new(width: usize, height: usize, tb_num: u32, tb_den: u32, dec: DecoderStuff) -> Self {
+ let ofmt_rgb = ScaleInfo { width, height, fmt: SDL_RGB_FMT };
+ let ofmt_yuv = ScaleInfo { width, height, fmt: YUV420_FORMAT };
+ let oinfo_rgb = NAVideoInfo { width, height, flipped: false, format: SDL_RGB_FMT, bits: 24 };
+ let oinfo_yuv = NAVideoInfo { width, height, flipped: false, format: YUV420_FORMAT, bits: 12 };
+ Self {
+ yuv_pool: NAVideoBufferPool::new(FRAME_QUEUE_SIZE),
+ rgb_pool: NAVideoBufferPool::new(FRAME_QUEUE_SIZE),
+ tb_num, tb_den,
+ dec, ofmt_yuv, ofmt_rgb, oinfo_yuv, oinfo_rgb,
+ scaler: NAScale::new(ofmt_rgb, ofmt_rgb).unwrap(),
+ ifmt: NAVideoInfo { width: 0, height: 0, flipped: false, format: SDL_RGB_FMT, bits: 24 },
+ }
+ }
+ fn convert_buf(&mut self, bt: NABufferType, ts: u64) -> Option<FrameRecord> {
+ let vinfo = bt.get_video_info().unwrap();
+ if self.ifmt.get_width() != vinfo.get_width() ||
+ self.ifmt.get_height() != vinfo.get_height() ||
+ self.ifmt.get_format() != vinfo.get_format() {
+ self.ifmt = vinfo;
+ let sc_ifmt = ScaleInfo { width: self.ifmt.get_width(), height: self.ifmt.get_height(), fmt: self.ifmt.get_format() };
+ let do_yuv = if let ColorModel::YUV(_) = self.ifmt.get_format().get_model() { true } else { false };
+ let ofmt = if do_yuv { self.ofmt_yuv } else { self.ofmt_rgb };
+ self.scaler = NAScale::new(sc_ifmt, ofmt).unwrap();
+ }
+ let mut opic = if let ColorModel::YUV(_) = self.ifmt.get_format().get_model() {
+ self.yuv_pool.prealloc_video(self.oinfo_yuv, 2).unwrap();
+ while self.yuv_pool.get_free().is_none() {
+ if SKIP_VDECODING.load(Ordering::Relaxed) {
+ return None;
+ }
+ std::thread::yield_now();
+ }
+ NABufferType::Video(self.yuv_pool.get_free().unwrap())
+ } else {
+ self.rgb_pool.prealloc_video(self.oinfo_rgb, 0).unwrap();
+ while self.rgb_pool.get_free().is_none() {
+ if SKIP_VDECODING.load(Ordering::Relaxed) {
+ return None;
+ }
+ std::thread::yield_now();
+ }
+ NABufferType::VideoPacked(self.rgb_pool.get_free().unwrap())
+ };
+ let ret = self.scaler.convert(&bt, &mut opic);
+ if ret.is_err() { println!(" scaler error {:?}", ret.err()); return None; }
+ ret.unwrap();
+ let time = NATimeInfo::ts_to_time(ts, 1000, self.tb_num, self.tb_den);
+ Some((opic, time))
+ }
+ pub fn next_frame(&mut self, pkt: &NAPacket) -> Option<FrameRecord> {
+ if let Ok(frm) = self.dec.dec.decode(&mut self.dec.dsupp, pkt) {
+ self.dec.reord.add_frame(frm);
+ while let Some(frm) = self.dec.reord.get_frame() {
+ let bt = frm.get_buffer();
+ if let NABufferType::None = bt { continue; }
+ let ts = frm.get_dts().unwrap_or_else(|| frm.get_pts().unwrap_or(0));
+ return self.convert_buf(bt, ts);
+ }
+ }
+ None
+ }
+ pub fn last_frame(&mut self) -> Option<FrameRecord> {
+ while let Some(frm) = self.dec.reord.get_last_frames() {
+ let bt = frm.get_buffer();
+ if let NABufferType::None = bt { continue; }
+ let ts = frm.get_dts().unwrap_or_else(|| frm.get_pts().unwrap_or(0));
+ return self.convert_buf(bt, ts);
+ }
+ None
+ }
+ pub fn flush(&mut self) {
+ self.dec.dec.flush();
+ self.dec.reord.flush();
+ }
+}
+
+fn start_video_decoding(width: usize, height: usize, tb_num: u32, tb_den: u32, video_dec: DecoderStuff, vprecv: Receiver<PktSendEvent>, vfsend: SyncSender<(NABufferType, u64)>) -> JoinHandle<()> {
+ std::thread::Builder::new().name("vdecoder".to_string()).spawn(move ||{
+ SKIP_VDECODING.store(false, Ordering::Relaxed);
+ let mut vdec = VideoDecoder::new(width, height, tb_num, tb_den, video_dec);
+ let mut skip_mode = FrameSkipMode::None;
+ loop {
+ match vprecv.recv() {
+ Ok(PktSendEvent::Packet(pkt)) => {
+ if !SKIP_VDECODING.load(Ordering::Relaxed) {
+ if let Some((buf, time)) = vdec.next_frame(&pkt) {
+ vfsend.send((buf, time)).unwrap();
+ }
+ }
+ },
+ Ok(PktSendEvent::Flush) => {
+ vdec.flush();
+ SKIP_VDECODING.store(false, Ordering::Relaxed);
+ },
+ Ok(PktSendEvent::End) => {
+ while vdec.yuv_pool.get_free().is_some() && vdec.rgb_pool.get_free().is_some() {
+ let ret = vdec.last_frame();
+ if ret.is_none() {
+ break;
+ }
+ vfsend.send(ret.unwrap()).unwrap();
+ }
+ VIDEO_END.store(true, Ordering::Relaxed);
+ break;
+ },
+ Ok(PktSendEvent::ImmediateEnd) => {
+ VIDEO_END.store(true, Ordering::Relaxed);
+ break;
+ },
+ Ok(PktSendEvent::HurryUp) => {
+ skip_mode = skip_mode.advance();
+ vdec.dec.dec.set_options(&[NAOption{
+ name: FRAME_SKIP_OPTION,
+ value: NAValue::String(skip_mode.to_string()),
+ }]);
+ },
+ Err(_) => {
+ break;
+ },
+ };
+ }
+ }).unwrap()
+}
+
/// Helper trait for cycling a value through a fixed sequence of states.
trait Advance {
    /// Returns the state following the current one.
    fn advance(&self) -> Self;
}
+
+impl Advance for FrameSkipMode {
+ fn advance(&self) -> Self {
+ match *self {
+ FrameSkipMode::None => FrameSkipMode::KeyframesOnly,
+ FrameSkipMode::KeyframesOnly => FrameSkipMode::IntraOnly,
+ FrameSkipMode::IntraOnly => FrameSkipMode::None,
+ }
+ }
+}
+
/// Copies a planar YUV 4:2:0 picture from a NihAV video buffer into an SDL
/// texture: the luma plane first, followed by the two quarter-size chroma
/// planes, honouring the texture's pitch on every row.
fn output_yuv(yuv_texture: &mut Texture, buf: &NAVideoBuffer<u8>, width: usize, height: usize) {
    let src = buf.get_data();
    let ysstride = buf.get_stride(0);
    let ysrc = &src[buf.get_offset(0)..];
    // NOTE(review): the u/v names below are swapped relative to the plane
    // indices they read (usrc reads plane 2, vsrc reads plane 1), and the
    // texture receives plane 1 first, then plane 2 — presumably this matches
    // the chroma order of the texture created elsewhere; confirm against the
    // texture's pixel format before renaming anything.
    let usstride = buf.get_stride(2);
    let usrc = &src[buf.get_offset(2)..];
    let vsstride = buf.get_stride(1);
    let vsrc = &src[buf.get_offset(1)..];
    yuv_texture.with_lock(None, |buffer: &mut [u8], pitch: usize| {
            // luma: copy min(pitch, width) bytes per row for `height` rows
            let csize = pitch.min(width);
            for (dline, sline) in buffer.chunks_exact_mut(pitch).take(height).zip(ysrc.chunks_exact(ysstride)) {
                dline[..csize].copy_from_slice(&sline[..csize]);
            }
            // first chroma plane starts right after the luma plane
            let coff = pitch * height;
            let csize = (pitch / 2).min(width / 2);
            for (dline, sline) in buffer[coff..].chunks_exact_mut(pitch / 2).take(height/2).zip(vsrc.chunks(vsstride)) {
                dline[..csize].copy_from_slice(&sline[..csize]);
            }
            // second chroma plane follows the first
            let coff = pitch * height + (pitch / 2) * (height / 2);
            for (dline, sline) in buffer[coff..].chunks_exact_mut(pitch / 2).take(height/2).zip(usrc.chunks(usstride)) {
                dline[..csize].copy_from_slice(&sline[..csize]);
            }
        }).unwrap();
}
+
+
/// Player-side handle for the video decoding thread.
///
/// Events that the rendezvous channel cannot accept yet are parked in
/// `vqueue`; decoded frames come back over `vfrecv`.
pub struct VideoControl {
    vqueue: Vec<PktSendEvent>,        // events not yet accepted by the channel
    vpsend: SyncSender<PktSendEvent>, // packet/command channel to the thread
    vfrecv: Receiver<FrameRecord>,    // decoded frames from the thread
    do_yuv: bool,                     // true if the last frame used the YUV path
    vthread: JoinHandle<()>,          // the decoder thread itself
}
+
+impl VideoControl {
+ pub fn new(video_dec: Option<DecoderStuff>, width: usize, height: usize, tb_num: u32, tb_den: u32) -> Self {
+ let (vpsend, vprecv) = std::sync::mpsc::sync_channel::<PktSendEvent>(0);
+ let (vfsend, vfrecv) = std::sync::mpsc::sync_channel::<FrameRecord>(FRAME_QUEUE_SIZE - 1);
+
+ VIDEO_END.store(false, Ordering::Relaxed);
+
+ let vthread = if let Some(video_dec) = video_dec {
+ start_video_decoding(width, height, tb_num, tb_den, video_dec, vprecv, vfsend)
+ } else {
+ thread::Builder::new().name("vdecoder-dummy".to_string()).spawn(move ||{
+ loop {
+ match vprecv.recv() {
+ Ok(PktSendEvent::End) => break,
+ Ok(PktSendEvent::ImmediateEnd) => break,
+ Err(_) => {
+ break;
+ },
+ _ => {},
+ };
+ }
+ }).unwrap()
+ };
+
+
+ Self {
+ vqueue: Vec::with_capacity(FRAME_QUEUE_LEN),
+ vpsend, vfrecv,
+ do_yuv: false,
+ vthread,
+ }
+ }
+ pub fn flush(&mut self) {
+ self.vqueue.truncate(0);
+ SKIP_VDECODING.store(true, Ordering::Release);
+ for _ in 0..8 {
+ let _ = self.vfrecv.try_recv();
+ }
+ let _ = self.vpsend.send(PktSendEvent::Flush);
+ while self.vfrecv.try_recv().is_ok() { }
+ }
+ pub fn get_queue_size(&self) -> usize { self.vqueue.len() }
+ pub fn is_filled(&self, size: usize) -> bool {
+ self.vqueue.len() >= size
+ }
+ pub fn try_send_video(&mut self, evt: PktSendEvent) -> bool {
+ if self.vqueue.len() > 0 {
+ self.vqueue.push(evt);
+ false
+ } else {
+ self.try_send_event(evt)
+ }
+ }
+ fn try_send_event(&mut self, evt: PktSendEvent) -> bool {
+ if let Err(TrySendError::Full(evt)) = self.vpsend.try_send(evt) {
+ self.vqueue.insert(0, evt);
+ false
+ } else {
+ true
+ }
+ }
+ pub fn try_send_queued(&mut self) -> bool {
+ while !self.vqueue.is_empty() {
+ let pkt = self.vqueue.remove(0);
+ if !self.try_send_event(pkt) {
+ return false;
+ }
+ }
+ true
+ }
+ pub fn is_video_end(&self) -> bool {
+ VIDEO_END.load(Ordering::Relaxed)
+ }
+
+ pub fn is_yuv(&self) -> bool { self.do_yuv }
+
+ pub fn fill(&mut self, disp_queue: &mut DispQueue) {
+ while !disp_queue.is_full() {
+ let is_empty = disp_queue.is_empty();
+ if let Ok((pic, time)) = self.vfrecv.try_recv() {
+ let buf = pic.get_vbuf().unwrap();
+ self.do_yuv = buf.get_info().get_format().get_model().is_yuv();
+ let idx = disp_queue.end;
+ disp_queue.move_end();
+ let frm = &mut disp_queue.pool[idx];
+ if !self.do_yuv {
+ let sstride = buf.get_stride(0);
+ let src = buf.get_data();
+ frm.rgb_tex.with_lock(None, |buffer: &mut [u8], pitch: usize| {
+ let csize = sstride.min(pitch);
+ for (dst, src) in buffer.chunks_mut(pitch).zip(src.chunks(sstride)) {
+ (&mut dst[..csize]).copy_from_slice(&src[..csize]);
+ }
+ true
+ }).unwrap();
+ } else {
+ output_yuv(&mut frm.yuv_tex, &buf, disp_queue.width, disp_queue.height);
+ }
+ frm.valid = true;
+ frm.is_yuv = self.do_yuv;
+ frm.ts = time;
+ if is_empty {
+ disp_queue.first_ts = time;
+ }
+ disp_queue.last_ts = time;
+ } else {
+ break;
+ }
+ }
+ }
+
+ pub fn finish(self) {
+ SKIP_VDECODING.store(true, Ordering::Release);
+ for _ in 0..8 {
+ let _ = self.vfrecv.try_recv();
+ }
+ let _ = self.vpsend.send(PktSendEvent::ImmediateEnd);
+ self.vthread.join().unwrap();
+ }
+}