git.nihav.org Git - nihav-player.git/blobdiff - videoplayer/src/audiodec.rs
use NAPacketiser::attach_stream() where appropriate
[nihav-player.git] / videoplayer / src / audiodec.rs
index e9da970ad577d3a3c518c8f100134cff23286b2a..1f5c52ecc3142b016b743b12f772346c1f5c69a2 100644 (file)
@@ -12,11 +12,10 @@ use nihav_core::formats::*;
 use nihav_core::codecs::*;
 use nihav_core::soundcvt::*;
 
-use super::{DecoderStuff, DecoderType, PktSendEvent};
+use super::{DecoderStuff, DecoderType, DecoderState, DecodingState, PktSendEvent};
 
-static SKIP_ADECODING: AtomicBool = AtomicBool::new(false);
+static ADEC_STATE: DecoderState = DecoderState::new();
 static AUDIO_VOLUME: AtomicUsize = AtomicUsize::new(100);
-static AUDIO_END: AtomicBool = AtomicBool::new(false);
 static CUR_QUEUE_FILL: AtomicUsize = AtomicUsize::new(0);
 static SAMPLE_RATE: AtomicUsize = AtomicUsize::new(0);
 static CHANNELS: AtomicUsize = AtomicUsize::new(0);
@@ -79,7 +78,7 @@ impl AudioQueue {
         if self.end + src.len() > self.queue.len() {
             self.queue.resize(self.end + src.len(), 0);
         }
-        self.queue[self.end..][..src.len()].copy_from_slice(&src);
+        self.queue[self.end..][..src.len()].copy_from_slice(src);
         self.end += src.len();
         self.spos = samplepos;
         self.set_time();
@@ -123,7 +122,7 @@ impl AudioCallback for AudioOutput {
     type Channel = i16;
 
     fn callback(&mut self, out: &mut [Self::Channel]) {
-        let mut queue = self.queue.lock().unwrap();
+        let mut queue = self.queue.lock().expect("audio queue should be accessible");
         let dstlen = out.len();
         let copylen = queue.fill().min(dstlen);
         let volume = AUDIO_VOLUME.load(Ordering::Relaxed) as i32;
@@ -156,20 +155,21 @@ fn dummy_audio_thread(aprecv: Receiver<PktSendEvent>) -> JoinHandle<()> {
 
 type AudioPlaybackType = Option<AudioDevice<AudioOutput>>;
 
-fn start_audio_decoding(asystem: &AudioSubsystem, ainfo: NAAudioInfo, mut audio_dec: DecoderStuff, aprecv: Receiver<PktSendEvent>) -> (AudioPlaybackType, JoinHandle<()>) {
+fn start_audio_decoding(asystem: &AudioSubsystem, ainfo: NAAudioInfo, sbr_hack: bool, mut audio_dec: DecoderStuff, aprecv: Receiver<PktSendEvent>) -> (AudioPlaybackType, JoinHandle<()>) {
     let ch = ainfo.channels.max(2);
+    let sample_rate = if !sbr_hack { ainfo.sample_rate } else { ainfo.sample_rate * 2 };
     let desired_spec = AudioSpecDesired {
-        freq:       Some(ainfo.sample_rate as i32),
+        freq:       Some(sample_rate as i32),
         channels:   Some(ch),
         samples:    None
     };
     let dst_info = NAAudioInfo {
-            sample_rate:    ainfo.sample_rate,
+            sample_rate,
             channels:       ch,
             format:         SND_S16_FORMAT,
             block_len:      0,
         };
-    let queue = Arc::new(Mutex::new(AudioQueue::new(ainfo.sample_rate as usize, ch as usize)));
+    let queue = Arc::new(Mutex::new(AudioQueue::new(sample_rate as usize, ch as usize)));
     let qclone = queue.clone();
     let ret = asystem.open_playback(None, &desired_spec, |_spec| {
             AudioOutput {
@@ -184,37 +184,40 @@ fn start_audio_decoding(asystem: &AudioSubsystem, ainfo: NAAudioInfo, mut audio_
             let adec = if let DecoderType::Audio(ref mut dec) = audio_dec.dec { dec } else { panic!("not an audio decoder!"); };
             let mut samplepos = 0usize;
             let dst_chmap = if dst_info.channels == 2 {
-                    NAChannelMap::from_str("L,R").unwrap()
+                    NAChannelMap::from_str("L,R").expect("should be able to create stereo channel map")
                 } else {
-                    NAChannelMap::from_str("C").unwrap()
+                    NAChannelMap::from_str("C").expect("should be able to create single-channel map")
                 };
-            SKIP_ADECODING.store(false, Ordering::Relaxed);
+            ADEC_STATE.set_state(DecodingState::Normal);
             loop {
                 match aprecv.recv() {
                     Ok(PktSendEvent::Packet(pkt)) => {
                         loop {
                             if CUR_QUEUE_FILL.load(Ordering::Relaxed)
- < QUEUE_REFILL_LIMIT || SKIP_ADECODING.load(Ordering::Relaxed) {
+ < QUEUE_REFILL_LIMIT || ADEC_STATE.is_flushing() {
                                 break;
                             }
                             std::thread::sleep(Duration::from_millis(100));
                         }
-                        if !SKIP_ADECODING.load(Ordering::Relaxed) {
+                        if !ADEC_STATE.is_flushing() {
                             if let Ok(frm) = adec.decode(&mut audio_dec.dsupp, &pkt) {
                                 let buf = frm.get_buffer();
                                 if let Some(pts) = frm.get_pts() {
                                     samplepos = NATimeInfo::ts_to_time(pts, u64::from(dst_info.sample_rate), frm.ts.tb_num, frm.ts.tb_den) as usize;
+                                    if sbr_hack {
+                                        samplepos >>= 2;
+                                    }
                                 }
                                 samplepos += buf.get_audio_length();
                                 if let Ok(out_buf) = convert_audio_frame(&buf, &dst_info, &dst_chmap) {
                                     match out_buf {
                                         NABufferType::AudioI16(abuf) => {
-                                            let mut qdata = queue.lock().unwrap();
+                                            let mut qdata = queue.lock().expect("audio queue should be accessible");
                                             qdata.add(abuf.get_data(), samplepos);
                                             drop(qdata);
                                         },
                                         NABufferType::AudioPacked(abuf) => {
-                                            let mut qdata = queue.lock().unwrap();
+                                            let mut qdata = queue.lock().expect("audio queue should be accessible");
                                             qdata.add_bytes(abuf.get_data(), samplepos);
                                             drop(qdata);
                                         },
@@ -227,13 +230,13 @@ fn start_audio_decoding(asystem: &AudioSubsystem, ainfo: NAAudioInfo, mut audio_
                     Ok(PktSendEvent::GetFrames) => {},
                     Ok(PktSendEvent::Flush) => {
                         adec.flush();
-                        let mut qdata = queue.lock().unwrap();
+                        let mut qdata = queue.lock().expect("audio queue should be accessible");
                         qdata.flush();
-                        SKIP_ADECODING.store(false, Ordering::Relaxed);
+                        ADEC_STATE.set_state(DecodingState::Waiting);
                     },
                     Ok(PktSendEvent::End) => break,
                     Ok(PktSendEvent::ImmediateEnd) => {
-                        let mut qdata = queue.lock().unwrap();
+                        let mut qdata = queue.lock().expect("audio queue should be accessible");
                         qdata.flush();
                         break;
                     },
@@ -244,12 +247,12 @@ fn start_audio_decoding(asystem: &AudioSubsystem, ainfo: NAAudioInfo, mut audio_
                 };
             }
             loop {
-                let qdata = queue.lock().unwrap();
-                if qdata.fill() == 0 || SKIP_ADECODING.load(Ordering::Relaxed) {
+                let qdata = queue.lock().expect("audio queue should be accessible");
+                if qdata.fill() == 0 || ADEC_STATE.is_flushing() {
                     break;
                 }
             }
-            AUDIO_END.store(true, Ordering::Relaxed);
+            ADEC_STATE.set_state(DecodingState::End);
         }).unwrap())
 }
 
@@ -261,14 +264,14 @@ pub struct AudioControl {
 }
 
 impl AudioControl {
-    pub fn new(audio_dec: Option<DecoderStuff>, ainfo: Option<NAAudioInfo>, asystem: &AudioSubsystem) -> Self {
+    pub fn new(audio_dec: Option<DecoderStuff>, ainfo: Option<NAAudioInfo>, sbr_hack: bool, asystem: &AudioSubsystem) -> Self {
         let (apsend, aprecv) = std::sync::mpsc::sync_channel::<PktSendEvent>(20);
         let (adevice, athread) = if let Some(audio_dec) = audio_dec {
-                start_audio_decoding(asystem, ainfo.unwrap(), audio_dec, aprecv)
+                start_audio_decoding(asystem, ainfo.expect("audio info should be present"), sbr_hack, audio_dec, aprecv)
             } else {
                 (None, dummy_audio_thread(aprecv))
             };
-        AUDIO_END.store(false, Ordering::Relaxed);
+        ADEC_STATE.set_state(DecodingState::Normal);
 
         Self {
             aqueue:     Vec::new(),
@@ -295,7 +298,7 @@ impl AudioControl {
         AUDIO_VOLUME.load(Ordering::Relaxed)
     }
     pub fn is_audio_end(&self) -> bool {
-        AUDIO_END.load(Ordering::Relaxed)
+        matches!(ADEC_STATE.get_state(), DecodingState::End | DecodingState::Error)
     }
     pub fn get_fill(&self) -> usize { CUR_QUEUE_FILL.load(Ordering::Relaxed) }
     pub fn get_time(&self) -> Option<u64> {
@@ -318,7 +321,7 @@ impl AudioControl {
 
     pub fn get_queue_size(&self) -> usize { self.aqueue.len() }
     pub fn try_send_audio(&mut self, evt: PktSendEvent) -> bool {
-        if self.aqueue.len() > 0 {
+        if !self.aqueue.is_empty() {
             self.aqueue.push(evt);
             false
         } else {
@@ -346,12 +349,12 @@ impl AudioControl {
     pub fn flush(&mut self) {
         self.pause();
         self.aqueue.clear();
-        SKIP_ADECODING.store(true, Ordering::Release);
+        ADEC_STATE.set_state(DecodingState::Flush);
         CURRENT_TIME_SET.store(false, Ordering::Release);
         let _ = self.apsend.send(PktSendEvent::Flush);
     }
     pub fn finish(self) {
-        SKIP_ADECODING.store(true, Ordering::Release);
+        ADEC_STATE.set_state(DecodingState::Flush);
         let _ = self.apsend.send(PktSendEvent::ImmediateEnd);
         self.athread.join().unwrap();
     }