// Source: git.nihav.org — nihav-player.git, videoplayer/src/videodec.rs
// (blob captured at commit "ignore empty frames")
1 use std::thread::JoinHandle;
2 use std::sync::atomic::{AtomicBool, Ordering};
3 use std::sync::mpsc::{Receiver, SyncSender, TrySendError};
4 use std::thread;
5
6 use sdl2::render::Texture;
7
8 use nihav_core::frame::{NABufferType, NAVideoBuffer};
9 use nihav_core::formats::*;
10 use nihav_core::codecs::*;
11 use nihav_core::scale::*;
12
13 use super::{DecoderStuff, DispQueue, FrameRecord, PktSendEvent, FRAME_QUEUE_LEN};
14
// Set by the UI thread to make the decoding thread abandon work in progress
// (checked inside the buffer-wait loops and before decoding each packet).
static SKIP_VDECODING: AtomicBool = AtomicBool::new(false);
// Set by the decoding thread once it has produced its final frame (or was
// told to stop immediately); polled by VideoControl::is_video_end().
static VIDEO_END: AtomicBool = AtomicBool::new(false);

// Capacity of each video buffer pool and of the decoded-frame channel.
pub const FRAME_QUEUE_SIZE: usize = 25;

// Packed 24-bit RGB pixel format (three 8-bit components, 3-byte elements,
// little-endian, no alpha/palette) used for the SDL RGB texture path.
pub const SDL_RGB_FMT: NAPixelFormaton = NAPixelFormaton { model: ColorModel::RGB(RGBSubmodel::RGB), components: 3,
    comp_info: [
            Some(NAPixelChromaton { h_ss: 0, v_ss: 0, packed: true, depth: 8, shift: 0, comp_offs: 0, next_elem: 3 }),
            Some(NAPixelChromaton { h_ss: 0, v_ss: 0, packed: true, depth: 8, shift: 0, comp_offs: 1, next_elem: 3 }),
            Some(NAPixelChromaton { h_ss: 0, v_ss: 0, packed: true, depth: 8, shift: 0, comp_offs: 2, next_elem: 3 }),
            None, None
        ], elem_size: 3, be: false, alpha: false, palette: false };
27
/// Wraps a codec decoder plus the scaling/conversion state needed to turn
/// decoded pictures into display-ready YUV420 or RGB24 buffers.
pub struct VideoDecoder {
    /// Pool of output buffers for the YUV420 path.
    yuv_pool: NAVideoBufferPool<u8>,
    /// Pool of output buffers for the packed-RGB path.
    rgb_pool: NAVideoBufferPool<u8>,
    /// Stream time base numerator (for timestamp-to-milliseconds conversion).
    tb_num: u32,
    /// Stream time base denominator.
    tb_den: u32,
    /// Decoder, its support data and the frame reorderer.
    dec: DecoderStuff,
    /// Last seen input video format; compared per frame to detect changes.
    ifmt: NAVideoInfo,
    /// Current scaler, rebuilt whenever the input format changes.
    scaler: NAScale,
    /// Scaler output description for the RGB path.
    ofmt_rgb: ScaleInfo,
    /// Scaler output description for the YUV path.
    ofmt_yuv: ScaleInfo,
    /// Buffer allocation info for YUV output frames.
    oinfo_yuv: NAVideoInfo,
    /// Buffer allocation info for RGB output frames.
    oinfo_rgb: NAVideoInfo,
}
41
42 impl VideoDecoder {
43 pub fn new(width: usize, height: usize, tb_num: u32, tb_den: u32, dec: DecoderStuff) -> Self {
44 let ofmt_rgb = ScaleInfo { width, height, fmt: SDL_RGB_FMT };
45 let ofmt_yuv = ScaleInfo { width, height, fmt: YUV420_FORMAT };
46 let oinfo_rgb = NAVideoInfo { width, height, flipped: false, format: SDL_RGB_FMT, bits: 24 };
47 let oinfo_yuv = NAVideoInfo { width, height, flipped: false, format: YUV420_FORMAT, bits: 12 };
48 Self {
49 yuv_pool: NAVideoBufferPool::new(FRAME_QUEUE_SIZE),
50 rgb_pool: NAVideoBufferPool::new(FRAME_QUEUE_SIZE),
51 tb_num, tb_den,
52 dec, ofmt_yuv, ofmt_rgb, oinfo_yuv, oinfo_rgb,
53 scaler: NAScale::new(ofmt_rgb, ofmt_rgb).unwrap(),
54 ifmt: NAVideoInfo { width: 0, height: 0, flipped: false, format: SDL_RGB_FMT, bits: 24 },
55 }
56 }
57 fn convert_buf(&mut self, bt: NABufferType, ts: u64) -> Option<FrameRecord> {
58 let vinfo = bt.get_video_info().unwrap();
59 if self.ifmt.get_width() != vinfo.get_width() ||
60 self.ifmt.get_height() != vinfo.get_height() ||
61 self.ifmt.get_format() != vinfo.get_format() {
62 self.ifmt = vinfo;
63 let sc_ifmt = ScaleInfo { width: self.ifmt.get_width(), height: self.ifmt.get_height(), fmt: self.ifmt.get_format() };
64 let do_yuv = if let ColorModel::YUV(_) = self.ifmt.get_format().get_model() { true } else { false };
65 let ofmt = if do_yuv { self.ofmt_yuv } else { self.ofmt_rgb };
66 self.scaler = NAScale::new(sc_ifmt, ofmt).unwrap();
67 }
68 let mut opic = if let ColorModel::YUV(_) = self.ifmt.get_format().get_model() {
69 self.yuv_pool.prealloc_video(self.oinfo_yuv, 2).unwrap();
70 while self.yuv_pool.get_free().is_none() {
71 if SKIP_VDECODING.load(Ordering::Relaxed) {
72 return None;
73 }
74 std::thread::yield_now();
75 }
76 NABufferType::Video(self.yuv_pool.get_free().unwrap())
77 } else {
78 self.rgb_pool.prealloc_video(self.oinfo_rgb, 0).unwrap();
79 while self.rgb_pool.get_free().is_none() {
80 if SKIP_VDECODING.load(Ordering::Relaxed) {
81 return None;
82 }
83 std::thread::yield_now();
84 }
85 NABufferType::VideoPacked(self.rgb_pool.get_free().unwrap())
86 };
87 let ret = self.scaler.convert(&bt, &mut opic);
88 if ret.is_err() { println!(" scaler error {:?}", ret.err()); return None; }
89 ret.unwrap();
90 let time = NATimeInfo::ts_to_time(ts, 1000, self.tb_num, self.tb_den);
91 Some((opic, time))
92 }
93 pub fn next_frame(&mut self, pkt: &NAPacket) -> Option<FrameRecord> {
94 if let Ok(frm) = self.dec.dec.decode(&mut self.dec.dsupp, pkt) {
95 self.dec.reord.add_frame(frm);
96 while let Some(frm) = self.dec.reord.get_frame() {
97 let bt = frm.get_buffer();
98 if let NABufferType::None = bt { continue; }
99 let ts = frm.get_dts().unwrap_or_else(|| frm.get_pts().unwrap_or(0));
100 return self.convert_buf(bt, ts);
101 }
102 }
103 None
104 }
105 pub fn last_frame(&mut self) -> Option<FrameRecord> {
106 while let Some(frm) = self.dec.reord.get_last_frames() {
107 let bt = frm.get_buffer();
108 if let NABufferType::None = bt { continue; }
109 let ts = frm.get_dts().unwrap_or_else(|| frm.get_pts().unwrap_or(0));
110 return self.convert_buf(bt, ts);
111 }
112 None
113 }
114 pub fn flush(&mut self) {
115 self.dec.dec.flush();
116 self.dec.reord.flush();
117 }
118 }
119
/// Spawns the video decoding thread.
///
/// The thread receives packets and control events over `vprecv`, decodes and
/// rescales frames, and pushes `(buffer, time_in_ms)` records into `vfsend`.
/// It exits on End/ImmediateEnd or when the packet channel closes, setting
/// VIDEO_END so the UI side can detect completion.
fn start_video_decoding(width: usize, height: usize, tb_num: u32, tb_den: u32, video_dec: DecoderStuff, vprecv: Receiver<PktSendEvent>, vfsend: SyncSender<(NABufferType, u64)>) -> JoinHandle<()> {
    std::thread::Builder::new().name("vdecoder".to_string()).spawn(move ||{
            SKIP_VDECODING.store(false, Ordering::Relaxed);
            let mut vdec = VideoDecoder::new(width, height, tb_num, tb_den, video_dec);
            let mut skip_mode = FrameSkipMode::None;
            loop {
                match vprecv.recv() {
                    Ok(PktSendEvent::Packet(pkt)) => {
                        // Drop packets while a flush/seek is in progress.
                        if !SKIP_VDECODING.load(Ordering::Relaxed) {
                            if let Some((buf, time)) = vdec.next_frame(&pkt) {
                                vfsend.send((buf, time)).unwrap();
                            }
                        }
                    },
                    Ok(PktSendEvent::Flush) => {
                        vdec.flush();
                        SKIP_VDECODING.store(false, Ordering::Relaxed);
                    },
                    Ok(PktSendEvent::End) => {
                        // Drain whatever the reorderer still holds, but only while
                        // both pools have a free buffer for the converted output.
                        while vdec.yuv_pool.get_free().is_some() && vdec.rgb_pool.get_free().is_some() {
                            let ret = vdec.last_frame();
                            if ret.is_none() {
                                break;
                            }
                            vfsend.send(ret.unwrap()).unwrap();
                        }
                        VIDEO_END.store(true, Ordering::Relaxed);
                        break;
                    },
                    Ok(PktSendEvent::ImmediateEnd) => {
                        // Stop right away without draining pending frames.
                        VIDEO_END.store(true, Ordering::Relaxed);
                        break;
                    },
                    Ok(PktSendEvent::HurryUp) => {
                        // Decoder is falling behind: step the frame-skip mode one
                        // notch (None -> keyframes only -> intra only -> None).
                        skip_mode = skip_mode.advance();
                        vdec.dec.dec.set_options(&[NAOption{
                                name: FRAME_SKIP_OPTION,
                                value: NAValue::String(skip_mode.to_string()),
                            }]);
                    },
                    Err(_) => {
                        // Sender side dropped — treat as end of input.
                        // NOTE(review): VIDEO_END is not set on this path — confirm
                        // callers do not wait on is_video_end() after a sender drop.
                        break;
                    },
                };
            }
        }).unwrap()
}
167
/// Helper trait for stepping a value to its next setting in a fixed cycle
/// (implemented below for FrameSkipMode).
trait Advance {
    fn advance(&self) -> Self;
}
171
172 impl Advance for FrameSkipMode {
173 fn advance(&self) -> Self {
174 match *self {
175 FrameSkipMode::None => FrameSkipMode::KeyframesOnly,
176 FrameSkipMode::KeyframesOnly => FrameSkipMode::IntraOnly,
177 FrameSkipMode::IntraOnly => FrameSkipMode::None,
178 }
179 }
180 }
181
/// Uploads a planar YUV420 frame into an SDL texture laid out as one
/// full-size luma plane followed by two half-size chroma planes.
///
/// NOTE(review): the chroma plane written immediately after luma comes from
/// source plane 1 and the second from source plane 2, while the local names
/// `usrc`/`vsrc` are attached the other way round — presumably this matches
/// the texture's chroma ordering (e.g. YV12's Y,V,U); confirm against the
/// pixel format the texture was created with.
fn output_yuv(yuv_texture: &mut Texture, buf: &NAVideoBuffer<u8>, width: usize, height: usize) {
    let src = buf.get_data();
    let ysstride = buf.get_stride(0);
    let ysrc = &src[buf.get_offset(0)..];
    let usstride = buf.get_stride(2);
    let usrc = &src[buf.get_offset(2)..];
    let vsstride = buf.get_stride(1);
    let vsrc = &src[buf.get_offset(1)..];
    yuv_texture.with_lock(None, |buffer: &mut [u8], pitch: usize| {
            // Copy luma line by line, clipping each row to min(pitch, width).
            let csize = pitch.min(width);
            for (dline, sline) in buffer.chunks_exact_mut(pitch).take(height).zip(ysrc.chunks_exact(ysstride)) {
                dline[..csize].copy_from_slice(&sline[..csize]);
            }
            // First chroma plane starts right after the luma plane; chroma rows
            // are half the width/height and half the pitch.
            let coff = pitch * height;
            let csize = (pitch / 2).min(width / 2);
            for (dline, sline) in buffer[coff..].chunks_exact_mut(pitch / 2).take(height/2).zip(vsrc.chunks(vsstride)) {
                dline[..csize].copy_from_slice(&sline[..csize]);
            }
            // Second chroma plane follows the first.
            let coff = pitch * height + (pitch / 2) * (height / 2);
            for (dline, sline) in buffer[coff..].chunks_exact_mut(pitch / 2).take(height/2).zip(usrc.chunks(usstride)) {
                dline[..csize].copy_from_slice(&sline[..csize]);
            }
        }).unwrap();
}
206
207
/// UI-side handle for the video decoding thread: queues packets for it,
/// receives decoded frames and uploads them into display textures.
pub struct VideoControl {
    /// Events that could not be sent to the decoder thread yet (channel full).
    vqueue: Vec<PktSendEvent>,
    /// Packet/control event channel to the decoder thread.
    vpsend: SyncSender<PktSendEvent>,
    /// Decoded frame channel coming back from the decoder thread.
    vfrecv: Receiver<FrameRecord>,
    /// Whether the most recently uploaded frame was YUV (vs RGB).
    do_yuv: bool,
    /// Decoder (or dummy) thread handle, joined in finish().
    vthread: JoinHandle<()>,
}
215
216 impl VideoControl {
217 pub fn new(video_dec: Option<DecoderStuff>, width: usize, height: usize, tb_num: u32, tb_den: u32) -> Self {
218 let (vpsend, vprecv) = std::sync::mpsc::sync_channel::<PktSendEvent>(0);
219 let (vfsend, vfrecv) = std::sync::mpsc::sync_channel::<FrameRecord>(FRAME_QUEUE_SIZE - 1);
220
221 VIDEO_END.store(false, Ordering::Relaxed);
222
223 let vthread = if let Some(video_dec) = video_dec {
224 start_video_decoding(width, height, tb_num, tb_den, video_dec, vprecv, vfsend)
225 } else {
226 thread::Builder::new().name("vdecoder-dummy".to_string()).spawn(move ||{
227 loop {
228 match vprecv.recv() {
229 Ok(PktSendEvent::End) => break,
230 Ok(PktSendEvent::ImmediateEnd) => break,
231 Err(_) => {
232 break;
233 },
234 _ => {},
235 };
236 }
237 }).unwrap()
238 };
239
240
241 Self {
242 vqueue: Vec::with_capacity(FRAME_QUEUE_LEN),
243 vpsend, vfrecv,
244 do_yuv: false,
245 vthread,
246 }
247 }
248 pub fn flush(&mut self) {
249 self.vqueue.clear();
250 SKIP_VDECODING.store(true, Ordering::Release);
251 for _ in 0..8 {
252 let _ = self.vfrecv.try_recv();
253 }
254 let _ = self.vpsend.send(PktSendEvent::Flush);
255 while self.vfrecv.try_recv().is_ok() { }
256 }
257 pub fn get_queue_size(&self) -> usize { self.vqueue.len() }
258 pub fn is_filled(&self, size: usize) -> bool {
259 self.vqueue.len() >= size
260 }
261 pub fn try_send_video(&mut self, evt: PktSendEvent) -> bool {
262 if self.vqueue.len() > 0 {
263 self.vqueue.push(evt);
264 false
265 } else {
266 self.try_send_event(evt)
267 }
268 }
269 fn try_send_event(&mut self, evt: PktSendEvent) -> bool {
270 if let Err(TrySendError::Full(evt)) = self.vpsend.try_send(evt) {
271 self.vqueue.insert(0, evt);
272 false
273 } else {
274 true
275 }
276 }
277 pub fn try_send_queued(&mut self) -> bool {
278 while !self.vqueue.is_empty() {
279 let pkt = self.vqueue.remove(0);
280 if !self.try_send_event(pkt) {
281 return false;
282 }
283 }
284 true
285 }
286 pub fn is_video_end(&self) -> bool {
287 VIDEO_END.load(Ordering::Relaxed)
288 }
289
290 pub fn is_yuv(&self) -> bool { self.do_yuv }
291
292 pub fn fill(&mut self, disp_queue: &mut DispQueue) {
293 while !disp_queue.is_full() {
294 let is_empty = disp_queue.is_empty();
295 if let Ok((pic, time)) = self.vfrecv.try_recv() {
296 let buf = pic.get_vbuf().unwrap();
297 self.do_yuv = buf.get_info().get_format().get_model().is_yuv();
298 let idx = disp_queue.end;
299 disp_queue.move_end();
300 let frm = &mut disp_queue.pool[idx];
301 if !self.do_yuv {
302 let sstride = buf.get_stride(0);
303 let src = buf.get_data();
304 frm.rgb_tex.with_lock(None, |buffer: &mut [u8], pitch: usize| {
305 let csize = sstride.min(pitch);
306 for (dst, src) in buffer.chunks_mut(pitch).zip(src.chunks(sstride)) {
307 (&mut dst[..csize]).copy_from_slice(&src[..csize]);
308 }
309 true
310 }).unwrap();
311 } else {
312 output_yuv(&mut frm.yuv_tex, &buf, disp_queue.width, disp_queue.height);
313 }
314 frm.valid = true;
315 frm.is_yuv = self.do_yuv;
316 frm.ts = time;
317 if is_empty {
318 disp_queue.first_ts = time;
319 }
320 disp_queue.last_ts = time;
321 } else {
322 break;
323 }
324 }
325 }
326
327 pub fn finish(self) {
328 SKIP_VDECODING.store(true, Ordering::Release);
329 for _ in 0..8 {
330 let _ = self.vfrecv.try_recv();
331 }
332 let _ = self.vpsend.send(PktSendEvent::ImmediateEnd);
333 self.vthread.join().unwrap();
334 }
335 }