]> git.nihav.org Git - nihav-player.git/blame - hwdec-vaapi/src/lib.rs
videoplayer: add hardware-accelerated H.264 video decoding support
[nihav-player.git] / hwdec-vaapi / src / lib.rs
CommitLineData
e5ccd68d
KS
1use std::collections::VecDeque;
2use std::convert::TryInto;
3use std::rc::Rc;
4
5use nihav_core::codecs::*;
6use nihav_core::io::byteio::*;
7use nihav_core::io::bitreader::*;
8use nihav_core::io::intcode::*;
9
10use libva::*;
11
/// Bails out of the enclosing function with `DecoderError::InvalidData` when
/// the condition is false; debug builds also log the failing file and line.
#[cfg(debug_assertions)]
macro_rules! validate {
    ($a:expr) => { if !$a { println!("check failed at {}:{}", file!(), line!()); return Err(DecoderError::InvalidData); } };
}
/// Release-build variant: same early return, no logging.
#[cfg(not(debug_assertions))]
macro_rules! validate {
    ($a:expr) => { if !$a { return Err(DecoderError::InvalidData); } };
}
20
21mod pic_ref;
22pub use pic_ref::*;
23#[allow(clippy::manual_range_contains)]
24#[allow(clippy::needless_range_loop)]
25mod sets;
26use sets::*;
27#[allow(clippy::manual_range_contains)]
28mod slice;
29use slice::*;
30
/// Exp-Golomb bitstream reading helpers used by the parameter-set and
/// slice-header parsers.
trait ReadUE {
    /// Reads an unsigned Exp-Golomb code ("ue(v)").
    fn read_ue(&mut self) -> DecoderResult<u32>;
    /// Reads a truncated Exp-Golomb code ("te(v)") for the given range.
    fn read_te(&mut self, range: u32) -> DecoderResult<u32>;
    /// Reads "ue(v)" and validates it against an inclusive upper bound.
    fn read_ue_lim(&mut self, max_val: u32) -> DecoderResult<u32> {
        let val = self.read_ue()?;
        validate!(val <= max_val);
        Ok(val)
    }
    /// Reads a signed Exp-Golomb code ("se(v)"): odd codes map to positive
    /// values, even codes to non-positive ones.
    fn read_se(&mut self) -> DecoderResult<i32> {
        let val = self.read_ue()?;
        if (val & 1) != 0 {
            Ok (((val >> 1) as i32) + 1)
        } else {
            Ok (-((val >> 1) as i32))
        }
    }
}
48
49impl<'a> ReadUE for BitReader<'a> {
50 fn read_ue(&mut self) -> DecoderResult<u32> {
51 Ok(self.read_code(UintCodeType::GammaP)? - 1)
52 }
53 fn read_te(&mut self, range: u32) -> DecoderResult<u32> {
54 if range == 1 {
55 if self.read_bool()? {
56 Ok(0)
57 } else {
58 Ok(1)
59 }
60 } else {
61 let val = self.read_ue()?;
62 validate!(val <= range);
63 Ok(val)
64 }
65 }
66}
67
68fn get_long_term_id(is_idr: bool, slice_hdr: &SliceHeader) -> Option<usize> {
69 if is_idr && !slice_hdr.long_term_reference {
70 None
71 } else {
72 let marking = &slice_hdr.adaptive_ref_pic_marking;
73 for (&op, &arg) in marking.memory_management_control_op.iter().zip(marking.operation_arg.iter()).take(marking.num_ops) {
74 if op == 6 {
75 return Some(arg as usize);
76 }
77 }
78 None
79 }
80}
81
/// Copies a NAL unit from `src` into `dst`, removing H.264 emulation-prevention
/// bytes (00 00 03 -> 00 00) and stopping early if another start code shows up
/// inside the buffer. Returns the offset in `src` at which scanning stopped.
fn unescape_nal(src: &[u8], dst: &mut Vec<u8>) -> usize {
    let mut off = 0;  // current read position in src
    let mut zrun = 0; // length of the current run of zero bytes
    dst.clear();
    dst.reserve(src.len());
    while off < src.len() {
        dst.push(src[off]);
        if src[off] != 0 {
            zrun = 0;
        } else {
            zrun += 1;
            // 00 00 03: skip the emulation-prevention byte.
            if zrun == 2 && off + 1 < src.len() && src[off + 1] == 0x03 {
                zrun = 0;
                off += 1;
            }
            // 00 00 00 01: start code of the next NAL — back up and stop.
            // NOTE(review): `off -= 3` underflows usize if a start code occurs
            // within the first three bytes, and truncating to `off - 3` drops
            // four bytes (one more than the three zeros already pushed) —
            // verify both against the callers' expectations.
            if zrun >= 3 && off + 1 < src.len() && src[off + 1] == 0x01 {
                off -= 3;
                dst.truncate(off);
                break;
            }
        }
        off += 1;
    }
    off
}
107
/// Returns an invalid picture entry used to pad VA-API reference lists.
fn make_dummy_h264_pic() -> PictureH264 {
    PictureH264::new(VA_INVALID_ID, 0, H264PictureFlag::Invalid.into(), 0, 0)
}
111
/// Conversion of decoder-side picture bookkeeping into VA-API picture entries.
trait MakePicH264 {
    /// Builds the `PictureH264` describing this picture for VA-API.
    fn make_pic(&self) -> PictureH264;
}
115
116impl MakePicH264 for PictureInfo {
117 fn make_pic(&self) -> PictureH264 {
118 let mut flags = H264PictureFlags::default();
119 let frame_idx = if let Some(id) = self.long_term {
120 flags |= H264PictureFlag::LongTermReference;
121 id as u32
122 } else {
123 if self.is_ref {
124 flags |= H264PictureFlag::ShortTermReference;
125 }
126 u32::from(self.id)
127 };
128 PictureH264::new(self.surface_id, frame_idx, flags, self.top_id as i32, self.bot_id as i32)
129 }
130}
131
132fn map_ref_list(refs: &[Option<PictureInfo>]) -> [PictureH264; 32] {
133 let mut ref_list = Vec::with_capacity(32);
134
135 for rpic in refs.iter() {
136 ref_list.push(rpic.as_ref().map_or_else(make_dummy_h264_pic, |pic| pic.make_pic()));
137 }
138
139 while ref_list.len() < 32 {
140 ref_list.push(make_dummy_h264_pic());
141 }
142 if let Ok(ret) = ref_list.try_into() {
143 ret
144 } else {
145 panic!("can't convert");
146 }
147}
148
149fn profile_name(profile: VAProfile::Type) -> &'static str {
150 match profile {
151 VAProfile::VAProfileMPEG2Simple => "MPEG2 Simple",
152 VAProfile::VAProfileMPEG2Main => "MPEG2 Main",
153 VAProfile::VAProfileMPEG4Simple => "MPEG4 Simple",
154 VAProfile::VAProfileMPEG4AdvancedSimple => "MPEG4 Advanced Simple",
155 VAProfile::VAProfileMPEG4Main => "MPEG4 Main",
156 VAProfile::VAProfileH264Baseline => "H264 Baseline",
157 VAProfile::VAProfileH264Main => "H264 Main",
158 VAProfile::VAProfileH264High => "H264 High",
159 VAProfile::VAProfileVC1Simple => "VC1 Simple",
160 VAProfile::VAProfileVC1Main => "VC1 Main",
161 VAProfile::VAProfileVC1Advanced => "VC1 Advanced",
162 VAProfile::VAProfileH263Baseline => "H263 Baseline",
163 VAProfile::VAProfileJPEGBaseline => "JPEG Baseline",
164 VAProfile::VAProfileH264ConstrainedBaseline => "H264 Constrained Baseline",
165 VAProfile::VAProfileVP8Version0_3 => "VP8",
166 VAProfile::VAProfileH264MultiviewHigh => "H.264 Multiview High",
167 VAProfile::VAProfileH264StereoHigh => "H264 Stereo High",
168 VAProfile::VAProfileHEVCMain => "H.EVC Main",
169 VAProfile::VAProfileHEVCMain10 => "H.EVC Main10",
170 VAProfile::VAProfileVP9Profile0 => "VP9 Profile 0",
171 VAProfile::VAProfileVP9Profile1 => "VP9 Profile 1",
172 VAProfile::VAProfileVP9Profile2 => "VP9 Profile 2",
173 VAProfile::VAProfileVP9Profile3 => "VP9 Profile 3",
174 VAProfile::VAProfileHEVCMain12 => "HEVC Main12",
175 VAProfile::VAProfileHEVCMain422_10 => "HEVC Main10 4:2:2",
176 VAProfile::VAProfileHEVCMain422_12 => "HEVC Main12 4:2:2",
177 VAProfile::VAProfileHEVCMain444 => "HEVC Main 4:4:4",
178 VAProfile::VAProfileHEVCMain444_10 => "HEVC Main10 4:4:4",
179 VAProfile::VAProfileHEVCMain444_12 => "HEVC Main12 4:4:4",
180 VAProfile::VAProfileHEVCSccMain => "HEVC SCC Main",
181 VAProfile::VAProfileHEVCSccMain10 => "HEVC SCC Main10",
182 VAProfile::VAProfileHEVCSccMain444 => "HEVC SCC Main 4:4:4",
183 VAProfile::VAProfileAV1Profile0 => "AV1 Profile 0",
184 VAProfile::VAProfileAV1Profile1 => "AV1 Profile 1",
185 VAProfile::VAProfileHEVCSccMain444_10 => "HEVC SCC Main10 4:4:4",
186 _ => "unknown",
187 }
188}
189
/// Size of the reference-picture array in VA-API H.264 picture parameters.
const NUM_REF_PICS: usize = 16;
191
/// A decoded picture sitting in the reorder queue.
struct WaitingFrame {
    ts: u64,                  // packet timestamp (DTS preferred over PTS)
    pic: Picture<PictureEnd>, // picture already submitted for decoding
    is_idr: bool,             // frame contained an IDR slice
    is_ref: bool,             // some slice marks the frame as a reference
    ftype: FrameType,         // overall frame type derived from its slices
}
199
/// Reorders decoded frames back into presentation order.
struct Reorderer {
    last_ref_dts: Option<u64>,     // timestamp of the last queued reference frame
    ready_idx: usize,              // number of queued frames cleared for output
    frames: VecDeque<WaitingFrame>, // frames awaiting output
}
205
206impl Default for Reorderer {
207 fn default() -> Self {
208 Self {
209 last_ref_dts: None,
210 ready_idx: 0,
211 frames: VecDeque::with_capacity(16),
212 }
213 }
214}
215
impl Reorderer {
    /// Queues a freshly decoded frame. Non-reference frames are inserted in
    /// timestamp order; reference frames are appended and everything up to the
    /// previous reference becomes eligible for output.
    fn add_frame(&mut self, new_frame: WaitingFrame) {
        if !new_frame.is_ref {
            if self.frames.is_empty() {
                self.frames.push_back(new_frame);
            } else {
                let new_dts = new_frame.ts;
                let mut idx = 0;
                for (i, frm) in self.frames.iter().enumerate() {
                    idx = i;
                    if frm.ts > new_dts {
                        break;
                    }
                }
                // NOTE(review): when no queued frame has a larger timestamp
                // this inserts before the last element instead of at the end —
                // confirm this is the intended ordering.
                self.frames.insert(idx, new_frame);
            }
        } else {
            // Mark all frames up to (and including) the previous reference as ready.
            for (i, frm) in self.frames.iter().enumerate() {
                if Some(frm.ts) == self.last_ref_dts {
                    self.ready_idx = i + 1;
                }
            }
            self.last_ref_dts = Some(new_frame.ts);
            self.frames.push_back(new_frame);
        }
    }
    /// Pops the next displayable frame once its surface is done rendering;
    /// when the queue backs up past 16 ready frames the status check is skipped.
    fn get_frame(&mut self) -> Option<WaitingFrame> {
        if self.ready_idx > 0 {
            match self.frames[0].pic.query_status() {
                _ if self.ready_idx > 16 => {},
                Ok(VASurfaceStatus::Ready) => {},
                Ok(VASurfaceStatus::Rendering) => return None,
                _ => {
                    // error statuses and query failures are not handled yet
                    unimplemented!();
                },
            };
            self.ready_idx -= 1;
            self.frames.pop_front()
        } else {
            None
        }
    }
    /// Resets the reorder state; draining `frames` is left to the caller.
    fn flush(&mut self) {
        self.last_ref_dts = None;
        self.ready_idx = 0;
    }
}
263
#[allow(dead_code)]
/// State tied to an open VA-API session.
struct VaapiInternals {
    display: Rc<Display>, // keeps the VA display alive for the session
    context: Rc<Context>, // decode context shared with created pictures
    ref_pics: Vec<(Picture<PictureSync>, VASurfaceID)>, // pictures still used as references
    surfaces: Vec<Surface>, // pool of free decode surfaces
    ifmt: VAImageFormat,  // image format used for surface readback
}
272
/// Hardware-accelerated H.264 decoder backed by VA-API.
pub struct VaapiH264Decoder {
    info: NACodecInfoRef,          // codec info exposed with output frames
    vaapi: Option<VaapiInternals>, // None until init() succeeds
    spses: Vec<SeqParameterSet>,   // parsed sequence parameter sets
    ppses: Vec<PicParameterSet>,   // parsed picture parameter sets
    frame_refs: FrameRefs,         // reference-picture bookkeeping
    nal_len: u8,                   // NAL length-field size from avcC (1..=4)
    out_frm: NABufferType,         // reusable output frame buffer
    reorderer: Reorderer,          // presentation-order reorder queue
    tb_num: u32,                   // timebase numerator of the last packet
    tb_den: u32,                   // timebase denominator of the last packet
}
285
/// Reads back a decoded surface into the output video buffer, de-interleaving
/// NV12 chroma into separate U and V planes. Only NV12 readback is supported.
fn fill_frame(ifmt: VAImageFormat, pic: &Picture<PictureSync>, frm: &mut NABufferType) -> DecoderResult<()> {
    let mut vbuf = frm.get_vbuf().unwrap();
    let (w, h) = pic.surface_size();
    //let cur_ts = pic.timestamp();

    let img = Image::new(pic, ifmt, w, h, true).expect("get image");

    let iimg = img.image();
    let imgdata: &[u8] = img.as_ref();

    match iimg.format.fourcc().map_err(|_| DecoderError::InvalidData)? {
        VAFourcc::NV12 => {
            let frm = NASimpleVideoFrame::from_video_buf(&mut vbuf).unwrap();
            validate!(iimg.width == (frm.width[0] as u16));
            validate!(iimg.height == (frm.height[0] as u16));

            // Luma plane: straight row-by-row copy honoring both strides.
            for (dline, sline) in frm.data[frm.offset[0]..].chunks_mut(frm.stride[0])
                    .zip(imgdata[iimg.offsets[0] as usize..].chunks(iimg.pitches[0] as usize))
                    .take(frm.height[0]) {
                dline[..frm.width[0]].copy_from_slice(&sline[..frm.width[0]]);
            }

            // Chroma: NV12 stores UVUV... pairs; split into the U and V planes.
            let mut uoff = frm.offset[1];
            let mut voff = frm.offset[2];
            for cline in imgdata[iimg.offsets[1] as usize..].chunks(iimg.pitches[1] as usize).take(frm.height[1]) {
                for (x, pair) in cline.chunks_exact(2).take(frm.width[1]).enumerate() {
                    frm.data[uoff + x] = pair[0];
                    frm.data[voff + x] = pair[1];
                }
                uoff += frm.stride[1];
                voff += frm.stride[2];
            }
        },
        _ => unimplemented!(),
    };
    Ok(())
}
323
324impl Default for VaapiH264Decoder {
325 fn default() -> Self {
326 Self {
327 info: NACodecInfoRef::default(),
328 vaapi: None,
329 spses: Vec::with_capacity(1),
330 ppses: Vec::with_capacity(4),
331 frame_refs: FrameRefs::new(),
332 nal_len: 0,
333 out_frm: NABufferType::None,
334 reorderer: Reorderer::default(),
335 tb_num: 0,
336 tb_den: 0,
337 }
338 }
339}
340
341impl VaapiH264Decoder {
    /// Creates an uninitialized decoder; equivalent to `Self::default()`.
    pub fn new() -> Self { Self::default() }
    /// Parses the `avcC` extradata (SPS/PPS), verifies that VA-API supports the
    /// stream's profile with a VLD entrypoint, and allocates the decode
    /// surfaces, context and output buffer.
    pub fn init(&mut self, info: NACodecInfoRef) -> DecoderResult<()> {
        if let NACodecTypeInfo::Video(vinfo) = info.get_properties() {
            let edata = info.get_extradata().unwrap();
//print!("edata:"); for &el in edata.iter() { print!(" {:02X}", el); } println!();
            let profile;
            let mut nal_buf = Vec::with_capacity(1024);
            // Only avcC-style extradata is handled; Annex B streams are rejected.
            if edata.len() > 11 && &edata[0..4] == b"avcC" {
                let mut mr = MemoryReader::new_read(edata.as_slice());
                let mut br = ByteReader::new(&mut mr);

                br.read_skip(4)?;
                let version = br.read_byte()?;
                validate!(version == 1);
                profile = br.read_byte()?;
                let _compatibility = br.read_byte()?;
                let _level = br.read_byte()?;
                let b = br.read_byte()?;
                validate!((b & 0xFC) == 0xFC);
                // low two bits hold the NAL length-field size minus one
                self.nal_len = (b & 3) + 1;
                let b = br.read_byte()?;
                validate!((b & 0xE0) == 0xE0);
                let num_sps = (b & 0x1F) as usize;
                for _ in 0..num_sps {
                    let len = br.read_u16be()? as usize;
                    let offset = br.tell() as usize;
                    validate!((br.peek_byte()? & 0x1F) == 7); // must be an SPS NAL
                    let _size = unescape_nal(&edata[offset..][..len], &mut nal_buf);
                    br.read_skip(len)?;
                    let sps = parse_sps(&nal_buf[1..])?;
                    self.spses.push(sps);
                }
                let num_pps = br.read_byte()? as usize;
                for _ in 0..num_pps {
                    let len = br.read_u16be()? as usize;
                    let offset = br.tell() as usize;
                    validate!((br.peek_byte()? & 0x1F) == 8); // must be a PPS NAL
                    let _size = unescape_nal(&edata[offset..][..len], &mut nal_buf);
                    br.read_skip(len)?;
                    let src = &nal_buf;

                    // Payload length in bits up to the rbsp_stop_one_bit.
                    let mut full_size = src.len() * 8;
                    for &byte in src.iter().rev() {
                        if byte == 0 {
                            full_size -= 8;
                        } else {
                            full_size -= (byte.trailing_zeros() + 1) as usize;
                            break;
                        }
                    }
                    validate!(full_size > 0);

                    let pps = parse_pps(&src[1..], &self.spses, full_size - 8)?;
                    // Replace a stored PPS with the same id, otherwise add it.
                    let mut found = false;
                    for stored_pps in self.ppses.iter_mut() {
                        if stored_pps.pic_parameter_set_id == pps.pic_parameter_set_id {
                            *stored_pps = pps.clone();
                            found = true;
                            break;
                        }
                    }
                    if !found {
                        self.ppses.push(pps);
                    }
                }
                if br.left() > 0 {
                    // Optional avcC extension block present for high profiles.
                    match profile {
                        100 | 110 | 122 | 144 => {
                            let b = br.read_byte()?;
                            validate!((b & 0xFC) == 0xFC);
                            // b & 3 -> chroma format
                            let b = br.read_byte()?;
                            validate!((b & 0xF8) == 0xF8);
                            // b & 7 -> luma depth minus 8
                            let b = br.read_byte()?;
                            validate!((b & 0xF8) == 0xF8);
                            // b & 7 -> chroma depth minus 8
                            let num_spsext = br.read_byte()? as usize;
                            for _ in 0..num_spsext {
                                let len = br.read_u16be()? as usize;
                                // parse spsext
                                br.read_skip(len)?;
                            }
                        },
                        _ => {},
                    };
                }
            } else {
                return Err(DecoderError::NotImplemented);
            }

            validate!(profile > 0);
            // Round dimensions up to whole macroblocks.
            let width = (vinfo.get_width() + 15) & !15;
            let height = (vinfo.get_height() + 15) & !15;

            let display = Display::open_silently().expect("open display");

            let num_surfaces = self.spses[0].num_ref_frames + 4 + 64;

            let va_profile = match profile {
                66 => VAProfile::VAProfileH264ConstrainedBaseline,
                77 => VAProfile::VAProfileH264Main,
                88 | 100 | 110 | 122 => VAProfile::VAProfileH264High,
                _ => return Err(DecoderError::NotImplemented),
            };
            // Check that the driver supports the profile with a VLD entrypoint.
            if let Ok(profiles) = display.query_config_profiles() {
                if !profiles.contains(&va_profile) {
println!("Profile {} ({}) not supported", profile, profile_name(va_profile));
                    return Err(DecoderError::NotImplemented);
                }
            } else {
                return Err(DecoderError::Bug);
            }
            if let Ok(points) = display.query_config_entrypoints(va_profile) {
                if !points.contains(&VAEntrypoint::VAEntrypointVLD) {
println!("no decoding support for this profile");
                    return Err(DecoderError::NotImplemented);
                }
            } else {
                return Err(DecoderError::Bug);
            }

            let config = display.create_config(vec![
                    VAConfigAttrib { type_: VAConfigAttribType::VAConfigAttribRTFormat, value: RTFormat::YUV420.into() },
                ], va_profile, VAEntrypoint::VAEntrypointVLD).map_err(|_| {
println!("config creation failed!");
                    DecoderError::Bug
                })?;
            let surfaces = display.create_surfaces(RTFormat::YUV420, None, width as u32, height as u32, Some(UsageHint::Decoder.into()), num_surfaces as u32).map_err(|_| DecoderError::AllocError)?;
            let context = display.create_context(&config, width as i32, height as i32, Some(&surfaces), true).map_err(|_| DecoderError::Bug)?;

            let ref_pics = Vec::new();

            // Prefer a 12 bpp image format (i.e. NV12) for surface readback.
            let image_formats = display.query_image_formats().map_err(|_| DecoderError::Bug)?;
            validate!(!image_formats.is_empty());
            let mut ifmt = image_formats[0];
            for fmt in image_formats.iter() {
                if fmt.bits_per_pixel == 12 {
                    ifmt = *fmt;
                    break;
                }
            }

            self.vaapi = Some(VaapiInternals { display, context, ref_pics, surfaces, ifmt });

            let vinfo = NAVideoInfo::new(width, height, false, YUV420_FORMAT);
            self.info = NACodecInfo::new_ref(info.get_name(), NACodecTypeInfo::Video(vinfo), info.get_extradata()).into_ref();
            self.out_frm = alloc_video_buffer(vinfo, 4)?;

            Ok(())
        } else {
            Err(DecoderError::InvalidData)
        }
    }
    /// Parses one packet of length-prefixed NAL units, builds the VA-API
    /// picture/IQ-matrix/slice buffers, submits the picture for decoding and
    /// queues it in the reorderer.
    fn decode(&mut self, pkt: &NAPacket) -> DecoderResult<()> {
        let src = pkt.get_buffer();
        let vactx = if let Some(ref mut ctx) = self.vaapi { ctx } else { return Err(DecoderError::Bug) };

        let timestamp = pkt.get_dts().unwrap_or_else(|| pkt.get_pts().unwrap_or(0));

        if vactx.surfaces.is_empty() {
panic!("ran out of free surfaces");
// return Err(DecoderError::AllocError);
        }
        let surface = vactx.surfaces.pop().unwrap();
        let surface_id = surface.id();
        let mut pic = Picture::new(timestamp, vactx.context.clone(), surface);
        let mut is_ref = false;
        let mut is_keyframe = false;

        self.tb_num = pkt.ts.tb_num;
        self.tb_den = pkt.ts.tb_den;

        let mut mr = MemoryReader::new_read(&src);
        let mut br = ByteReader::new(&mut mr);
        let mut frame_type = FrameType::I;
        let mut nal_buf = Vec::with_capacity(1024);
        while br.left() > 0 {
            // Each NAL unit is prefixed by a 1..=4-byte big-endian length (avcC).
            let size = match self.nal_len {
                1 => br.read_byte()? as usize,
                2 => br.read_u16be()? as usize,
                3 => br.read_u24be()? as usize,
                4 => br.read_u32be()? as usize,
                _ => unreachable!(),
            };
            validate!(br.left() >= (size as i64));
            let offset = br.tell() as usize;
            let raw_nal = &src[offset..][..size];
            let _size = unescape_nal(raw_nal, &mut nal_buf);

            let src = &nal_buf;
            validate!((src[0] & 0x80) == 0); // forbidden_zero_bit
            let nal_ref_idc = src[0] >> 5;
            let nal_unit_type = src[0] & 0x1F;

            // Payload length in bits up to the rbsp_stop_one_bit.
            let mut full_size = src.len() * 8;
            for &byte in src.iter().rev() {
                if byte == 0 {
                    full_size -= 8;
                } else {
                    full_size -= (byte.trailing_zeros() + 1) as usize;
                    break;
                }
            }
            validate!(full_size > 0);

            match nal_unit_type {
                1 | 5 => { // coded slice (non-IDR / IDR)
                    let is_idr = nal_unit_type == 5;
                    is_ref |= nal_ref_idc != 0;
                    is_keyframe |= is_idr;
                    let mut br = BitReader::new(&src[..(full_size + 7)/8], BitReaderMode::BE);
                    br.skip(8)?;

                    let slice_hdr = parse_slice_header(&mut br, &self.spses, &self.ppses, is_idr, nal_ref_idc)?;
                    // Frame type is the "strongest" slice type seen (B wins over P).
                    match slice_hdr.slice_type {
                        SliceType::P if frame_type != FrameType::B => frame_type = FrameType::P,
                        SliceType::SP if frame_type != FrameType::B => frame_type = FrameType::P,
                        SliceType::B => frame_type = FrameType::B,
                        _ => {},
                    };
                    // Resolve the PPS referenced by the slice and its SPS.
                    let mut cur_sps = 0;
                    let mut cur_pps = 0;
                    let mut pps_found = false;
                    for (i, pps) in self.ppses.iter().enumerate() {
                        if pps.pic_parameter_set_id == slice_hdr.pic_parameter_set_id {
                            cur_pps = i;
                            pps_found = true;
                            break;
                        }
                    }
                    validate!(pps_found);
                    let mut sps_found = false;
                    for (i, sps) in self.spses.iter().enumerate() {
                        if sps.seq_parameter_set_id == self.ppses[cur_pps].seq_parameter_set_id {
                            cur_sps = i;
                            sps_found = true;
                            break;
                        }
                    }
                    validate!(sps_found);
                    let sps = &self.spses[cur_sps];
                    let pps = &self.ppses[cur_pps];

                    if slice_hdr.first_mb_in_slice == 0 {
                        // First slice of the picture: submit picture-level buffers.
                        let (top_id, bot_id) = self.frame_refs.calc_picture_num(&slice_hdr, is_idr, nal_ref_idc, sps);
                        if is_idr {
                            // IDR clears all references; recycle their surfaces.
                            self.frame_refs.clear_refs();
                            for (pic, _) in vactx.ref_pics.drain(..) {
                                if let Ok(surf) = pic.take_surface() {
                                    vactx.surfaces.push(surf);
                                } else {
                                    panic!("can't take surface");
                                }
                            }
                        }
                        self.frame_refs.select_refs(sps, &slice_hdr, top_id);
                        let mut pic_refs = Vec::with_capacity(NUM_REF_PICS);
                        for pic in self.frame_refs.ref_pics.iter().rev().take(NUM_REF_PICS) {
                            pic_refs.push(pic.make_pic());
                        }
                        if slice_hdr.adaptive_ref_pic_marking_mode {
                            self.frame_refs.apply_adaptive_marking(&slice_hdr.adaptive_ref_pic_marking, slice_hdr.frame_num, 1 << sps.log2_max_frame_num)?;
                        }

                        // Pad the reference array to the fixed size VA-API expects.
                        while pic_refs.len() < NUM_REF_PICS {
                            pic_refs.push(make_dummy_h264_pic());
                        }

                        let mut flags = H264PictureFlags::default();
                        let frame_idx = if let Some(id) = get_long_term_id(is_idr, &slice_hdr) {
                            flags |= H264PictureFlag::LongTermReference;
                            id as u32
                        } else {
                            if nal_ref_idc != 0 {
                                flags |= H264PictureFlag::ShortTermReference;
                            }
                            u32::from(slice_hdr.frame_num)
                        };
                        let pic_refs: [PictureH264; NUM_REF_PICS] = pic_refs.try_into().unwrap_or_else(|_| panic!("can't convert"));

                        let h264pic = PictureH264::new(surface_id, frame_idx, flags, top_id as i32, bot_id as i32);

                        let seq_fields = H264SeqFields::new(
                                u32::from(sps.chroma_format_idc),
                                u32::from(sps.separate_colour_plane),
                                u32::from(sps.gaps_in_frame_num_value_allowed),
                                u32::from(sps.frame_mbs_only),
                                u32::from(sps.mb_adaptive_frame_field),
                                u32::from(sps.direct_8x8_inference),
                                u32::from(sps.level_idc >= 31),
                                u32::from(sps.log2_max_frame_num) - 4,
                                u32::from(sps.pic_order_cnt_type),
                                u32::from(sps.log2_max_pic_order_cnt_lsb).wrapping_sub(4),
                                u32::from(sps.delta_pic_order_always_zero)
                            );
                        let pic_fields = H264PicFields::new(
                                u32::from(pps.entropy_coding_mode),
                                u32::from(pps.weighted_pred),
                                u32::from(pps.weighted_bipred_idc),
                                u32::from(pps.transform_8x8_mode),
                                u32::from(slice_hdr.field_pic),
                                u32::from(pps.constrained_intra_pred),
                                u32::from(pps.pic_order_present),
                                u32::from(pps.deblocking_filter_control_present),
                                u32::from(pps.redundant_pic_cnt_present),
                                u32::from(nal_ref_idc != 0)
                            );
                        let ppd = PictureParameterBufferH264::new(
                                h264pic,
                                pic_refs,
                                sps.pic_width_in_mbs as u16 - 1,
                                sps.pic_height_in_mbs as u16 - 1,
                                sps.bit_depth_luma - 8,
                                sps.bit_depth_chroma - 8,
                                sps.num_ref_frames as u8,
                                &seq_fields,
                                pps.num_slice_groups as u8 - 1, // should be 0
                                pps.slice_group_map_type, // should be 0
                                0, //pps.slice_group_change_rate as u16 - 1,
                                pps.pic_init_qp as i8 - 26,
                                pps.pic_init_qs as i8 - 26,
                                pps.chroma_qp_index_offset,
                                pps.second_chroma_qp_index_offset,
                                &pic_fields,
                                slice_hdr.frame_num
                            );
                        let pic_param = BufferType::PictureParameter(PictureParameter::H264(ppd));
                        let buf = vactx.context.create_buffer(pic_param).map_err(|_| DecoderError::Bug)?;
                        pic.add_buffer(buf);

                        // VA-API takes only two 8x8 matrices (luma intra and inter).
                        let mut scaling_list_8x8 = [[0; 64]; 2];
                        scaling_list_8x8[0].copy_from_slice(&pps.scaling_list_8x8[0]);
                        scaling_list_8x8[1].copy_from_slice(&pps.scaling_list_8x8[3]);
                        let iqmatrix = BufferType::IQMatrix(IQMatrix::H264(IQMatrixBufferH264::new(pps.scaling_list_4x4, scaling_list_8x8)));
                        let buf = vactx.context.create_buffer(iqmatrix).map_err(|_| DecoderError::Bug)?;
                        pic.add_buffer(buf);

                        // Register the current picture in the reference lists.
                        let cpic = PictureInfo {
                                id: slice_hdr.frame_num,
                                full_id: top_id,
                                surface_id,
                                top_id, bot_id,
                                //pic_type: slice_hdr.slice_type.to_frame_type(),
                                is_ref,
                                is_idr,
                                long_term: get_long_term_id(is_idr, &slice_hdr),
                            };
                        if cpic.is_ref {
                            self.frame_refs.add_short_term(cpic.clone(), sps.num_ref_frames);
                        }
                        if let Some(lt_idx) = cpic.long_term {
                            self.frame_refs.add_long_term(lt_idx, cpic);
                        }
                    }

                    // Per-slice weighted-prediction tables (defaults: all zero).
                    let mut luma_weight_l0 = [0i16; 32];
                    let mut luma_offset_l0 = [0i16; 32];
                    let mut chroma_weight_l0 = [[0i16; 2]; 32];
                    let mut chroma_offset_l0 = [[0i16; 2]; 32];
                    let mut luma_weight_l1 = [0i16; 32];
                    let mut luma_offset_l1 = [0i16; 32];
                    let mut chroma_weight_l1 = [[0i16; 2]; 32];
                    let mut chroma_offset_l1 = [[0i16; 2]; 32];
                    let mut luma_weighted_l0 = false;
                    let mut chroma_weighted_l0 = false;
                    let mut luma_weighted_l1 = false;
                    let mut chroma_weighted_l1 = false;
                    let mut luma_log2_weight_denom = slice_hdr.luma_log2_weight_denom;
                    let mut chroma_log2_weight_denom = slice_hdr.chroma_log2_weight_denom;

                    // Explicit weighted prediction for list 0.
                    if (pps.weighted_pred && matches!(slice_hdr.slice_type, SliceType::P | SliceType::B)) || (pps.weighted_bipred_idc == 1 && slice_hdr.slice_type == SliceType::B) {
                        luma_weighted_l0 = true;
                        chroma_weighted_l0 = false;
                        for (i, winfo) in slice_hdr.weights_l0.iter().enumerate().take(slice_hdr.num_ref_idx_l0_active) {
                            if winfo.luma_weighted {
                                luma_weight_l0[i] = winfo.luma_weight.into();
                                luma_offset_l0[i] = winfo.luma_offset.into();
                            } else {
                                luma_weight_l0[i] = 1 << slice_hdr.luma_log2_weight_denom;
                            }
                            if winfo.chroma_weighted {
                                chroma_weight_l0[i][0] = winfo.chroma_weight[0].into();
                                chroma_weight_l0[i][1] = winfo.chroma_weight[1].into();
                                chroma_offset_l0[i][0] = winfo.chroma_offset[0].into();
                                chroma_offset_l0[i][1] = winfo.chroma_offset[1].into();
                            } else {
                                chroma_weight_l0[i][0] = 1 << slice_hdr.chroma_log2_weight_denom;
                                chroma_weight_l0[i][1] = 1 << slice_hdr.chroma_log2_weight_denom;
                                chroma_offset_l0[i][0] = 0;
                                chroma_offset_l0[i][1] = 0;
                            }
                            chroma_weighted_l0 |= winfo.chroma_weighted;
                        }
                    }
                    // Explicit weighted prediction for list 1 (B slices only).
                    if pps.weighted_bipred_idc == 1 && slice_hdr.slice_type == SliceType::B {
                        luma_weighted_l1 = true;
                        chroma_weighted_l1 = sps.chroma_format_idc != 0;
                        for (i, winfo) in slice_hdr.weights_l1.iter().enumerate().take(slice_hdr.num_ref_idx_l1_active) {
                            if winfo.luma_weighted {
                                luma_weight_l1[i] = winfo.luma_weight.into();
                                luma_offset_l1[i] = winfo.luma_offset.into();
                            } else {
                                luma_weight_l1[i] = 1 << slice_hdr.luma_log2_weight_denom;
                            }
                            if chroma_weighted_l1 && winfo.chroma_weighted {
                                chroma_weight_l1[i][0] = winfo.chroma_weight[0].into();
                                chroma_weight_l1[i][1] = winfo.chroma_weight[1].into();
                                chroma_offset_l1[i][0] = winfo.chroma_offset[0].into();
                                chroma_offset_l1[i][1] = winfo.chroma_offset[1].into();
                            } else {
                                chroma_weight_l1[i][0] = 1 << slice_hdr.chroma_log2_weight_denom;
                                chroma_weight_l1[i][1] = 1 << slice_hdr.chroma_log2_weight_denom;
                                chroma_offset_l1[i][0] = 0;
                                chroma_offset_l1[i][1] = 0;
                            }
                        }
                    }
                    // Implicit weighted bi-prediction fallback (equal weights).
                    if pps.weighted_bipred_idc == 2 && slice_hdr.slice_type == SliceType::B {
                        let num_l0 = slice_hdr.num_ref_idx_l0_active;
                        let num_l1 = slice_hdr.num_ref_idx_l1_active;
                        if num_l0 != 1 || num_l1 != 1 { //xxx: also exclude symmetric case
                            luma_weighted_l0 = false;
                            luma_weighted_l1 = false;
                            chroma_weighted_l0 = false;
                            chroma_weighted_l1 = false;
                            luma_log2_weight_denom = 5;
                            chroma_log2_weight_denom = 5;

                            for w in luma_weight_l0.iter_mut() {
                                *w = 32;
                            }
                            for w in luma_weight_l1.iter_mut() {
                                *w = 32;
                            }
                            for w in chroma_weight_l0.iter_mut() {
                                *w = [32; 2];
                            }
                            for w in chroma_weight_l1.iter_mut() {
                                *w = [32; 2];
                            }
                        }
                    }

                    let ref_pic_list_0 = map_ref_list(&self.frame_refs.cur_refs.ref_list0);
                    let ref_pic_list_1 = map_ref_list(&self.frame_refs.cur_refs.ref_list1);

                    let slice_param = SliceParameterBufferH264::new(
                            raw_nal.len() as u32,
                            0, // no offset
                            VASliceDataFlag::All,
                            br.tell() as u16,
                            slice_hdr.first_mb_in_slice as u16,
                            match slice_hdr.slice_type {
                                SliceType::I => 2,
                                SliceType::P => 0,
                                SliceType::B => 1,
                                SliceType::SI => 4,
                                SliceType::SP => 3,
                            },
                            slice_hdr.direct_spatial_mv_pred as u8,
                            (slice_hdr.num_ref_idx_l0_active as u8).saturating_sub(1),
                            (slice_hdr.num_ref_idx_l1_active as u8).saturating_sub(1),
                            slice_hdr.cabac_init_idc,
                            slice_hdr.slice_qp_delta as i8,
                            slice_hdr.disable_deblocking_filter_idc,
                            slice_hdr.slice_alpha_c0_offset / 2,
                            slice_hdr.slice_beta_offset / 2,
                            ref_pic_list_0,
                            ref_pic_list_1,
                            luma_log2_weight_denom,
                            chroma_log2_weight_denom,
                            luma_weighted_l0 as u8, luma_weight_l0, luma_offset_l0,
                            chroma_weighted_l0 as u8, chroma_weight_l0, chroma_offset_l0,
                            luma_weighted_l1 as u8, luma_weight_l1, luma_offset_l1,
                            chroma_weighted_l1 as u8, chroma_weight_l1, chroma_offset_l1,
                        );
                    let slc_param = BufferType::SliceParameter(SliceParameter::H264(slice_param));
                    let buf = vactx.context.create_buffer(slc_param).map_err(|_| DecoderError::Bug)?;
                    pic.add_buffer(buf);

                    // The escaped slice bytes go to the hardware as-is.
                    let slc_data = BufferType::SliceData(raw_nal.to_vec());
                    let buf = vactx.context.create_buffer(slc_data).map_err(|_| DecoderError::Bug)?;
                    pic.add_buffer(buf);
                },
                2 => { // slice data partition A
                    //slice header
                    //slice id = read_ue()
                    //cat 2 slice data (all but MB layer residual)
                    return Err(DecoderError::NotImplemented);
                },
                3 => { // slice data partition B
                    //slice id = read_ue()
                    //if pps.redundant_pic_cnt_present { redundant_pic_cnt = read_ue() }
                    //cat 3 slice data (MB layer residual)
                    return Err(DecoderError::NotImplemented);
                },
                4 => { // slice data partition C
                    //slice id = read_ue()
                    //if pps.redundant_pic_cnt_present { redundant_pic_cnt = read_ue() }
                    //cat 4 slice data (MB layer residual)
                    return Err(DecoderError::NotImplemented);
                },
                6 => {}, //SEI
                7 => { // in-band SPS
                    let sps = parse_sps(&src[1..])?;
                    self.spses.push(sps);
                },
                8 => { // in-band PPS: update or add by parameter-set id
                    validate!(full_size >= 8 + 16);
                    let pps = parse_pps(&src[1..], &self.spses, full_size - 8)?;
                    let mut found = false;
                    for stored_pps in self.ppses.iter_mut() {
                        if stored_pps.pic_parameter_set_id == pps.pic_parameter_set_id {
                            *stored_pps = pps.clone();
                            found = true;
                            break;
                        }
                    }
                    if !found {
                        self.ppses.push(pps);
                    }
                },
                9 => { // access unit delimiter
                },
                10 => {}, //end of sequence
                11 => {}, //end of stream
                12 => {}, //filler
                _ => {},
            };

            br.read_skip(size)?;
        }

        // Submit the assembled picture for hardware decoding.
        let bpic = pic.begin().expect("begin");
        let rpic = bpic.render().expect("render");
        let epic = rpic.end().expect("end");

        self.reorderer.add_frame(WaitingFrame {
                pic: epic,
                is_idr: is_keyframe,
                is_ref,
                ftype: frame_type,
                ts: timestamp,
            });

        // Recycle surfaces of pictures that are no longer referenced.
        let mut idx = 0;
        while idx < vactx.ref_pics.len() {
            let cur_surf_id = vactx.ref_pics[idx].1;
            if self.frame_refs.ref_pics.iter().any(|fref| fref.surface_id == cur_surf_id) {
                idx += 1;
            } else {
                let (pic, _) = vactx.ref_pics.remove(idx);
                if let Ok(surf) = pic.take_surface() {
                    vactx.surfaces.push(surf);
                } else {
                    panic!("can't take surface");
                }
            }
        }

        Ok(())
    }
905 fn get_frame(&mut self) -> Option<NAFrameRef> {
906 if let Some(ref mut vactx) = self.vaapi {
907 if let Some(frm) = self.reorderer.get_frame() {
908 let ts = frm.ts;
909 let is_idr = frm.is_idr;
910 let is_ref = frm.is_ref;
911 let ftype = frm.ftype;
912 if let Ok(pic) = frm.pic.sync() {
913 let _ = fill_frame(vactx.ifmt, &pic, &mut self.out_frm);
914
915 if !is_ref {
916 if let Ok(surf) = pic.take_surface() {
917 vactx.surfaces.push(surf);
918 } else {
919 panic!("can't take surface");
920 }
921 } else {
922 let id = pic.surface_id();
923 vactx.ref_pics.push((pic, id));
924 }
925
926 let ts = NATimeInfo::new(None, Some(ts), None, self.tb_num, self.tb_den);
927 Some(NAFrame::new(ts, ftype, is_idr, self.info.clone(), self.out_frm.clone()).into_ref())
928 } else {
929 panic!("can't sync");
930 }
931 } else {
932 None
933 }
934 } else {
935 None
936 }
937 }
938 fn get_last_frames(&mut self) -> Option<NAFrameRef> {
939 if let Some(ref mut vactx) = self.vaapi {
940 if let Some(frm) = self.reorderer.frames.pop_front() {
941 let ts = frm.ts;
942 let is_idr = frm.is_idr;
943 let is_ref = frm.is_ref;
944 let ftype = frm.ftype;
945 if let Ok(pic) = frm.pic.sync() {
946 let _ = fill_frame(vactx.ifmt, &pic, &mut self.out_frm);
947
948 if !is_ref {
949 if let Ok(surf) = pic.take_surface() {
950 vactx.surfaces.push(surf);
951 } else {
952 panic!("can't take surface");
953 }
954 } else {
955 let id = pic.surface_id();
956 vactx.ref_pics.push((pic, id));
957 }
958
959 let ts = NATimeInfo::new(None, Some(ts), None, self.tb_num, self.tb_den);
960 Some(NAFrame::new(ts, ftype, is_idr, self.info.clone(), self.out_frm.clone()).into_ref())
961 } else {
962 panic!("can't sync");
963 }
964 } else {
965 None
966 }
967 } else {
968 None
969 }
970 }
    /// Discards all queued and reference pictures (e.g. on seek), returning
    /// every surface to the free pool.
    fn flush(&mut self) {
        self.frame_refs.clear_refs();
        if let Some(ref mut vactx) = self.vaapi {
            // Drain the reorder queue; each picture must finish before its
            // surface can be reclaimed.
            for frm in self.reorderer.frames.drain(..) {
                if let Ok(pic) = frm.pic.sync() {
                    if let Ok(surf) = pic.take_surface() {
                        vactx.surfaces.push(surf);
                    } else {
                        panic!("can't take surface");
                    }
                } else {
                    panic!("can't sync");
                }
            }
            self.reorderer.flush();
            // Release retained reference pictures as well.
            for (pic, _) in vactx.ref_pics.drain(..) {
                if let Ok(surf) = pic.take_surface() {
                    vactx.surfaces.push(surf);
                } else {
                    panic!("can't take surface");
                }
            }
        }
    }
995}
996
// The decoder exposes no configurable options.
impl NAOptionHandler for VaapiH264Decoder {
    fn get_supported_options(&self) -> &[NAOptionDefinition] { &[] }
    fn set_options(&mut self, _options: &[NAOption]) {}
    fn query_option_value(&self, _name: &str) -> Option<NAValue> { None }
}
1002
1003use std::thread::*;
1004use std::sync::mpsc::*;
1005
/// Requests sent from the wrapper to the decoder worker thread.
enum DecMessage {
    Init(NACodecInfoRef), // initialize the decoder with codec info
    Decode(NAPacket),     // decode one packet
    Flush,                // drop queued state (no response is sent)
    GetFrame,             // request the next reordered frame
    GetLastFrames,        // drain remaining frames at end of stream
    End                   // terminate the worker thread
}
1014
/// Replies sent from the decoder worker thread back to the wrapper.
enum DecResponse {
    Ok,                 // request succeeded
    Nothing,            // no frame available
    Err(DecoderError),  // request failed
    Frame(NAFrameRef),  // a decoded frame
}
1021
/// Common interface for hardware-accelerated video decoders.
pub trait HWDecoder {
    /// Initialises the decoder for the stream described by `info`.
    fn init(&mut self, info: NACodecInfoRef) -> DecoderResult<()>;
    /// Submits one packet for decoding.
    fn queue_pkt(&mut self, pkt: &NAPacket) -> DecoderResult<()>;
    /// Returns the next decoded frame if one is ready.
    fn get_frame(&mut self) -> Option<NAFrameRef>;
    /// Returns any remaining frame (end-of-stream drain).
    fn get_last_frames(&mut self) -> Option<NAFrameRef>;
    /// Discards all queued input and decoded data.
    fn flush(&mut self);
}
1029
/// Runs a `VaapiH264Decoder` on a dedicated worker thread, exchanging
/// requests and replies over a pair of bounded channels.
pub struct HWWrapper {
    // Worker thread handle; taken out and joined on drop.
    handle: Option<JoinHandle<DecoderResult<()>>>,
    // Request channel towards the worker.
    send: SyncSender<DecMessage>,
    // Reply channel from the worker.
    recv: Receiver<DecResponse>,
}
1035
1036#[allow(clippy::new_without_default)]
1037impl HWWrapper {
1038 pub fn new() -> Self {
1039 let (in_send, in_recv) = sync_channel(1);
1040 let (out_send, out_recv) = sync_channel(1);
1041 let handle = std::thread::spawn(move || {
1042 let receiver = in_recv;
1043 let sender = out_send;
1044 let mut dec = VaapiH264Decoder::new();
1045 while let Ok(msg) = receiver.recv() {
1046 match msg {
1047 DecMessage::Init(info) => {
1048 let msg = if let Err(err) = dec.init(info) {
1049 DecResponse::Err(err)
1050 } else {
1051 DecResponse::Ok
1052 };
1053 sender.send(msg).map_err(|_| DecoderError::Bug)?;
1054 },
1055 DecMessage::Decode(pkt) => {
1056 let msg = match dec.decode(&pkt) {
1057 Ok(()) => DecResponse::Ok,
1058 Err(err) => DecResponse::Err(err),
1059 };
1060 sender.send(msg).map_err(|_| DecoderError::Bug)?;
1061 },
1062 DecMessage::GetFrame => {
1063 let msg = match dec.get_frame() {
1064 Some(frm) => DecResponse::Frame(frm),
1065 None => DecResponse::Nothing,
1066 };
1067 sender.send(msg).map_err(|_| DecoderError::Bug)?;
1068 },
1069 DecMessage::GetLastFrames => {
1070 let msg = match dec.get_last_frames() {
1071 Some(frm) => DecResponse::Frame(frm),
1072 None => DecResponse::Nothing,
1073 };
1074 sender.send(msg).map_err(|_| DecoderError::Bug)?;
1075 },
1076 DecMessage::Flush => dec.flush(),
1077 DecMessage::End => return Ok(()),
1078 };
1079 }
1080 Err(DecoderError::Bug)
1081 });
1082
1083 Self {
1084 handle: Some(handle),
1085 send: in_send,
1086 recv: out_recv,
1087 }
1088 }
1089}
1090
1091impl HWDecoder for HWWrapper {
1092 fn init(&mut self, info: NACodecInfoRef) -> DecoderResult<()> {
1093 if self.send.send(DecMessage::Init(info)).is_ok() {
1094 match self.recv.recv() {
1095 Ok(DecResponse::Ok) => Ok(()),
1096 Ok(DecResponse::Err(err)) => Err(err),
1097 Err(_) => Err(DecoderError::Bug),
1098 _ => unreachable!(),
1099 }
1100 } else {
1101 Err(DecoderError::Bug)
1102 }
1103 }
1104 fn queue_pkt(&mut self, pkt: &NAPacket) -> DecoderResult<()> {
1105 let pkt2 = NAPacket::new_from_refbuf(pkt.get_stream(), pkt.ts, pkt.keyframe, pkt.get_buffer());
1106 if self.send.send(DecMessage::Decode(pkt2)).is_ok() {
1107 match self.recv.recv() {
1108 Ok(DecResponse::Ok) => Ok(()),
1109 Ok(DecResponse::Err(err)) => Err(err),
1110 Err(_) => Err(DecoderError::Bug),
1111 _ => unreachable!(),
1112 }
1113 } else {
1114 Err(DecoderError::Bug)
1115 }
1116 }
1117 fn get_frame(&mut self) -> Option<NAFrameRef> {
1118 if self.send.send(DecMessage::GetFrame).is_ok() {
1119 match self.recv.recv() {
1120 Ok(DecResponse::Frame(frm)) => Some(frm),
1121 Ok(DecResponse::Nothing) => None,
1122 Err(_) => None,
1123 _ => unreachable!(),
1124 }
1125 } else {
1126 None
1127 }
1128 }
1129 fn get_last_frames(&mut self) -> Option<NAFrameRef> {
1130 if self.send.send(DecMessage::GetLastFrames).is_ok() {
1131 match self.recv.recv() {
1132 Ok(DecResponse::Frame(frm)) => Some(frm),
1133 Ok(DecResponse::Nothing) => None,
1134 Err(_) => None,
1135 _ => unreachable!(),
1136 }
1137 } else {
1138 None
1139 }
1140 }
1141 fn flush(&mut self) {
1142 let _ = self.send.send(DecMessage::Flush);
1143 }
1144}
1145
1146impl Drop for HWWrapper {
1147 fn drop(&mut self) {
1148 if self.send.send(DecMessage::End).is_ok() {
1149 let mut handle = None;
1150 std::mem::swap(&mut handle, &mut self.handle);
1151 if let Some(hdl) = handle {
1152 let _ = hdl.join();
1153 }
1154 }
1155 }
1156}
1157
1158impl NAOptionHandler for HWWrapper {
1159 fn get_supported_options(&self) -> &[NAOptionDefinition] { &[] }
1160 fn set_options(&mut self, _options: &[NAOption]) {}
1161 fn query_option_value(&self, _name: &str) -> Option<NAValue> { None }
1162}
1163
/// Creates a new threaded hardware-accelerated H.264 decoder.
pub fn new_h264_hwdec() -> Box<dyn HWDecoder + Send> {
    Box::new(HWWrapper::new())
}
1167
#[cfg(test)]
mod test {
    use nihav_core::codecs::*;
    use nihav_core::io::byteio::*;
    use nihav_core::demuxers::{RegisteredDemuxers, create_demuxer};
    use nihav_commonfmt::generic_register_all_demuxers;
    use super::VaapiH264Decoder;
    use std::io::prelude::*;

    /// Decodes the first video stream of file `name` using demuxer `dname`
    /// and dumps every frame as a PGM image (Y plane followed by interleaved
    /// U/V rows) named `<opfx><timestamp>_<frameno>.pgm` in assets/test_out.
    fn decode_h264(name: &str, dname: &str, dmx_reg: &RegisteredDemuxers, opfx: &str) -> DecoderResult<()> {
        let dmx_f = dmx_reg.find_demuxer(dname).expect("demuxer exists");
        let file = std::fs::File::open(name).expect("file exists");
        let mut fr = FileReader::new_read(file);
        let mut br = ByteReader::new(&mut fr);
        let mut dmx = create_demuxer(dmx_f, &mut br).expect("create demuxer");

        // Pick the first video stream and initialise the decoder for it.
        let mut vstream_id = 0;
        let mut dec = VaapiH264Decoder::new();
        for stream in dmx.get_streams() {
            if stream.get_media_type() == StreamType::Video {
                dec.init(stream.get_info()).expect("inited");
                vstream_id = stream.get_id();
                break;
            }
        }

        let mut frameno = 0;
        while let Ok(pkt) = dmx.get_frame() {
            // Skip packets belonging to other (e.g. audio) streams.
            if pkt.get_stream().get_id() != vstream_id {
                continue;
            }
            dec.decode(&pkt).expect("decoded");
            let frm = dec.get_last_frames().expect("get frame");
            let timestamp = frm.get_dts().unwrap_or_else(|| frm.get_pts().unwrap_or(0));

            let pic = frm.get_buffer().get_vbuf().expect("got picture");

            let nname = format!("assets/test_out/{}{:06}_{}.pgm", opfx, timestamp, frameno);
            frameno += 1;
            let mut file = std::fs::File::create(&nname).expect("create file");
            let (w, h) = pic.get_dimensions(0);
            // h * 3 / 2 rows: full-height luma plus half-height chroma rows.
            file.write_all(format!("P5\n{} {}\n255\n", w, h * 3 / 2).as_bytes()).expect("header written");
            let data = pic.get_data();
            // Luma plane, one row per stride chunk, cropped to the width.
            for yline in data.chunks(pic.get_stride(0)).take(h) {
                file.write_all(&yline[..w]).expect("Y line written");
            }
            // Chroma planes written as alternating half-width U and V rows.
            for (uline, vline) in data[pic.get_offset(1)..].chunks(pic.get_stride(1))
                    .zip(data[pic.get_offset(2)..].chunks(pic.get_stride(2))).take(h / 2) {
                file.write_all(&uline[..w / 2]).expect("U line written");
                file.write_all(&vline[..w / 2]).expect("V line written");
            }
        }
        Ok(())
    }


    // samples if not specified otherwise come from H.264 conformance suite

    #[test]
    fn test_h264_simple() {
        let mut dmx_reg = RegisteredDemuxers::new();
        generic_register_all_demuxers(&mut dmx_reg);

        decode_h264("assets/ITU/DimpledSpanishCuckoo-mobile.mp4", "mov", &dmx_reg, "hw").unwrap();
    }
}