// NOTE(review): this span is a patch fragment — the leading '+' below is a
// diff marker preserved verbatim, and the stray closing brace after the
// struct belongs to an enclosing item outside this view.
/// State for the VA-API accelerated H.264 decoder.
pub struct VaapiH264Decoder {
// Codec info handed back to callers; replaced once init succeeds.
info: NACodecInfoRef,
// VA display/context/surfaces bundle; None until the decoder is initialised.
vaapi: Option<VaapiInternals>,
// New field from this patch: when true, fill_frame() flips the last
// Image::new() argument (set only when the driver vendor string contains
// "Kaby Lake" — see the init hunk below).
+ needs_derive: bool,
// H.264 SPS/PPS sets (per the element types) gathered from the stream.
spses: Vec<SeqParameterSet>,
ppses: Vec<PicParameterSet>,
// Reference-picture bookkeeping.
frame_refs: FrameRefs,
}
}
// NOTE(review): unified-diff fragment — '-' lines are pre-patch, '+' lines
// post-patch, and at least one hunk in the middle (the start of the fourcc
// match and its first arm) is missing from this view.
-fn fill_frame(ifmt: VAImageFormat, pic: &Picture<PictureSync>, frm: &mut NABufferType) -> DecoderResult<()> {
// Copies a synced VA-API picture into the destination NABufferType.
// `needs_derive == true` turns the last Image::new() flag off — presumably
// selecting vaDeriveImage over vaCreateImage+vaGetImage; TODO confirm
// against the libva wrapper's Image::new signature.
+fn fill_frame(ifmt: VAImageFormat, pic: &Picture<PictureSync>, frm: &mut NABufferType, needs_derive: bool) -> DecoderResult<()> {
let mut vbuf = frm.get_vbuf().unwrap();
let (w, h) = pic.surface_size();
//let cur_ts = pic.timestamp();
- let img = Image::new(pic, ifmt, w, h, true).expect("get image");
+ let img = Image::new(pic, ifmt, w, h, !needs_derive).expect("get image");
let iimg = img.image();
let imgdata: &[u8] = img.as_ref();
// Missing hunk above: this deint_chroma call closes an earlier match arm
// (NV12, presumably — its header is not visible here).
deint_chroma(frm, &imgdata[iimg.offsets[1] as usize..], iimg.pitches[1] as usize);
},
// New arm added by the patch: planar YV12 copy.
+ VAFourcc::YV12 => {
+ let frm = NASimpleVideoFrame::from_video_buf(&mut vbuf).unwrap();
// The VA image is padded to 16-pixel (macroblock) alignment; check that
// it matches the rounded-up destination dimensions before copying.
+ validate!(iimg.width == (((frm.width[0] + 15) & !15) as u16));
+ validate!(iimg.height == (((frm.height[0] + 15) & !15) as u16));
+
+ copy_luma(&mut frm.data[frm.offset[0]..], frm.stride[0], &imgdata[iimg.offsets[0] as usize..], iimg.pitches[0] as usize, (frm.width[0] + 15) & !15, (frm.height[0] + 15) & !15);
// Image planes 1/2 are written to frame planes 2/1: YV12 stores V before
// U while the destination buffer is U-then-V — hence the crossed indices.
+ copy_luma(&mut frm.data[frm.offset[2]..], frm.stride[2], &imgdata[iimg.offsets[1] as usize..], iimg.pitches[1] as usize, (frm.width[1] + 15) & !15, (frm.height[1] + 15) & !15);
+ copy_luma(&mut frm.data[frm.offset[1]..], frm.stride[1], &imgdata[iimg.offsets[2] as usize..], iimg.pitches[2] as usize, (frm.width[2] + 15) & !15, (frm.height[2] + 15) & !15);
+ },
// Any other fourcc aborts — acceptable only if surface allocation upstream
// guarantees NV12/YV12; worth verifying in the full file.
_ => unimplemented!(),
};
Ok(())
// NOTE(review): fragment of the constructor — the enclosing `fn new()` (or
// Default impl) signature and closing braces are outside this view.
Self {
info: NACodecInfoRef::default(),
// No VA context yet; created later during init.
vaapi: None,
// New field starts disabled; enabled only by the vendor probe in init.
+ needs_derive: false,
// Streams typically carry one SPS and a handful of PPSes.
spses: Vec::with_capacity(1),
ppses: Vec::with_capacity(4),
frame_refs: FrameRefs::new(),
// NOTE(review): fragment of the decoder init path; the error context above
// this return and the create_config error-handler body are missing hunks.
return Err(DecoderError::Bug);
}
// Vendor probe added by this patch: only drivers reporting "Kaby Lake"
// get the derive path — presumably a driver quirk workaround; TODO confirm.
// Style nit: missing space in `needs_derive= ` — tidy when reworking this hunk.
+ let needs_derive= if let Ok(vendor) = display.query_vendor_string() {
+ vendor.contains("Kaby Lake")
+ } else { false };
+
// Request a VLD (slice-level decode) entrypoint with 4:2:0 output.
let config = display.create_config(vec![
VAConfigAttrib { type_: VAConfigAttribType::VAConfigAttribRTFormat, value: RTFormat::YUV420.into() },
], va_profile, VAEntrypoint::VAEntrypointVLD).map_err(|_| {
}
// Persist the probed state on self so the decode path can use it.
self.vaapi = Some(VaapiInternals { display, context, ref_pics, surfaces, ifmt });
+ self.needs_derive = needs_derive;
// Advertised output is always YUV420 regardless of the VA surface fourcc.
let vinfo = NAVideoInfo::new(vinfo.get_width(), vinfo.get_height(), false, YUV420_FORMAT);
self.info = NACodecInfo::new_ref(info.get_name(), NACodecTypeInfo::Video(vinfo), info.get_extradata()).into_ref();
// NOTE(review): two near-identical output paths (likely regular output vs.
// flush); both now forward self.needs_derive into fill_frame(). Consider
// factoring the shared sync/fill/recycle sequence into a helper once the
// full file is in view.
let is_ref = frm.is_ref;
let ftype = frm.ftype;
if let Ok(pic) = frm.pic.sync() {
// fill_frame errors are deliberately ignored here (`let _`) — a failed
// copy yields a stale/blank frame rather than a decode error.
- let _ = fill_frame(vactx.ifmt, &pic, &mut self.out_frm);
+ let _ = fill_frame(vactx.ifmt, &pic, &mut self.out_frm, self.needs_derive);
if !is_ref {
// Non-reference pictures release their surface — presumably recycled
// into the surface pool; the hunk with the push is not visible here.
if let Ok(surf) = pic.take_surface() {
// Second copy of the same sequence (separate call site in the full file).
let is_ref = frm.is_ref;
let ftype = frm.ftype;
if let Ok(pic) = frm.pic.sync() {
- let _ = fill_frame(vactx.ifmt, &pic, &mut self.out_frm);
+ let _ = fill_frame(vactx.ifmt, &pic, &mut self.out_frm, self.needs_derive);
if !is_ref {
if let Ok(surf) = pic.take_surface() {