1 use nihav_core::codecs::*;
2 use nihav_core::io::byteio::*;
3 use nihav_codec_support::vq::*;
// Pixel16 — apparently a newtype wrapper around a u16 holding a packed
// RGB555 pixel (R in bits 10..14, G in bits 5..9, B in bits 0..4).
// The struct declaration itself falls on lines not visible in this listing.
5 #[derive(Default,Clone,Copy,PartialEq)]
// Split the packed value into its three 5-bit (r, g, b) components.
9 fn unpack(self) -> (u8, u8, u8) {
10 (((self.0 >> 10) & 0x1F) as u8, ((self.0 >> 5) & 0x1F) as u8, (self.0 & 0x1F) as u8)
// Inverse of unpack(): assemble a pixel from the three components.
// NOTE(review): inputs are not masked to 5 bits here, so values > 31 would
// spill into neighbouring fields — callers appear to guarantee <= 31.
12 fn pack(r: u8, g: u8, b: u8) -> Self {
13 Pixel16{ 0: (u16::from(r) << 10) | (u16::from(g) << 5) | u16::from(b) }
// Vector-quantiser element implementation for RGB555 pixels so the generic
// median-cut quantiser from nihav_codec_support::vq can operate on them.
// NOTE(review): several interior lines of this impl are missing from the
// visible listing (match arms, loop headers, closing braces).
16 impl VQElement for Pixel16 {
// Squared Euclidean distance between two pixels in (r, g, b) space.
17 fn dist(&self, rval: Self) -> u32 {
18 let (r0, g0, b0) = self.unpack();
19 let (r1, g1, b1) = rval.unpack();
20 let rd = i32::from(r0) - i32::from(r1);
21 let gd = i32::from(g0) - i32::from(g1);
22 let bd = i32::from(b0) - i32::from(b1);
// The sum of squares is non-negative, so the cast to u32 is lossless.
23 (rd * rd + gd * gd + bd * bd) as u32
// Smallest / largest possible codewords (all components 0 / all 31).
25 fn min_cw() -> Self { Pixel16(0x0000) }
26 fn max_cw() -> Self { Pixel16(0x7FFF) }
// Component-wise minimum of two pixels.
27 fn min(&self, rval: Self) -> Self {
28 let (r0, g0, b0) = self.unpack();
29 let (r1, g1, b1) = rval.unpack();
30 Self::pack(r0.min(r1), g0.min(g1), b0.min(b1))
// Component-wise maximum of two pixels.
32 fn max(&self, rval: Self) -> Self {
33 let (r0, g0, b0) = self.unpack();
34 let (r1, g1, b1) = rval.unpack();
35 Self::pack(r0.max(r1), g0.max(g1), b0.max(b1))
// Three colour components per element.
37 fn num_components() -> usize { 3 }
// Counting sort of the pixels by one colour component; 5-bit components
// mean 32 possible values, hence the fixed 32-entry histogram.
38 fn sort_by_component(arr: &mut [Self], component: usize) {
// Histogram pass over the selected component.
39 let mut counts = [0; 32];
40 for pix in arr.iter() {
41 let (r, g, b) = pix.unpack();
42 let idx = match component {
// Prefix sums turn the histogram into destination offsets.
49 let mut offs = [0; 32];
51 offs[i + 1] = offs[i] + counts[i];
// Scatter pass into a fixed 16-entry scratch buffer; the assert documents
// that the input never exceeds one 4x4 block (16 pixels).
53 let mut dst = [Pixel16(0); 16];
54 assert!(dst.len() >= arr.len());
55 for pix in arr.iter() {
56 let (r, g, b) = pix.unpack();
57 let idx = match component {
62 dst[offs[idx]] = *pix;
// NOTE(review): `len` is bound on a line not visible here — presumably
// arr.len(); confirm against the full source.
66 arr.copy_from_slice(&dst[..len]);
// Index (0 = R, 1 = G, 2 = B) of the component with the widest min..max
// range — the axis along which the quantiser splits a box. Each max
// component is >= the corresponding min, so the unsigned subtraction
// cannot underflow.
68 fn max_dist_component(min: &Self, max: &Self) -> usize {
69 let (r0, g0, b0) = max.unpack();
70 let (r1, g1, b1) = min.unpack();
71 let rd = u32::from(r0) - u32::from(r1);
72 let gd = u32::from(g0) - u32::from(g1);
73 let bd = u32::from(b0) - u32::from(b1);
// The returned indices themselves fall on lines not visible here.
74 if rd > gd && rd > bd {
76 } else if bd > rd && bd > gd {
// Accumulator used by the median-cut quantiser to average groups of pixels:
// per-component running sums plus a (weighted) pixel count.
91 impl VQElementSum<Pixel16> for Pixel16Sum {
92 fn zero() -> Self { Pixel16Sum { rsum: 0, gsum: 0, bsum: 0, count: 0 } }
// Add `count` copies of one pixel to the running component sums.
// NOTE(review): the `self.count += count;` update presumably sits on a
// line not visible in this listing.
93 fn add(&mut self, rval: Pixel16, count: u64) {
94 self.rsum += u64::from(r) * count;
95 self.gsum += u64::from(g) * count;
96 self.bsum += u64::from(b) * count;
// Rounded-to-nearest component averages, packed back into one pixel.
// Averages of 5-bit values stay <= 31, so pack() is safe here.
// NOTE(review): a `count != 0` guard is likely on the missing line 101 —
// otherwise these divisions would panic; confirm against the full source.
100 fn get_centroid(&self) -> Pixel16 {
// `+ count / 2` implements round-to-nearest integer division.
102 let r = ((self.rsum + self.count / 2) / self.count) as u8;
103 let g = ((self.gsum + self.count / 2) / self.count) as u8;
104 let b = ((self.bsum + self.count / 2) / self.count) as u8;
105 Pixel16::pack(r, g, b)
// BlockState evaluates and stores the candidate encodings for one 4x4
// block: a single fill colour, a 2-colour block, and an "8-colour" block
// (two colours per 2x2 subblock), each with its accumulated squared
// distortion and the per-pixel selection flags. Only part of the struct
// and its impl are visible in this listing fragment.
121 clr8: [[Pixel16; 2]; 4],
// Compute palettes, flag patterns and distortions for all coding modes of
// the given 4x4 block (16 pixels, row stride 4).
125 fn calc_stats(&mut self, buf: &[Pixel16; 16]) {
// Run median cut for the two best representative colours first.
126 let num_cw = quantise_median_cut::<Pixel16, Pixel16Sum>(buf, &mut self.clr2);
// Fill value with bit 10 (0x400) forced clear: write_fill() emits
// `fill_val | 0x8000`, and a set 0x400 bit would make that collide with
// the 0x8400 skip opcode produced by write_skips().
128 self.fill_val = Pixel16 { 0: buf[0].0 & !0x400 };
// Otherwise average all 16 pixels to obtain the fill colour.
130 let mut avg = Pixel16Sum::zero();
131 for pix in buf.iter() {
134 self.fill_val = Pixel16 { 0: avg.get_centroid().0 & !0x400 };
// Distortion of painting the whole block with the fill colour.
137 for pix in buf.iter() {
138 self.fill_dist += pix.dist(self.fill_val);
// A perfect fill makes the costlier modes pointless — rule them out.
140 if self.fill_dist == 0 {
141 self.clr2_dist = std::u32::MAX;
142 self.clr8_dist = std::u32::MAX;
// Two-colour mode: build the 16-bit per-pixel pattern and its distortion
// against the two quantised colours (the branch condition sits on lines
// not visible here).
146 self.clr2_flags = 0u16;
150 for pix in buf.iter() {
151 let dist0 = pix.dist(self.clr2[0]);
152 let dist1 = pix.dist(self.clr2[1]);
154 self.clr2_flags |= mask;
155 self.clr2_dist += dist0;
157 self.clr2_dist += dist1;
// Normalise so the top flag bit is clear by inverting the pattern and
// swapping the colours — presumably required by the bitstream so this
// block cannot be mistaken for another opcode; confirm against the spec.
161 if (self.clr2_flags & 0x8000) != 0 {
162 self.clr2_flags = !self.clr2_flags;
163 self.clr2.swap(0, 1);
// (enclosing branch not fully visible) fall back to the fill colour,
// apparently when median cut produced only a single codeword.
166 self.clr2_dist = self.fill_dist;
167 self.clr2 = [self.fill_val; 2];
169 if self.clr2_dist == 0 {
170 self.clr8_dist = std::u32::MAX;
// Eight-colour mode: an independent 2-colour palette per 2x2 subblock.
174 self.clr8 = [[Pixel16 { 0: 0}; 2]; 4];
// Subblock i starts at column (i & 1) * 2, row (i >> 1) * 2 of the 4x4
// block stored with stride 4 — hence (i & 2) * 4 for the row offset.
179 let off = (i & 1) * 2 + (i & 2) * 4;
180 let src2 = [buf[off], buf[off + 1], buf[off + 4], buf[off + 5]];
181 let nc = quantise_median_cut::<Pixel16, Pixel16Sum>(&src2, &mut self.clr8[i]);
// Duplicate the colour when the subblock quantised to a single codeword.
183 self.clr8[i][1] = self.clr8[i][0];
186 let dist0 = src2[j].dist(self.clr8[i][0]);
187 let dist1 = src2[j].dist(self.clr8[i][1]);
189 self.clr8_flags |= mask;
190 self.clr8_dist += dist0;
192 self.clr8_dist += dist1;
// As with clr2: keep the MSB of the flags word clear, here by flipping
// only the last subblock's nibble and swapping its two colours.
197 if (self.clr8_flags & 0x8000) != 0 {
198 self.clr8_flags ^= 0xF000;
199 self.clr8[3].swap(0, 1);
// Render the fill mode into the reconstructed frame; dst points at the
// block's top-left pixel, dstride is the row stride in u16 pixels.
202 fn put_fill(&self, dst: &mut [u16], dstride: usize) {
203 for line in dst.chunks_mut(dstride) {
205 line[i] = self.fill_val.0;
// Render the two-colour mode: a clear flag bit selects colour 0.
209 fn put_clr2(&self, dst: &mut [u16], dstride: usize) {
212 if (self.clr2_flags & (1 << (i + j * 4))) == 0 {
213 dst[i + j * dstride] = self.clr2[0].0;
215 dst[i + j * dstride] = self.clr2[1].0;
// Render the eight-colour mode; one nibble of clr8_flags per 2x2 subblock.
220 fn put_clr8(&self, dst: &mut [u16], dstride: usize) {
222 let off = (i & 1) * 2 + (i & 2) * dstride;
223 let cur_flg = (self.clr8_flags >> (i * 4)) & 0xF;
// Here a SET flag bit selects colour 0 — hence the inverted flags when
// indexing (note the opposite polarity from put_clr2 above).
224 dst[off] = self.clr8[i][( !cur_flg & 1) as usize].0;
225 dst[off + 1] = self.clr8[i][((!cur_flg >> 1) & 1) as usize].0;
226 dst[off + dstride] = self.clr8[i][((!cur_flg >> 2) & 1) as usize].0;
227 dst[off + 1 + dstride] = self.clr8[i][((!cur_flg >> 3) & 1) as usize].0;
// Serialise the fill mode: the colour with bit 15 set (bit 10 was cleared
// in calc_stats, so this can never equal a 0x84xx skip code).
230 fn write_fill(&self, bw: &mut ByteWriter) -> EncoderResult<()> {
231 bw.write_u16le(self.fill_val.0 | 0x8000)?;
// Serialise the two-colour mode: pattern word, then the two colours.
234 fn write_clr2(&self, bw: &mut ByteWriter) -> EncoderResult<()> {
235 bw.write_u16le(self.clr2_flags)?;
236 bw.write_u16le(self.clr2[0].0)?;
237 bw.write_u16le(self.clr2[1].0)?;
// Serialise the eight-colour mode: flags, then two colours per subblock;
// bit 15 forced on the very first colour marks the block as eight-colour.
240 fn write_clr8(&self, bw: &mut ByteWriter) -> EncoderResult<()> {
241 bw.write_u16le(self.clr8_flags)?;
242 bw.write_u16le(self.clr8[0][0].0 | 0x8000)?;
243 bw.write_u16le(self.clr8[0][1].0)?;
244 bw.write_u16le(self.clr8[1][0].0)?;
245 bw.write_u16le(self.clr8[1][1].0)?;
246 bw.write_u16le(self.clr8[2][0].0)?;
247 bw.write_u16le(self.clr8[2][1].0)?;
248 bw.write_u16le(self.clr8[3][0].0)?;
249 bw.write_u16le(self.clr8[3][1].0)?;
// Encoder context: output stream handle, the packet pending delivery, a
// pool of reusable 16-bit video buffers, and the previously reconstructed
// frame used as the inter-prediction reference.
// NOTE(review): further fields (quality, frmcount, key_int — referenced by
// the methods below) fall on lines not visible in this listing.
254 struct MSVideo1Encoder {
255 stream: Option<NAStreamRef>,
256 pkt: Option<NAPacket>,
257 pool: NAVideoBufferPool<u16>,
258 lastfrm: Option<NAVideoBufferRef<u16>>,
264 impl MSVideo1Encoder {
// (constructor fragment) the pool holds two frames: current + reference.
269 pool: NAVideoBufferPool::new(2),
// Copy one 4x4 block of raw u16 pixels from a strided plane into a flat
// 16-entry Pixel16 buffer (row stride 4).
276 fn get_block(src: &[u16], sstride: usize, buf: &mut [Pixel16; 16]) {
277 for (line, dst) in src.chunks(sstride).zip(buf.chunks_mut(4)) {
279 dst[i] = Pixel16 { 0: line[i] };
// Emit a skip-run opcode: 0x8400 | run length in blocks. Callers cap the
// run at 1023 so it cannot disturb the 0x8400 marker bits.
283 fn write_skips(bw: &mut ByteWriter, skips: usize) -> EncoderResult<()> {
284 bw.write_u16le((skips as u16) | 0x8400)?;
// Encode an inter (predicted) frame: a block close enough to the
// co-located block of the previous frame joins a skip run, every other
// block is coded in whichever intra mode (fill / 2-colour / 8-colour) has
// the least distortion. The reconstructed pixels are written into cur_frm
// so it can serve as the next reference. Returns the is_intra flag
// (initialised true below; presumably cleared when a block is skipped —
// the line doing so is not visible in this listing).
287 fn encode_inter(bw: &mut ByteWriter, cur_frm: &mut NAVideoBuffer<u16>, in_frm: &NAVideoBuffer<u16>, prev_frm: &NAVideoBuffer<u16>, _quality: u8) -> EncoderResult<bool> {
288 let mut is_intra = true;
// Input frame plane geometry.
289 let src = in_frm.get_data();
290 let sstride = in_frm.get_stride(0);
291 let soff = in_frm.get_offset(0);
292 let (w, h) = in_frm.get_dimensions(0);
// Reference (previous reconstructed) frame.
293 let rsrc = prev_frm.get_data();
294 let rstride = prev_frm.get_stride(0);
295 let roff = prev_frm.get_offset(0);
// Destination: the frame reconstructed alongside the bitstream.
296 let dstride = cur_frm.get_stride(0);
297 let doff = cur_frm.get_offset(0);
298 let dst = cur_frm.get_data_mut().unwrap();
// Number of consecutive skipped blocks not yet written out.
299 let mut skip_run = 0;
// Walk source, reference and destination planes in lockstep, one 4-row
// block strip at a time.
300 for ((sstrip, rstrip), dstrip) in (&src[soff..]).chunks(sstride * 4).take(h / 4).zip((&rsrc[roff..]).chunks(rstride * 4)).zip((&mut dst[doff..]).chunks_mut(dstride * 4)) {
301 for x in (0..w).step_by(4) {
302 let mut buf = [Pixel16::min_cw(); 16];
303 let mut refbuf = [Pixel16::min_cw(); 16];
304 Self::get_block(&sstrip[x..], sstride, &mut buf);
305 Self::get_block(&rstrip[x..], rstride, &mut refbuf);
// Distortion of simply keeping the reference block (skipping).
307 let mut skip_dist = 0;
308 for (pix, rpix) in buf.iter().zip(refbuf.iter()) {
309 skip_dist += pix.dist(*rpix);
// A skip run holds at most 1023 blocks — flush when the cap is hit.
314 if skip_run == 1023 {
315 Self::write_skips(bw, skip_run)?;
// Evaluate the intra coding modes for this block.
321 let mut bstate = BlockState::default();
322 bstate.calc_stats(&buf);
324 let dst = &mut dstrip[x..];
// Prefer skipping whenever it is no worse than the best fill.
325 if skip_dist <= bstate.fill_dist {
328 if skip_run == 1023 {
329 Self::write_skips(bw, skip_run)?;
// Otherwise pick the cheapest mode; a pending skip run is flushed
// (on lines partly not visible here) before the block is written.
332 } else if bstate.fill_dist <= bstate.clr2_dist {
333 bstate.put_fill(dst, dstride);
335 Self::write_skips(bw, skip_run)?;
338 bstate.write_fill(bw)?;
339 } else if bstate.clr8_dist < bstate.clr2_dist {
340 bstate.put_clr8(dst, dstride);
342 Self::write_skips(bw, skip_run)?;
345 bstate.write_clr8(bw)?;
347 bstate.put_clr2(dst, dstride);
349 Self::write_skips(bw, skip_run)?;
352 bstate.write_clr2(bw)?;
// Flush a skip run left pending at the end of the frame.
357 Self::write_skips(bw, skip_run)?;
361 } //xxx: something for inter?
// Encode an intra (key) frame: every 4x4 block is coded in whichever mode
// (fill / 2-colour / 8-colour) yields the least distortion — no skip runs,
// since there is no reference frame. Reconstructed pixels go into cur_frm
// so it can serve as the reference for following inter frames.
364 fn encode_intra(bw: &mut ByteWriter, cur_frm: &mut NAVideoBuffer<u16>, in_frm: &NAVideoBuffer<u16>, _quality: u8) -> EncoderResult<bool> {
// Input frame plane geometry.
365 let src = in_frm.get_data();
366 let sstride = in_frm.get_stride(0);
367 let soff = in_frm.get_offset(0);
368 let (w, h) = in_frm.get_dimensions(0);
// Destination (reconstruction) plane geometry.
369 let dstride = cur_frm.get_stride(0);
370 let doff = cur_frm.get_offset(0);
371 let dst = cur_frm.get_data_mut().unwrap();
// One 4-row strip of blocks at a time, blocks left to right.
372 for (sstrip, dstrip) in (&src[soff..]).chunks(sstride * 4).take(h / 4).zip((&mut dst[doff..]).chunks_mut(dstride * 4)) {
373 for x in (0..w).step_by(4) {
374 let mut buf = [Pixel16::min_cw(); 16];
375 Self::get_block(&sstrip[x..], sstride, &mut buf);
// Evaluate all coding modes and pick the cheapest one.
376 let mut bstate = BlockState::default();
377 bstate.calc_stats(&buf);
379 let dst = &mut dstrip[x..];
380 if bstate.fill_dist <= bstate.clr2_dist {
381 bstate.put_fill(dst, dstride);
382 bstate.write_fill(bw)?;
383 } else if bstate.clr8_dist < bstate.clr2_dist {
384 bstate.put_clr8(dst, dstride);
385 bstate.write_clr8(bw)?;
387 bstate.put_clr2(dst, dstride);
388 bstate.write_clr2(bw)?;
// Pixel format descriptor for packed little-endian RGB555 — the only
// input/output format this encoder works with (5 bits per component, R at
// shift 10, G at shift 5, B at shift 0, 2 bytes per pixel, no alpha).
397 const RGB555_FORMAT: NAPixelFormaton = NAPixelFormaton {
398 model: ColorModel::RGB(RGBSubmodel::RGB), components: 3,
400 Some(NAPixelChromaton{ h_ss: 0, v_ss: 0, packed: true, depth: 5, shift: 10, comp_offs: 0, next_elem: 2 }),
401 Some(NAPixelChromaton{ h_ss: 0, v_ss: 0, packed: true, depth: 5, shift: 5, comp_offs: 0, next_elem: 2 }),
402 Some(NAPixelChromaton{ h_ss: 0, v_ss: 0, packed: true, depth: 5, shift: 0, comp_offs: 0, next_elem: 2 }),
404 elem_size: 2, be: false, alpha: false, palette: false };
// NAEncoder plumbing: format negotiation, stream setup, per-frame encoding
// and packet delivery. Several interior lines (Ok(...) returns, else arms,
// closing braces) are missing from the visible listing.
406 impl NAEncoder for MSVideo1Encoder {
// Constrain the caller's parameters to what this encoder supports:
// RGB555 video with width/height rounded up to a multiple of 4.
407 fn negotiate_format(&self, encinfo: &EncodeParameters) -> EncoderResult<EncodeParameters> {
408 match encinfo.format {
409 NACodecTypeInfo::None => {
410 let mut ofmt = EncodeParameters::default();
411 ofmt.format = NACodecTypeInfo::Video(NAVideoInfo::new(0, 0, true, RGB555_FORMAT));
414 NACodecTypeInfo::Audio(_) => Err(EncoderError::FormatError),
415 NACodecTypeInfo::Video(vinfo) => {
// `(x + 3) & !3` rounds the dimensions up to multiples of 4.
416 let outinfo = NAVideoInfo::new((vinfo.width + 3) & !3, (vinfo.height + 3) & !3, true, RGB555_FORMAT);
417 let mut ofmt = *encinfo;
418 ofmt.format = NACodecTypeInfo::Video(outinfo);
// Validate the negotiated parameters, create the output stream and
// preallocate the two-frame (current + reference) buffer pool.
423 fn init(&mut self, stream_id: u32, encinfo: EncodeParameters) -> EncoderResult<NAStreamRef> {
424 match encinfo.format {
425 NACodecTypeInfo::None => Err(EncoderError::FormatError),
426 NACodecTypeInfo::Audio(_) => Err(EncoderError::FormatError),
427 NACodecTypeInfo::Video(vinfo) => {
// Only RGB555 input is accepted ...
428 if vinfo.format != RGB555_FORMAT {
429 return Err(EncoderError::FormatError);
// ... and both dimensions must already be multiples of 4.
431 if ((vinfo.width | vinfo.height) & 3) != 0 {
432 return Err(EncoderError::FormatError);
435 let out_info = NAVideoInfo::new(vinfo.width, vinfo.height, true, RGB555_FORMAT);
436 let info = NACodecInfo::new("msvideo1", NACodecTypeInfo::Video(out_info), None);
437 let mut stream = NAStream::new(StreamType::Video, stream_id, info, encinfo.tb_num, encinfo.tb_den, 0);
438 stream.set_num(stream_id as usize);
439 let stream = stream.into_ref();
440 if self.pool.prealloc_video(out_info, 2).is_err() {
441 return Err(EncoderError::AllocError);
444 self.stream = Some(stream.clone());
445 self.quality = encinfo.quality;
// Encode one frame: inter when a reference frame exists, intra otherwise;
// the resulting packet is held until get_packet() is called. The keyframe
// counter handling (frmcount reset) sits partly on lines not visible here.
451 fn encode(&mut self, frm: &NAFrame) -> EncoderResult<()> {
452 let buf = frm.get_buffer();
453 if let Some(ref vbuf) = buf.get_vbuf16() {
454 let mut cur_frm = self.pool.get_free().unwrap();
455 let mut dbuf = Vec::with_capacity(4);
456 let mut gw = GrowableMemoryWriter::new_write(&mut dbuf);
457 let mut bw = ByteWriter::new(&mut gw);
458 if self.frmcount == 0 {
461 let is_intra = if let Some(ref prev_buf) = self.lastfrm {
462 Self::encode_inter(&mut bw, &mut cur_frm, vbuf, prev_buf, self.quality)?
464 Self::encode_intra(&mut bw, &mut cur_frm, vbuf, self.quality)?
// The reconstructed frame becomes the next reference.
466 self.lastfrm = Some(cur_frm);
467 self.pkt = Some(NAPacket::new(self.stream.clone().unwrap(), frm.ts, is_intra, dbuf));
469 if self.frmcount == self.key_int {
474 Err(EncoderError::InvalidParameters)
// Hand out (and clear) the packet produced by the last encode() call.
477 fn get_packet(&mut self) -> EncoderResult<Option<NAPacket>> {
479 std::mem::swap(&mut self.pkt, &mut npkt);
// Nothing is buffered across frames, so flushing has no work to do here.
482 fn flush(&mut self) -> EncoderResult<()> {
// Options this encoder understands: currently only the keyframe interval,
// constrained to 0..=128 frames.
488 const ENCODER_OPTS: &[NAOptionDefinition] = &[
490 name: KEYFRAME_OPTION, description: KEYFRAME_OPTION_DESC,
491 opt_type: NAOptionDefinitionType::Int(Some(0), Some(128)) },
// Option plumbing: only options declared in ENCODER_OPTS are accepted,
// everything else is silently ignored (the match arms / closing braces sit
// on lines not visible in this listing).
494 impl NAOptionHandler for MSVideo1Encoder {
495 fn get_supported_options(&self) -> &[NAOptionDefinition] { ENCODER_OPTS }
// Apply recognised options; the definition's check() validates type/range
// before the value is stored.
496 fn set_options(&mut self, options: &[NAOption]) {
497 for option in options.iter() {
498 for opt_def in ENCODER_OPTS.iter() {
499 if opt_def.check(option).is_ok() {
502 if let NAValue::Int(intval) = option.value {
// Range was validated above (0..=128), so the u8 cast is safe.
503 self.key_int = intval as u8;
// Report the current value of a named option (only the keyframe interval).
512 fn query_option_value(&self, name: &str) -> Option<NAValue> {
514 KEYFRAME_OPTION => Some(NAValue::Int(i64::from(self.key_int))),
// Public constructor used by the codec registry to instantiate the encoder.
520 pub fn get_encoder() -> Box<dyn NAEncoder + Send> {
521 Box::new(MSVideo1Encoder::new())
// Round-trip regression test (module header / #[cfg(test)] attribute sits
// on lines not visible here): decode a sample AVI with the registered
// decoders, re-encode it with this encoder into an AVI, and compare the
// MD5 hashes of the output against known-good values.
526 use nihav_core::codecs::*;
527 use nihav_core::demuxers::*;
528 use nihav_core::muxers::*;
530 use nihav_commonfmt::*;
531 use nihav_codec_support::test::enc_video::*;
532 use super::RGB555_FORMAT;
535 fn test_ms_video1_encoder() {
// Register everything needed for the decode -> encode -> mux pipeline.
536 let mut dmx_reg = RegisteredDemuxers::new();
537 generic_register_all_demuxers(&mut dmx_reg);
538 let mut dec_reg = RegisteredDecoders::new();
539 generic_register_all_decoders(&mut dec_reg);
540 ms_register_all_decoders(&mut dec_reg);
541 let mut mux_reg = RegisteredMuxers::new();
542 generic_register_all_muxers(&mut mux_reg);
543 let mut enc_reg = RegisteredEncoders::new();
544 ms_register_all_encoders(&mut enc_reg);
// Input sample to transcode.
546 let dec_config = DecoderTestParams {
548 in_name: "assets/Misc/TalkingHead_352x288.avi",
549 stream_type: StreamType::Video,
// Encoder under test and its output container.
553 let enc_config = EncoderTestParams {
555 enc_name: "msvideo1",
556 out_name: "msvideo1.avi",
// Target video parameters: RGB555, as required by the encoder.
559 let dst_vinfo = NAVideoInfo {
562 format: RGB555_FORMAT,
566 let enc_params = EncodeParameters {
567 format: NACodecTypeInfo::Video(dst_vinfo),
// Kept for manual inspection of the produced file when debugging.
574 //test_encoding_to_file(&dec_config, &enc_config, enc_params);
575 test_encoding_md5(&dec_config, &enc_config, enc_params,
576 &[0x0fc27a11, 0x04337f5d, 0xb8037362, 0xc4f69d8b]);