1 use nihav_core::codecs::*;
2 use nihav_core::io::byteio::*;
3 use nihav_codec_support::vq::*;
5 #[derive(Default,Clone,Copy,PartialEq,Debug)]
// Vector-quantisation element for one 2x2 block: four luma samples plus one
// U and one V sample (chroma already averaged over the block).
// NOTE(review): this numbered listing has elided lines (non-contiguous
// numbering); comments below describe only the code that is visible.
11 impl VQElement for YUVCode {
// Squared-error distance between two codes: squared luma differences
// (accumulated over the four Y samples; the accumulator initialisation and
// update are on elided lines) plus squared chroma differences.
12 fn dist(&self, rval: Self) -> u32 {
14 for (y0, y1) in self.y.iter().zip(rval.y.iter()) {
15 let yd = i32::from(*y0) - i32::from(*y1);
18 let ud = i32::from(self.u) - i32::from(rval.u);
19 let vd = i32::from(self.v) - i32::from(rval.v);
20 (ysum + ud * ud + vd * vd) as u32
// Extremal codewords for the quantiser's initial bounding box.
22 fn min_cw() -> Self { YUVCode { y: [0; 4], u: 0, v: 0 } }
23 fn max_cw() -> Self { YUVCode { y: [255; 4], u: 255, v: 255 } }
// Component-wise minimum of two codes (loop header over the four Y samples
// is elided).
24 fn min(&self, rval: Self) -> Self {
25 let mut ycode = YUVCode::default();
27 ycode.y[i] = self.y[i].min(rval.y[i]);
29 ycode.u = self.u.min(rval.u);
30 ycode.v = self.v.min(rval.v);
// Component-wise maximum of two codes.
33 fn max(&self, rval: Self) -> Self {
34 let mut ycode = YUVCode::default();
36 ycode.y[i] = self.y[i].max(rval.y[i]);
38 ycode.u = self.u.max(rval.u);
39 ycode.v = self.v.max(rval.v);
// Six sortable components: y[0..4], then u, then v.
42 fn num_components() -> usize { 6 }
// Stable counting sort of the codes by one component (all components are
// u8, hence the 256-entry histogram). Components 0-3 select a luma sample;
// the arms for u/v are on elided lines.
43 fn sort_by_component(arr: &mut [Self], component: usize) {
44 let mut counts = [0; 256];
45 for entry in arr.iter() {
46 let idx = match component {
47 0 | 1 | 2 | 3 => entry.y[component],
// Prefix sums turn the histogram into destination offsets.
53 let mut offs = [0; 256];
55 offs[i + 1] = offs[i] + counts[i];
57 let mut dst = vec![YUVCode::default(); arr.len()];
58 for entry in arr.iter() {
59 let idx = match component {
60 0 | 1 | 2 | 3 => entry.y[component],
64 dst[offs[idx]] = *entry;
67 arr.copy_from_slice(dst.as_slice());
// Index of the component with the widest min..max range (used by the
// median-cut quantiser to pick the split axis). Assumes max >= min
// per component, so the unsigned subtractions cannot underflow.
69 fn max_dist_component(min: &Self, max: &Self) -> usize {
73 let d = u32::from(max.y[i]) - u32::from(min.y[i]);
79 let ud = u32::from(max.u) - u32::from(min.u);
84 let vd = u32::from(max.v) - u32::from(min.v);
// Running sums used to compute cluster centroids during quantisation.
// NOTE(review): listing has elided lines; comments cover visible code only.
100 impl VQElementSum<YUVCode> for YUVCodeSum {
101 fn zero() -> Self { Self::default() }
// Accumulate a code weighted by its occurrence count (loop over the four
// luma sums is partially elided).
102 fn add(&mut self, rval: YUVCode, count: u64) {
104 self.ysum[i] += u64::from(rval.y[i]) * count;
106 self.usum += u64::from(rval.u) * count;
107 self.vsum += u64::from(rval.v) * count;
// Rounded average of the accumulated sums; adding count/2 before the
// division rounds to nearest. Presumably guarded against count == 0 on an
// elided line — TODO confirm.
110 fn get_centroid(&self) -> YUVCode {
112 let mut ycode = YUVCode::default();
114 ycode.y[i] = ((self.ysum[i] + self.count / 2) / self.count) as u8;
116 ycode.u = ((self.usum + self.count / 2) / self.count) as u8;
117 ycode.v = ((self.vsum + self.count / 2) / self.count) as u8;
// Small deterministic PRNG used to pre-fill codebooks with noise so that
// unused entries are not all-zero. Looks like an xorshift32 variant: only
// the first shift step is visible, the remaining update steps (and the
// seed write-back) are on elided lines — TODO confirm.
130 fn new() -> Self { Self { seed: 0x12345678 } }
131 fn next(&mut self) -> u8 {
132 let mut x = self.seed;
133 x ^= x.wrapping_shl(13);
// Returns the top byte of the updated state.
136 (self.seed >> 24) as u8
// Fill one codebook entry (4 luma + 2 chroma samples) with random bytes.
138 fn fill_entry(&mut self, entry: &mut YUVCode) {
139 for y in entry.y.iter_mut() {
142 entry.u = self.next();
143 entry.v = self.next();
// Pixel format for grayscale input: full-range (YUVJ) luma-only, a single
// 8-bit non-subsampled component. Remaining struct fields are on elided
// lines of this listing.
147 const GRAY_FORMAT: NAPixelFormaton = NAPixelFormaton {
148 model: ColorModel::YUV(YUVSubmodel::YUVJ),
150 comp_info: [Some(NAPixelChromaton{h_ss: 0, v_ss: 0, packed: false, depth: 8, shift: 0, comp_offs: 0, next_elem: 1}), None, None, None, None],
// Bit-mask writer for the per-block coding decisions that Cinepak stores as
// big-endian 32-bit words (skip/coded in inter frames, V1/V4 choice).
// NOTE(review): method bodies are mostly elided in this listing.
171 fn reset(&mut self) {
176 fn put_v1(&mut self) {
183 fn put_v4(&mut self) {
// Record one inter-frame decision bit: 1 = coded, 0 = skipped
// (`!skip as u32` maps bool to the bit; shifting into position is
// presumably on an elided line — TODO confirm).
191 fn put_inter(&mut self, skip: bool) {
193 self.mask |= !skip as u32;
// Flush the partially-filled mask word, padding the remaining bit
// positions up to 32.
199 fn flush(&mut self) {
200 self.masks.push(self.mask);
205 if self.pos == 0 { return; }
206 while self.pos < 32 {
214 #[derive(Clone,Copy,PartialEq)]
221 impl std::string::ToString for QuantMode {
222 fn to_string(&self) -> String {
224 QuantMode::ELBG => "elbg".to_string(),
225 QuantMode::Fast => "fast".to_string(),
226 QuantMode::MedianCut => "mediancut".to_string(),
231 #[derive(Clone,Copy,PartialEq)]
// Encoder state. Per-strip codebooks are kept twice: `*_cb` holds the
// codebooks actually written for the previous frame, `*_cur_cb` the ones
// being built for the current frame (so updates can be delta-coded).
// NOTE(review): several fields of this struct are on elided lines.
238 struct CinepakEncoder {
239 stream: Option<NAStreamRef>,
// Last reconstructed frame, used as the skip reference for inter frames.
240 lastfrm: Option<NAVideoBufferRef<u8>>,
241 pkt: Option<NAPacket>,
// Input vectors gathered per strip: one V1 (downscaled 4x4) entry per
// block, four V4 (2x2) entries per block.
250 v1_entries: Vec<YUVCode>,
251 v4_entries: Vec<YUVCode>,
// 256-entry codebooks, one per strip.
252 v1_cb: Vec<[YUVCode; 256]>,
253 v4_cb: Vec<[YUVCode; 256]>,
254 v1_cur_cb: Vec<[YUVCode; 256]>,
255 v4_cur_cb: Vec<[YUVCode; 256]>,
// Scratch bins for the "fast" quantiser (entries grouped by variance).
264 fst_bins: [Vec<YUVCode>; 4],
// Candidate vectors kept for the optional refinement pass.
265 v1_cand: Vec<YUVCode>,
266 v4_cand: Vec<YUVCode>,
// Per-block coding decision (Skip/V1/V4) recorded in the first pass.
267 cmode: Vec<CodingMode>,
/// Average of four 8-bit samples, rounding up (the +3 bias before the
/// shift). Used to downscale 2x2 pixel groups when building V1 vectors.
fn avg4(a: u8, b: u8, c: u8, d: u8) -> u8 {
    let sum = u16::from(a) + u16::from(b) + u16::from(c) + u16::from(d);
    ((sum + 3) >> 2) as u8
}
/// Squared difference between a sample and the block mean.
/// The subtraction is ordered by magnitude so the intermediate u8
/// difference cannot underflow.
fn variance(a: u8, mean: u8) -> u32 {
    let diff = if a >= mean { a - mean } else { mean - a };
    u32::from(diff) * u32::from(diff)
}
// Back-patch a 24-bit big-endian size field: `pos` is the position right
// after the size field was reserved. Seeks back over the payload plus the
// 3-byte field, writes payload size + 4 (the chunk size includes the 1-byte
// id and the 3-byte size field itself), then returns to the end of the
// output. The `Ok(())` return is on an elided line of this listing.
282 fn patch_size(bw: &mut ByteWriter, pos: u64) -> EncoderResult<()> {
283 let size = bw.tell() - pos;
284 bw.seek(SeekFrom::Current(-((size + 3) as i64)))?;
285 bw.write_u24be((size + 4) as u32)?;
286 bw.seek(SeekFrom::End(0))?;
// Quantise `entries` into `codebook`, returning the number of codebook
// entries used. Median cut provides the initial codebook; if it already
// fits (fewer entries than codebook slots) the ELBG refinement step is
// presumably skipped on an elided line — TODO confirm.
290 fn elbg_quant(entries: &[YUVCode], codebook: &mut [YUVCode]) -> usize {
291 let cb_len = quantise_median_cut::<YUVCode, YUVCodeSum>(entries, codebook);
292 if cb_len < codebook.len() {
295 let mut elbg: ELBG<YUVCode, YUVCodeSum> = ELBG::new(codebook);
296 elbg.quantise(entries, codebook)
// "Fast" quantisation: partition entries into four bins by luma variance
// magnitude (log2 of the variance), then give each bin a codebook budget
// proportional to its population and quantise each bin independently.
// NOTE(review): several lines (bin clearing, pushes, offset bookkeeping,
// return value) are elided in this listing.
300 fn quant_fast(bins: &mut [Vec<YUVCode>; 4], entries: &[YUVCode], codebook: &mut [YUVCode]) -> usize {
301 for bin in bins.iter_mut() {
304 for &entry in entries.iter() {
// Classify by luma activity: variance of the four Y samples around their
// mean.
305 let y_avg = avg4(entry.y[0], entry.y[1], entry.y[2], entry.y[3]);
306 let dist = entry.y.iter().fold(0u32, |acc, &x| acc + variance(x, y_avg));
307 let ilog = if dist == 0 { 0 } else { 32 - dist.leading_zeros() };
308 let bin = match ilog {
309 0..=3 => &mut bins[0],
310 4..=7 => &mut bins[1],
311 8..=11 => &mut bins[2],
// Distribute codebook slots across bins, rounding the per-bin target up
// ((free * len + left - 1) / left).
316 let mut free_cw = codebook.len();
317 let mut entries_left = entries.len();
319 for bin in bins.iter() {
323 if free_cw == 0 || entries_left == 0 {
326 let target = (free_cw * bin.len() + entries_left - 1) / entries_left;
327 let cur_len = elbg_quant(bin, &mut codebook[offset..][..target]);
330 entries_left -= bin.len();
335 impl CinepakEncoder {
// Constructor (the `fn new` header and most field initialisers are on
// elided lines of this listing). Defaults to the fast quantiser and
// capacity for the usual maximum of strips.
342 qmode: QuantMode::Fast,
349 v1_entries: Vec::new(),
350 v4_entries: Vec::new(),
351 v1_cb: Vec::with_capacity(2),
352 v4_cb: Vec::with_capacity(2),
353 v1_cur_cb: Vec::with_capacity(2),
354 v4_cur_cb: Vec::with_capacity(2),
361 masks: MaskWriter::new(),
362 skip_dist: Vec::new(),
363 fst_bins: [Vec::new(), Vec::new(), Vec::new(), Vec::new()],
// Gather V1 and V4 input vectors for one strip (rows start..end) of the
// input frame. For each 4x4 block: one V1 entry built from 2x2 luma
// averages plus averaged chroma, and four V4 entries (one per 2x2 luma
// quadrant; their chroma assignment is on elided lines).
// NOTE(review): inner loop headers and stride advances are elided.
369 fn read_strip(&mut self, in_frm: &NAVideoBuffer<u8>, start: usize, end: usize) {
370 let ystride = in_frm.get_stride(0);
371 let mut yoff = in_frm.get_offset(0) + start * ystride;
372 let ustride = in_frm.get_stride(1);
// Chroma planes are 4:2:0, hence start / 2.
373 let mut uoff = in_frm.get_offset(1) + start / 2 * ustride;
374 let vstride = in_frm.get_stride(2);
375 let mut voff = in_frm.get_offset(2) + start / 2 * vstride;
376 let (width, _) = in_frm.get_dimensions(0);
377 let data = in_frm.get_data();
378 self.v1_entries.clear();
379 self.v4_entries.clear();
380 for _ in (start..end).step_by(4) {
381 for x in (0..width).step_by(4) {
// Chroma defaults to 128 (neutral) — presumably kept for grayscale input;
// TODO confirm against the elided grayscale branch.
382 let mut yblk = [0; 16];
383 let mut ublk = [128; 4];
384 let mut vblk = [128; 4];
387 yblk[i + j * 4] = data[yoff + x + i + j * ystride];
393 ublk[i + j * 2] = data[uoff + x / 2 + i + j * ustride];
394 vblk[i + j * 2] = data[voff + x / 2 + i + j * vstride];
// V1: each Y is the average of one 2x2 quadrant; chroma averaged over the
// whole block.
398 self.v1_entries.push(YUVCode {
399 y: [avg4(yblk[ 0], yblk[ 1], yblk[ 4], yblk[ 5]),
400 avg4(yblk[ 2], yblk[ 3], yblk[ 6], yblk[ 7]),
401 avg4(yblk[ 8], yblk[ 9], yblk[12], yblk[13]),
402 avg4(yblk[10], yblk[11], yblk[14], yblk[15])],
403 u: avg4(ublk[0], ublk[1], ublk[2], ublk[3]),
404 v: avg4(vblk[0], vblk[1], vblk[2], vblk[3]),
// V4: quadrant i starts at yblk offset (i&1)*2 + (i&2)*4 (top-left of each
// 2x2 sub-block in the 4-wide luma array).
407 let yidx = (i & 1) * 2 + (i & 2) * 4;
408 self.v4_entries.push(YUVCode {
409 y: [ yblk[yidx], yblk[yidx + 1], yblk[yidx + 4], yblk[yidx + 5] ],
// Linear search for the codebook entry closest (by YUVCode::dist) to
// `code`; returns (index, distance). The best-match tracking inside the
// loop is on elided lines — presumably keeps the first minimum and may
// early-exit on an exact match; TODO confirm.
420 fn find_nearest(codebook: &[YUVCode], code: YUVCode) -> (u8, u32) {
421 let mut min_dist = std::u32::MAX;
423 for (i, cw) in codebook.iter().enumerate() {
424 let dist = cw.dist(code);
433 (idx as u8, min_dist)
// Decide whether delta-updating the previous codebook is cheaper than
// writing a full one. `cb_size` is bytes per entry (4 grayscale, 6 colour).
// The update form costs the changed entries plus one 32-bit presence mask
// per 32 entries; the comparison/return is on an elided line.
435 fn can_update_cb(new_cb: &[YUVCode], old_cb: &[YUVCode], cb_size: usize) -> bool {
436 let mut skip_count = 0;
437 for (new, old) in new_cb.iter().zip(old_cb.iter()) {
442 let full_size = cb_size * new_cb.len();
443 let upd_size = cb_size * (new_cb.len() - skip_count) + (new_cb.len() + 31) / 32 * 4;
// Write one codebook chunk. Full form: all entries in order. Update form:
// per-32-entry bitmask followed by only the changed entries; trailing
// unchanged entries are trimmed first. Chroma is stored biased by 0x80
// (the `^ 0x80` converts to signed representation); luma-only in grayscale
// mode. Chunk size is back-patched at the end.
// NOTE(review): the id adjustment for the update/grayscale variants and
// the chunk header write are on elided lines.
446 fn write_cb(bw: &mut ByteWriter, mut id: u8, new_cb: &[YUVCode], old_cb: &[YUVCode], grayscale: bool, update: bool, num_elem: usize) -> EncoderResult<()> {
455 let chunk_pos = bw.tell();
457 for entry in new_cb.iter().take(num_elem) {
458 bw.write_buf(&entry.y)?;
460 bw.write_byte(entry.u ^ 0x80)?;
461 bw.write_byte(entry.v ^ 0x80)?;
// Update path: find the last changed entry so unchanged tail entries need
// no mask words at all.
465 let mut end = num_elem;
466 for (i, (ncw, ocw)) in new_cb.iter().zip(old_cb.iter()).enumerate().take(num_elem).rev() {
// One big-endian mask word per group of 32 entries, bit set = entry
// present, followed by the changed entries themselves.
473 for i in (0..end).step_by(32) {
477 if new_cb[i + j] != old_cb[i + j] {
481 bw.write_u32be(mask)?;
483 if new_cb[i + j] == old_cb[i + j] { continue; }
484 bw.write_buf(&new_cb[i + j].y)?;
486 bw.write_byte(new_cb[i + j].u ^ 0x80)?;
487 bw.write_byte(new_cb[i + j].v ^ 0x80)?;
492 patch_size(bw, chunk_pos)?;
// Reconstruct the strip into `lastfrm` exactly as a decoder would, so the
// next frame's skip decisions compare against decoded output rather than
// the source. Walks the recorded decision masks and index streams in the
// same order the bitstream writer consumes them.
// NOTE(review): mask/bit bookkeeping lines between the visible ones are
// elided in this listing.
495 fn render_stripe(&mut self, intra: bool, start: usize, end: usize) {
496 if let Some(ref mut dst_frm) = self.lastfrm {
497 let ystride = dst_frm.get_stride(0);
498 let mut yoff = dst_frm.get_offset(0) + start * ystride;
499 let ustride = dst_frm.get_stride(1);
500 let mut uoff = dst_frm.get_offset(1) + start / 2 * ustride;
501 let vstride = dst_frm.get_stride(2);
502 let mut voff = dst_frm.get_offset(2) + start / 2 * vstride;
503 let (width, _) = dst_frm.get_dimensions(0);
504 let data = dst_frm.get_data_mut().unwrap();
505 let mut miter = self.masks.masks.iter();
506 let mut v1_iter = self.v1_idx.iter();
507 let mut v4_iter = self.v4_idx.iter();
508 let mut cur_mask = 0;
510 for _ in (start..end).step_by(4) {
511 for x in (0..width).step_by(4) {
// Intra V1-only strips have no mask words at all — treat every block as
// coded (all bits set).
513 if !intra || !self.v1_idx.is_empty() {
514 cur_mask = *miter.next().unwrap();
516 cur_mask = 0xFFFFFFFF;
// Inter path: first mask bit 0 means "skip" — leave the previous frame's
// pixels in place.
521 if (cur_mask & cur_bit) == 0 {
527 cur_mask = *miter.next().unwrap();
// Next bit 0 selects V1: one codebook entry painted as 2x2 pixel blocks
// per luma sample.
531 if (cur_mask & cur_bit) == 0 {
532 let idx = *v1_iter.next().unwrap() as usize;
533 let cb = &self.v1_cur_cb[self.cur_strip][idx];
535 let mut coff = yoff + x;
536 data[coff] = cb.y[0]; data[coff + 1] = cb.y[0];
537 data[coff + 2] = cb.y[1]; data[coff + 3] = cb.y[1];
539 data[coff] = cb.y[0]; data[coff + 1] = cb.y[0];
540 data[coff + 2] = cb.y[1]; data[coff + 3] = cb.y[1];
542 data[coff] = cb.y[2]; data[coff + 1] = cb.y[2];
543 data[coff + 2] = cb.y[3]; data[coff + 3] = cb.y[3];
545 data[coff] = cb.y[2]; data[coff + 1] = cb.y[2];
546 data[coff + 2] = cb.y[3]; data[coff + 3] = cb.y[3];
549 let mut coff = uoff + x / 2;
550 data[coff] = cb.u; data[coff + 1] = cb.u;
552 data[coff] = cb.u; data[coff + 1] = cb.u;
554 let mut coff = voff + x / 2;
555 data[coff] = cb.v; data[coff + 1] = cb.v;
557 data[coff] = cb.v; data[coff + 1] = cb.v;
// V4: four codebook entries, one per 2x2 quadrant, painted at full
// resolution.
560 let idx0 = *v4_iter.next().unwrap() as usize;
561 let cb0 = &self.v4_cur_cb[self.cur_strip][idx0];
562 let idx1 = *v4_iter.next().unwrap() as usize;
563 let cb1 = &self.v4_cur_cb[self.cur_strip][idx1];
564 let idx2 = *v4_iter.next().unwrap() as usize;
565 let cb2 = &self.v4_cur_cb[self.cur_strip][idx2];
566 let idx3 = *v4_iter.next().unwrap() as usize;
567 let cb3 = &self.v4_cur_cb[self.cur_strip][idx3];
569 let mut coff = yoff + x;
570 data[coff] = cb0.y[0]; data[coff + 1] = cb0.y[1];
571 data[coff + 2] = cb1.y[0]; data[coff + 3] = cb1.y[1];
573 data[coff] = cb0.y[2]; data[coff + 1] = cb0.y[3];
574 data[coff + 2] = cb1.y[2]; data[coff + 3] = cb1.y[3];
576 data[coff] = cb2.y[0]; data[coff + 1] = cb2.y[1];
577 data[coff + 2] = cb3.y[0]; data[coff + 3] = cb3.y[1];
579 data[coff] = cb2.y[2]; data[coff + 1] = cb2.y[3];
580 data[coff + 2] = cb3.y[2]; data[coff + 3] = cb3.y[3];
583 let mut coff = uoff + x / 2;
584 data[coff] = cb0.u; data[coff + 1] = cb1.u;
586 data[coff] = cb2.u; data[coff + 1] = cb3.u;
588 let mut coff = voff + x / 2;
589 data[coff] = cb0.v; data[coff + 1] = cb1.v;
591 data[coff] = cb2.v; data[coff + 1] = cb3.v;
// Compute, for every 4x4 block in the strip, the squared-error distance
// between the input frame and the last reconstructed frame — the cost of
// skipping the block in an inter frame. Results are pushed into
// `self.skip_dist` in block order.
// NOTE(review): the accumulator declaration and several `dist += ...`
// lines are elided in this listing.
604 fn calc_skip_dist(&mut self, in_frm: &NAVideoBuffer<u8>, start: usize, end: usize) {
605 self.skip_dist.clear();
606 if let Some(ref ref_frm) = self.lastfrm {
607 let rystride = ref_frm.get_stride(0);
608 let mut ryoff = ref_frm.get_offset(0) + start * rystride;
609 let rustride = ref_frm.get_stride(1);
610 let mut ruoff = ref_frm.get_offset(1) + start / 2 * rustride;
611 let rvstride = ref_frm.get_stride(2);
612 let mut rvoff = ref_frm.get_offset(2) + start / 2 * rvstride;
613 let (width, _) = ref_frm.get_dimensions(0);
614 let rdata = ref_frm.get_data();
616 let iystride = in_frm.get_stride(0);
617 let mut iyoff = in_frm.get_offset(0) + start * iystride;
618 let iustride = in_frm.get_stride(1);
619 let mut iuoff = in_frm.get_offset(1) + start / 2 * iustride;
620 let ivstride = in_frm.get_stride(2);
621 let mut ivoff = in_frm.get_offset(2) + start / 2 * ivstride;
622 let idata = in_frm.get_data();
624 for _ in (start..end).step_by(4) {
625 for x in (0..width).step_by(4) {
// Luma: 4 rows x 4 samples of squared differences.
627 let mut roff = ryoff + x;
628 let mut ioff = iyoff + x;
631 let d = i32::from(rdata[roff + i]) - i32::from(idata[ioff + i]);
// Chroma: 2x2 samples each for U and V.
638 let mut roff = ruoff + x / 2;
639 let mut ioff = iuoff + x / 2;
640 let ud = i32::from(rdata[roff]) - i32::from(idata[ioff]);
642 let ud = i32::from(rdata[roff + 1]) - i32::from(idata[ioff + 1]);
644 roff += rustride; ioff += iustride;
645 let ud = i32::from(rdata[roff]) - i32::from(idata[ioff]);
647 let ud = i32::from(rdata[roff + 1]) - i32::from(idata[ioff + 1]);
650 let mut roff = rvoff + x / 2;
651 let mut ioff = ivoff + x / 2;
652 let vd = i32::from(rdata[roff]) - i32::from(idata[ioff]);
654 let vd = i32::from(rdata[roff + 1]) - i32::from(idata[ioff + 1]);
656 roff += rvstride; ioff += ivstride;
657 let vd = i32::from(rdata[roff]) - i32::from(idata[ioff]);
659 let vd = i32::from(rdata[roff + 1]) - i32::from(idata[ioff + 1]);
662 self.skip_dist.push(dist as u32);
// Advance to the next block row: 4 luma rows, 2 chroma rows.
665 iyoff += iystride * 4;
666 iuoff += iustride * 2;
667 ivoff += ivstride * 2;
668 ryoff += rystride * 4;
669 ruoff += rustride * 2;
670 rvoff += rvstride * 2;
// Build the current strip's V1/V4 codebooks from the gathered entries
// using the selected quantisation mode. In force_v1 mode the V4 codebook
// is left unused (the `else` arms are on elided lines). Unused codebook
// tail entries are zeroed so delta-update comparisons are deterministic.
676 fn quant_vectors(&mut self) {
679 self.v1_len = elbg_quant(&self.v1_entries, &mut self.v1_cur_cb[self.cur_strip]);
680 self.v4_len = if !self.force_v1 {
681 elbg_quant(&self.v4_entries, &mut self.v4_cur_cb[self.cur_strip])
687 for bin in self.fst_bins.iter_mut() {
690 self.v1_len = quant_fast(&mut self.fst_bins, &self.v1_entries, &mut self.v1_cur_cb[self.cur_strip]);
691 self.v4_len = if !self.force_v1 {
692 quant_fast(&mut self.fst_bins, &self.v4_entries, &mut self.v4_cur_cb[self.cur_strip])
697 QuantMode::MedianCut => {
698 self.v1_len = quantise_median_cut::<YUVCode, YUVCodeSum>(&self.v1_entries, &mut self.v1_cur_cb[self.cur_strip]);
700 self.v4_len = quantise_median_cut::<YUVCode, YUVCodeSum>(&self.v4_entries, &mut self.v4_cur_cb[self.cur_strip]);
707 for e in self.v1_cur_cb[self.cur_strip].iter_mut().skip(self.v1_len) { *e = YUVCode::default(); }
708 for e in self.v4_cur_cb[self.cur_strip].iter_mut().skip(self.v4_len) { *e = YUVCode::default(); }
// Second quantisation pass over only the vectors that were actually chosen
// for coding (`v1_cand`/`v4_cand`), mirroring quant_vectors per mode.
// Skipped candidates from inter frames are excluded, which tightens the
// codebooks around what will really be written.
710 fn refine_vectors(&mut self) {
713 self.v1_len = if !self.v1_cand.is_empty() {
714 elbg_quant(&self.v1_cand, &mut self.v1_cur_cb[self.cur_strip])
718 self.v4_len = if !self.force_v1 && !self.v4_cand.is_empty() {
719 elbg_quant(&self.v4_cand, &mut self.v4_cur_cb[self.cur_strip])
725 for bin in self.fst_bins.iter_mut() {
728 self.v1_len = if !self.v1_cand.is_empty() {
729 quant_fast(&mut self.fst_bins, &self.v1_cand, &mut self.v1_cur_cb[self.cur_strip])
733 self.v4_len = if !self.force_v1 && !self.v4_cand.is_empty() {
734 quant_fast(&mut self.fst_bins, &self.v4_cand, &mut self.v4_cur_cb[self.cur_strip])
739 QuantMode::MedianCut => {
740 self.v1_len = if !self.v1_cand.is_empty() {
741 quantise_median_cut::<YUVCode, YUVCodeSum>(&self.v1_cand, &mut self.v1_cur_cb[self.cur_strip])
745 if !self.force_v1 && !self.v4_cand.is_empty() {
746 self.v4_len = quantise_median_cut::<YUVCode, YUVCodeSum>(&self.v4_cand, &mut self.v4_cur_cb[self.cur_strip]);
// Zero the unused tail so later codebook comparisons are stable.
753 for e in self.v1_cur_cb[self.cur_strip].iter_mut().skip(self.v1_len) { *e = YUVCode::default(); }
754 for e in self.v4_cur_cb[self.cur_strip].iter_mut().skip(self.v4_len) { *e = YUVCode::default(); }
// Encode one intra frame: frame header, then per strip — quantise, choose
// V1/V4 per block, optionally refine the codebooks against the chosen
// vectors, write codebooks and index chunks, and render the reconstruction
// used as the next inter reference.
// NOTE(review): this listing elides many lines (loop headers, grayscale
// chroma-clearing branches, mask/position bookkeeping); comments describe
// only visible code.
756 fn encode_intra(&mut self, bw: &mut ByteWriter, in_frm: &NAVideoBuffer<u8>) -> EncoderResult<bool> {
757 let (width, height) = in_frm.get_dimensions(0);
// Strip height rounded up to a multiple of 4 (block size).
758 let mut strip_h = (height / self.nstrips + 3) & !3;
763 let mut start_line = 0;
764 let mut end_line = strip_h;
// Frame header: flags byte (0 = intra), 24-bit size placeholder patched at
// the end, dimensions, strip count.
766 bw.write_byte(0)?; // intra flag
767 bw.write_u24be(0)?; // frame size
768 let frame_data_pos = bw.tell();
769 bw.write_u16be(width as u16)?;
770 bw.write_u16be(height as u16)?;
771 bw.write_u16be(self.nstrips as u16)?;
// Seed the "previous" codebooks with PRNG noise so the first delta
// comparison has defined contents.
774 for entry in self.v1_cb[self.cur_strip].iter_mut() {
775 self.rng.fill_entry(entry);
777 for entry in self.v4_cb[self.cur_strip].iter_mut() {
778 self.rng.fill_entry(entry);
780 while start_line < height {
781 self.read_strip(in_frm, start_line, end_line);
// Later strips inherit the previous strip's codebooks as their delta base.
783 if self.cur_strip > 0 {
784 self.v1_cb[self.cur_strip] = self.v1_cb[self.cur_strip - 1];
785 self.v4_cb[self.cur_strip] = self.v4_cb[self.cur_strip - 1];
787 self.quant_vectors();
// Grayscale: chroma of the codebook entries is forced on elided lines.
789 for cw in self.v1_cur_cb[self.cur_strip].iter_mut() {
793 for cw in self.v4_cur_cb[self.cur_strip].iter_mut() {
// First pass: per block decide V1 vs V4 by comparing the single V1 match
// distance against the sum of the four V4 match distances.
804 self.v1_cand.clear();
805 self.v4_cand.clear();
806 for (v1_entry, v4_entries) in self.v1_entries.iter().zip(self.v4_entries.chunks(4)) {
807 let (v1_idx, v1_dist) = Self::find_nearest(&self.v1_cur_cb[self.cur_strip][..self.v1_len], *v1_entry);
808 if v1_dist == 0 || self.force_v1 {
811 self.v1_idx.push(v1_idx);
813 self.cmode.push(CodingMode::V1);
814 self.v1_cand.push(*v1_entry);
818 let (v40_idx, v40_dist) = Self::find_nearest(&self.v4_cur_cb[self.cur_strip][..self.v4_len], v4_entries[0]);
819 let (v41_idx, v41_dist) = Self::find_nearest(&self.v4_cur_cb[self.cur_strip][..self.v4_len], v4_entries[1]);
820 let (v42_idx, v42_dist) = Self::find_nearest(&self.v4_cur_cb[self.cur_strip][..self.v4_len], v4_entries[2]);
821 let (v43_idx, v43_dist) = Self::find_nearest(&self.v4_cur_cb[self.cur_strip][..self.v4_len], v4_entries[3]);
// Note the inverted-looking comparison: if V4's total error exceeds V1's,
// V1 wins (elided branch pushes V1 data; visible branch is the V4 case in
// the `else` on elided lines). TODO confirm branch pairing against gaps.
822 if v40_dist + v41_dist + v42_dist + v43_dist > v1_dist {
825 self.v4_idx.push(v40_idx);
826 self.v4_idx.push(v41_idx);
827 self.v4_idx.push(v42_idx);
828 self.v4_idx.push(v43_idx);
830 self.cmode.push(CodingMode::V4);
831 self.v4_cand.extend_from_slice(v4_entries);
836 self.v1_idx.push(v1_idx);
838 self.cmode.push(CodingMode::V1);
839 self.v1_cand.push(*v1_entry);
// Optional second pass: requantise from the chosen candidates, then redo
// the index lookups with the refined codebooks.
844 self.refine_vectors();
845 let mut v1_src = self.v1_cand.iter();
846 let mut v4_src = self.v4_cand.chunks_exact(4);
847 for &cmode in self.cmode.iter() {
// Skip cannot appear in an intra frame.
849 CodingMode::Skip => unreachable!(),
851 let v1_entry = v1_src.next().unwrap();
852 let (v1_idx, _) = Self::find_nearest(&self.v1_cur_cb[self.cur_strip][..self.v1_len], *v1_entry);
854 self.v1_idx.push(v1_idx);
857 let v4_entries = v4_src.next().unwrap();
858 let (v40_idx, _) = Self::find_nearest(&self.v4_cur_cb[self.cur_strip][..self.v4_len], v4_entries[0]);
859 let (v41_idx, _) = Self::find_nearest(&self.v4_cur_cb[self.cur_strip][..self.v4_len], v4_entries[1]);
860 let (v42_idx, _) = Self::find_nearest(&self.v4_cur_cb[self.cur_strip][..self.v4_len], v4_entries[2]);
861 let (v43_idx, _) = Self::find_nearest(&self.v4_cur_cb[self.cur_strip][..self.v4_len], v4_entries[3]);
864 self.v4_idx.push(v40_idx);
865 self.v4_idx.push(v41_idx);
866 self.v4_idx.push(v42_idx);
867 self.v4_idx.push(v43_idx);
// Strip header: only the first strip is forced intra; later strips may
// delta-code their codebooks if that is smaller.
874 let mut is_intra_strip = start_line == 0;
875 let (upd_v1, upd_v4) = if !is_intra_strip {
876 let cb_size = if self.grayscale { 4 } else { 6 };
877 (Self::can_update_cb(&self.v1_cur_cb[self.cur_strip][..self.v1_len], &self.v1_cb[self.cur_strip][..self.v1_len], cb_size),
878 Self::can_update_cb(&self.v4_cur_cb[self.cur_strip][..self.v4_len], &self.v4_cb[self.cur_strip][..self.v4_len], cb_size))
882 if !is_intra_strip && !upd_v1 && !upd_v4 {
883 is_intra_strip = true;
885 bw.write_byte(if is_intra_strip { 0x10 } else { 0x11 })?;
886 bw.write_u24be(0)?; // strip size
887 let strip_data_pos = bw.tell();
888 bw.write_u16be(0)?; // yoff
889 bw.write_u16be(0)?; // xoff
890 bw.write_u16be((end_line - start_line) as u16)?;
891 bw.write_u16be(width as u16)?;
893 Self::write_cb(bw, 0x20, &self.v4_cur_cb[self.cur_strip], &self.v4_cb[self.cur_strip], self.grayscale, upd_v4, self.v4_len)?;
894 Self::write_cb(bw, 0x22, &self.v1_cur_cb[self.cur_strip], &self.v1_cb[self.cur_strip], self.grayscale, upd_v1, self.v1_len)?;
896 self.render_stripe(true, start_line, end_line);
// Index chunk: 0x32 = V1-only (no masks), 0x30 = mixed V1/V4 with
// per-block mask bits (bit clear = V1, bit set = V4).
898 if self.v4_idx.is_empty() {
899 bw.write_byte(0x32)?;
900 bw.write_u24be((self.v1_idx.len() + 4) as u32)?;
901 bw.write_buf(self.v1_idx.as_slice())?;
903 bw.write_byte(0x30)?;
905 let chunk_pos = bw.tell();
915 for mask in self.masks.masks.iter() {
916 bw.write_u32be(*mask)?;
917 for j in (0..32).rev() {
918 if (mask & (1 << j)) == 0 {
919 bw.write_byte(self.v1_idx[v1_pos])?;
922 bw.write_byte(self.v4_idx[v4_pos])?;
923 bw.write_byte(self.v4_idx[v4_pos + 1])?;
924 bw.write_byte(self.v4_idx[v4_pos + 2])?;
925 bw.write_byte(self.v4_idx[v4_pos + 3])?;
930 patch_size(bw, chunk_pos)?;
933 patch_size(bw, strip_data_pos)?;
// Commit the strip's codebooks as the reference for delta updates.
935 self.v1_cb[self.cur_strip].copy_from_slice(&self.v1_cur_cb[self.cur_strip]);
936 self.v4_cb[self.cur_strip].copy_from_slice(&self.v4_cur_cb[self.cur_strip]);
937 start_line = end_line;
938 end_line = (end_line + strip_h).min(height);
942 patch_size(bw, frame_data_pos)?;
// Encode one inter frame. Mirrors encode_intra but adds a per-block skip
// decision: a block is skipped when copying from the last reconstructed
// frame costs less than the best V1 match. Index chunk id 0x31 carries
// interleaved skip and V1/V4 mask bits.
// NOTE(review): many lines are elided in this listing; comments describe
// only visible code.
945 fn encode_inter(&mut self, bw: &mut ByteWriter, in_frm: &NAVideoBuffer<u8>) -> EncoderResult<bool> {
946 let (width, height) = in_frm.get_dimensions(0);
947 let mut strip_h = (height / self.nstrips + 3) & !3;
952 let mut start_line = 0;
953 let mut end_line = strip_h;
// Frame header: flags byte 1 marks an inter frame.
955 bw.write_byte(1)?; // intra flag
956 bw.write_u24be(0)?; // frame size
957 let frame_data_pos = bw.tell();
958 bw.write_u16be(width as u16)?;
959 bw.write_u16be(height as u16)?;
960 bw.write_u16be(self.nstrips as u16)?;
963 while start_line < height {
964 self.read_strip(in_frm, start_line, end_line);
// Skip costs versus the previous reconstruction, one per 4x4 block.
965 self.calc_skip_dist(in_frm, start_line, end_line);
967 self.quant_vectors();
969 for cw in self.v1_cur_cb[self.cur_strip].iter_mut() {
973 for cw in self.v4_cur_cb[self.cur_strip].iter_mut() {
// First pass: decide Skip / V1 / V4 per block.
984 self.v1_cand.clear();
985 self.v4_cand.clear();
987 let mut skip_iter = self.skip_dist.iter();
988 for (v1_entry, v4_entries) in self.v1_entries.iter().zip(self.v4_entries.chunks(4)) {
989 let skip_dist = *skip_iter.next().unwrap();
// An elided condition (presumably skip_dist == 0 or a threshold) takes the
// immediate-skip path here — TODO confirm.
992 self.masks.put_inter(true);
994 self.cmode.push(CodingMode::Skip);
998 let (v1_idx, v1_dist) = Self::find_nearest(&self.v1_cur_cb[self.cur_strip][..self.v1_len], *v1_entry);
999 if skip_dist < v1_dist {
1001 self.masks.put_inter(true);
1003 self.cmode.push(CodingMode::Skip);
// Without refinement the mask bits are final, so the coded bit is written
// immediately; with refinement masks are rebuilt in the second pass.
1006 } else if !self.refine {
1007 self.masks.put_inter(false);
1009 if v1_dist == 0 || self.force_v1 {
1011 self.masks.put_v1();
1012 self.v1_idx.push(v1_idx);
1014 self.cmode.push(CodingMode::V1);
1015 self.v1_cand.push(*v1_entry);
1019 let (v40_idx, v40_dist) = Self::find_nearest(&self.v4_cur_cb[self.cur_strip][..self.v4_len], v4_entries[0]);
1020 let (v41_idx, v41_dist) = Self::find_nearest(&self.v4_cur_cb[self.cur_strip][..self.v4_len], v4_entries[1]);
1021 let (v42_idx, v42_dist) = Self::find_nearest(&self.v4_cur_cb[self.cur_strip][..self.v4_len], v4_entries[2]);
1022 let (v43_idx, v43_dist) = Self::find_nearest(&self.v4_cur_cb[self.cur_strip][..self.v4_len], v4_entries[3]);
1023 if v40_dist + v41_dist + v42_dist + v43_dist > v1_dist {
1025 self.masks.put_v4();
1026 self.v4_idx.push(v40_idx);
1027 self.v4_idx.push(v41_idx);
1028 self.v4_idx.push(v42_idx);
1029 self.v4_idx.push(v43_idx);
1031 self.cmode.push(CodingMode::V4);
1032 self.v4_cand.extend_from_slice(v4_entries);
1036 self.masks.put_v1();
1037 self.v1_idx.push(v1_idx);
1039 self.cmode.push(CodingMode::V1);
1040 self.v1_cand.push(*v1_entry);
// Refinement pass: rebuild codebooks from the coded candidates, then
// replay the recorded decisions to regenerate masks and indices.
1045 self.refine_vectors();
1046 let mut v1_src = self.v1_cand.iter();
1047 let mut v4_src = self.v4_cand.chunks_exact(4);
1048 for &cmode in self.cmode.iter() {
1050 CodingMode::Skip => {
1051 self.masks.put_inter(true);
1054 let v1_entry = v1_src.next().unwrap();
1055 let (v1_idx, _) = Self::find_nearest(&self.v1_cur_cb[self.cur_strip][..self.v1_len], *v1_entry);
1056 self.masks.put_inter(false);
1057 self.masks.put_v1();
1058 self.v1_idx.push(v1_idx);
1061 let v4_entries = v4_src.next().unwrap();
1062 let (v40_idx, _) = Self::find_nearest(&self.v4_cur_cb[self.cur_strip][..self.v4_len], v4_entries[0]);
1063 let (v41_idx, _) = Self::find_nearest(&self.v4_cur_cb[self.cur_strip][..self.v4_len], v4_entries[1]);
1064 let (v42_idx, _) = Self::find_nearest(&self.v4_cur_cb[self.cur_strip][..self.v4_len], v4_entries[2]);
1065 let (v43_idx, _) = Self::find_nearest(&self.v4_cur_cb[self.cur_strip][..self.v4_len], v4_entries[3]);
1067 self.masks.put_inter(false);
1068 self.masks.put_v4();
1069 self.v4_idx.push(v40_idx);
1070 self.v4_idx.push(v41_idx);
1071 self.v4_idx.push(v42_idx);
1072 self.v4_idx.push(v43_idx);
// Inter strips always use the delta-capable strip id 0x11 and may
// delta-update codebooks when smaller.
1079 let (upd_v1, upd_v4) = {
1080 let cb_size = if self.grayscale { 4 } else { 6 };
1081 (Self::can_update_cb(&self.v1_cur_cb[self.cur_strip][..self.v1_len], &self.v1_cb[self.cur_strip][..self.v1_len], cb_size),
1082 Self::can_update_cb(&self.v4_cur_cb[self.cur_strip][..self.v4_len], &self.v4_cb[self.cur_strip][..self.v4_len], cb_size))
1084 bw.write_byte(0x11)?;
1085 bw.write_u24be(0)?; // strip size
1086 let strip_data_pos = bw.tell();
1087 bw.write_u16be(0)?; // yoff
1088 bw.write_u16be(0)?; // xoff
1089 bw.write_u16be((end_line - start_line) as u16)?;
1090 bw.write_u16be(width as u16)?;
1092 Self::write_cb(bw, 0x20, &self.v4_cur_cb[self.cur_strip], &self.v4_cb[self.cur_strip], self.grayscale, upd_v4, self.v4_len)?;
1093 Self::write_cb(bw, 0x22, &self.v1_cur_cb[self.cur_strip], &self.v1_cb[self.cur_strip], self.grayscale, upd_v1, self.v1_len)?;
1095 self.render_stripe(false, start_line, end_line);
// Index chunk 0x31: mask words followed by the indices the set bits
// reference. The dummy push of 0 indices appears to guard against reading
// past the end when the last mask word is partially consumed — TODO
// confirm against the elided context.
1097 bw.write_byte(0x31)?;
1099 let chunk_pos = bw.tell();
1103 self.v1_idx.push(0);
1104 self.v4_idx.push(0);
1105 self.v4_idx.push(0);
1106 self.v4_idx.push(0);
1107 self.v4_idx.push(0);
1109 let mut skip = true;
1110 for mask in self.masks.masks.iter() {
1111 bw.write_u32be(*mask)?;
// All-zero word while in skip state carries no index payload.
1112 if *mask == 0 && skip { continue; }
1113 let mut bit = 1 << 31;
1116 skip = (mask & bit) == 0;
1119 if (mask & bit) == 0 {
1120 bw.write_byte(self.v1_idx[v1_pos])?;
1123 bw.write_byte(self.v4_idx[v4_pos])?;
1124 bw.write_byte(self.v4_idx[v4_pos + 1])?;
1125 bw.write_byte(self.v4_idx[v4_pos + 2])?;
1126 bw.write_byte(self.v4_idx[v4_pos + 3])?;
1134 patch_size(bw, chunk_pos)?;
1136 patch_size(bw, strip_data_pos)?;
1138 self.v1_cb[self.cur_strip].copy_from_slice(&self.v1_cur_cb[self.cur_strip]);
1139 self.v4_cb[self.cur_strip].copy_from_slice(&self.v4_cur_cb[self.cur_strip]);
1140 start_line = end_line;
1141 end_line = (end_line + strip_h).min(height);
1143 self.cur_strip += 1;
1145 patch_size(bw, frame_data_pos)?;
1150 impl NAEncoder for CinepakEncoder {
// Suggest encoder-compatible parameters: YUV420 (or the grayscale format
// if that is what the caller asked for), dimensions rounded up to a
// multiple of 4.
1151 fn negotiate_format(&self, encinfo: &EncodeParameters) -> EncoderResult<EncodeParameters> {
1152 match encinfo.format {
1153 NACodecTypeInfo::None => {
1154 Ok(EncodeParameters {
1155 format: NACodecTypeInfo::Video(NAVideoInfo::new(0, 0, true, YUV420_FORMAT)),
1156 ..Default::default()
1159 NACodecTypeInfo::Audio(_) => Err(EncoderError::FormatError),
1160 NACodecTypeInfo::Video(vinfo) => {
1161 let pix_fmt = if vinfo.format == GRAY_FORMAT { GRAY_FORMAT } else { YUV420_FORMAT };
1162 let outinfo = NAVideoInfo::new((vinfo.width + 3) & !3, (vinfo.height + 3) & !3, false, pix_fmt);
1163 let mut ofmt = *encinfo;
1164 ofmt.format = NACodecTypeInfo::Video(outinfo);
1169 fn get_capabilities(&self) -> u64 { 0 }
// Validate parameters, create the output stream and allocate the
// reconstruction buffer. Dimensions must be multiples of 4 and fit in the
// 16-bit header fields.
1170 fn init(&mut self, stream_id: u32, encinfo: EncodeParameters) -> EncoderResult<NAStreamRef> {
1171 match encinfo.format {
1172 NACodecTypeInfo::None => Err(EncoderError::FormatError),
1173 NACodecTypeInfo::Audio(_) => Err(EncoderError::FormatError),
1174 NACodecTypeInfo::Video(vinfo) => {
1175 if vinfo.format != YUV420_FORMAT && vinfo.format != GRAY_FORMAT {
1176 return Err(EncoderError::FormatError);
1178 if ((vinfo.width | vinfo.height) & 3) != 0 {
1179 return Err(EncoderError::FormatError);
1181 if (vinfo.width | vinfo.height) >= (1 << 16) {
1182 return Err(EncoderError::FormatError);
1185 let out_info = NAVideoInfo::new(vinfo.width, vinfo.height, false, vinfo.format);
1186 let info = NACodecInfo::new("cinepak", NACodecTypeInfo::Video(out_info), None);
1187 let mut stream = NAStream::new(StreamType::Video, stream_id, info, encinfo.tb_num, encinfo.tb_den, 0);
1188 stream.set_num(stream_id as usize);
1189 let stream = stream.into_ref();
1191 self.stream = Some(stream.clone());
1192 self.quality = encinfo.quality;
1193 self.grayscale = vinfo.format != YUV420_FORMAT;
// Pre-size the work vectors from the block counts.
1194 let num_blocks = vinfo.width / 2 * vinfo.height / 2;
1195 self.v1_entries = Vec::with_capacity(num_blocks);
1196 self.v4_entries = Vec::with_capacity(num_blocks * 4);
1197 self.v1_idx = Vec::with_capacity(num_blocks);
1198 self.v4_idx = Vec::with_capacity(num_blocks * 4);
1199 self.skip_dist = Vec::with_capacity(vinfo.width / 4 * vinfo.height / 4);
1201 let buf = alloc_video_buffer(out_info, 2)?;
1202 self.lastfrm = Some(buf.get_vbuf().unwrap());
// Encode one frame: lazily pick a strip count on the first frame (~2400
// blocks per strip, clamped to 1..=3, then re-derived from the rounded
// strip height), grow per-strip codebooks, and emit an intra frame at each
// key-frame interval and inter frames otherwise.
1208 fn encode(&mut self, frm: &NAFrame) -> EncoderResult<()> {
1209 let buf = frm.get_buffer();
1210 if let Some(ref vbuf) = buf.get_vbuf() {
1211 if self.nstrips == 0 {
1212 let (w, h) = vbuf.get_dimensions(0);
1213 self.nstrips = ((((w * h) >> 4) + 1200) / 2400).max(1).min(3);
1214 let strip_h = ((h + self.nstrips - 1) / self.nstrips + 3) & !3;
1215 self.nstrips = (h + strip_h - 1) / strip_h;
// Allocate codebook storage for any newly-needed strips (shrinking is
// handled on elided lines — TODO confirm).
1217 let cur_strips = self.v1_cb.len();
1218 if cur_strips != self.nstrips {
1221 if cur_strips < self.nstrips {
1222 for _ in cur_strips..self.nstrips {
1223 self.v1_cb.push([YUVCode::default(); 256]);
1224 self.v4_cb.push([YUVCode::default(); 256]);
1225 self.v1_cur_cb.push([YUVCode::default(); 256]);
1226 self.v4_cur_cb.push([YUVCode::default(); 256]);
1230 let mut dbuf = Vec::with_capacity(4);
1231 let mut gw = GrowableMemoryWriter::new_write(&mut dbuf);
1232 let mut bw = ByteWriter::new(&mut gw);
// frmcount counts down/up to key_int; 0 forces an intra (key) frame.
1233 let is_intra = if self.frmcount == 0 {
1234 self.encode_intra(&mut bw, vbuf)?
1236 self.encode_inter(&mut bw, vbuf)?
1238 self.pkt = Some(NAPacket::new(self.stream.clone().unwrap(), frm.ts, is_intra, dbuf));
1240 if self.frmcount == self.key_int {
1245 Err(EncoderError::InvalidParameters)
// Hand the pending packet to the caller (at most one per encode() call).
1248 fn get_packet(&mut self) -> EncoderResult<Option<NAPacket>> {
1249 let mut npkt = None;
1250 std::mem::swap(&mut self.pkt, &mut npkt);
1253 fn flush(&mut self) -> EncoderResult<()> {
// Option table exposed through NAOptionHandler: key-frame interval, strip
// count override, quantiser selection, V1-only mode and the refinement
// toggle.
1259 const ENCODER_OPTS: &[NAOptionDefinition] = &[
1260 NAOptionDefinition {
1261 name: KEYFRAME_OPTION, description: KEYFRAME_OPTION_DESC,
1262 opt_type: NAOptionDefinitionType::Int(Some(0), Some(128)) },
1263 NAOptionDefinition {
1264 name: "nstrips", description: "Number of strips per frame (0 - automatic)",
1265 opt_type: NAOptionDefinitionType::Int(Some(0), Some(16)) },
1266 NAOptionDefinition {
1267 name: "quant_mode", description: "Quantisation mode",
1268 opt_type: NAOptionDefinitionType::String(Some(&["elbg", "fast", "mediancut"])) },
1269 NAOptionDefinition {
1270 name: "force_v1", description: "Force coarse (V1-only) mode",
1271 opt_type: NAOptionDefinitionType::Bool },
1272 NAOptionDefinition {
1273 name: "refine", description: "Try to improve coded picture",
1274 opt_type: NAOptionDefinitionType::Bool },
1277 impl NAOptionHandler for CinepakEncoder {
1278 fn get_supported_options(&self) -> &[NAOptionDefinition] { ENCODER_OPTS }
// Apply recognised options; unknown names or mismatched value types are
// silently ignored (standard NihAV option-handler behaviour). Several
// match arms' option-name labels are on elided lines.
1279 fn set_options(&mut self, options: &[NAOption]) {
1280 for option in options.iter() {
1281 for opt_def in ENCODER_OPTS.iter() {
1282 if opt_def.check(option).is_ok() {
1284 KEYFRAME_OPTION => {
1285 if let NAValue::Int(intval) = option.value {
1286 self.key_int = intval as u8;
1290 if let NAValue::Int(intval) = option.value {
1291 self.nstrips = intval as usize;
1295 if let NAValue::String(ref strval) = option.value {
1296 match strval.as_str() {
1297 "elbg" => self.qmode = QuantMode::ELBG,
1298 "fast" => self.qmode = QuantMode::Fast,
1299 "mediancut" => self.qmode = QuantMode::MedianCut,
1305 if let NAValue::Bool(val) = option.value {
1306 self.force_v1 = val;
1310 if let NAValue::Bool(val) = option.value {
// Report the current value of a named option.
1320 fn query_option_value(&self, name: &str) -> Option<NAValue> {
1322 KEYFRAME_OPTION => Some(NAValue::Int(i64::from(self.key_int))),
1323 "nstrips" => Some(NAValue::Int(self.nstrips as i64)),
1324 "quant_mode" => Some(NAValue::String(self.qmode.to_string())),
1325 "force_v1" => Some(NAValue::Bool(self.force_v1)),
1326 "refine" => Some(NAValue::Bool(self.refine)),
// Public factory: boxed Cinepak encoder instance for codec registration.
1332 pub fn get_encoder() -> Box<dyn NAEncoder + Send> {
1333 Box::new(CinepakEncoder::new())
// Integration test: decode a reference AVI, re-encode with this encoder
// (median-cut quantiser) and compare the result against stored MD5 hashes.
// The enclosing `mod test` header and `#[test]`-adjacent lines are elided
// in this listing.
1338 use nihav_core::codecs::*;
1339 use nihav_core::demuxers::*;
1340 use nihav_core::muxers::*;
1342 use nihav_codec_support::test::enc_video::*;
1345 fn test_cinepak_encoder() {
1346 let mut dmx_reg = RegisteredDemuxers::new();
1347 generic_register_all_demuxers(&mut dmx_reg);
1348 let mut dec_reg = RegisteredDecoders::new();
1349 generic_register_all_decoders(&mut dec_reg);
1350 let mut mux_reg = RegisteredMuxers::new();
1351 generic_register_all_muxers(&mut mux_reg);
1352 let mut enc_reg = RegisteredEncoders::new();
1353 generic_register_all_encoders(&mut enc_reg);
1355 // sample: https://samples.mplayerhq.hu/V-codecs/UCOD/TalkingHead_352x288.avi
1356 let dec_config = DecoderTestParams {
1358 in_name: "assets/Misc/TalkingHead_352x288.avi",
1359 stream_type: StreamType::Video,
1363 let enc_config = EncoderTestParams {
1365 enc_name: "cinepak",
1366 out_name: "cinepak.avi",
1369 let dst_vinfo = NAVideoInfo {
1372 format: YUV420_FORMAT,
1376 let enc_params = EncodeParameters {
1377 format: NACodecTypeInfo::Video(dst_vinfo),
1384 let enc_options = &[
1385 NAOption { name: "quant_mode", value: NAValue::String("mediancut".to_string()) },
// File-output variant kept for manual inspection; MD5 comparison is the
// automated check.
1387 //test_encoding_to_file(&dec_config, &enc_config, enc_params, enc_options);
1388 test_encoding_md5(&dec_config, &enc_config, enc_params, enc_options,
1389 &[0x1d4690c8, 0x3b15b4b3, 0xc2df3c7b, 0x1a25b159]);