1 use nihav_core::codecs::*;
2 use nihav_core::io::byteio::*;
3 use nihav_codec_support::vq::*;
// 2x2-downsampled YUV block: four luma samples plus one chroma pair.
// NOTE(review): this is an elided view of the file — intermediate source
// lines are missing, so only the visible statements are documented.
5 #[derive(Default,Clone,Copy,PartialEq,Debug)]
// Vector-quantisation element interface used by ELBG / median-cut training.
11 impl VQElement for YUVCode {
// Squared-difference distortion between two codes (luma sum + chroma terms).
12 fn dist(&self, rval: Self) -> u32 {
14 for (y0, y1) in self.y.iter().zip(rval.y.iter()) {
15 let yd = i32::from(*y0) - i32::from(*y1);
18 let ud = i32::from(self.u) - i32::from(rval.u);
19 let vd = i32::from(self.v) - i32::from(rval.v);
// Sum of squares is non-negative, so the cast to u32 is lossless here.
20 (ysum + ud * ud + vd * vd) as u32
// Component-wise extreme codewords spanning the full 8-bit range.
22 fn min_cw() -> Self { YUVCode { y: [0; 4], u: 0, v: 0 } }
23 fn max_cw() -> Self { YUVCode { y: [255; 4], u: 255, v: 255 } }
// Per-component minimum of two codes.
24 fn min(&self, rval: Self) -> Self {
25 let mut ycode = YUVCode::default();
27 ycode.y[i] = self.y[i].min(rval.y[i]);
29 ycode.u = self.u.min(rval.u);
30 ycode.v = self.v.min(rval.v);
// Per-component maximum of two codes.
33 fn max(&self, rval: Self) -> Self {
34 let mut ycode = YUVCode::default();
36 ycode.y[i] = self.y[i].max(rval.y[i]);
38 ycode.u = self.u.max(rval.u);
39 ycode.v = self.v.max(rval.v);
// Six sortable components: y[0..=3] plus u and v.
42 fn num_components() -> usize { 6 }
// Stable counting sort of `arr` keyed on one component (O(n + 256)).
43 fn sort_by_component(arr: &mut [Self], component: usize) {
44 let mut counts = [0; 256];
// First pass: histogram of key values.
45 for entry in arr.iter() {
46 let idx = match component {
// Components 0-3 select a luma sample; the u/v arms (4/5) are elided here.
47 0 | 1 | 2 | 3 => entry.y[component],
// Prefix sums convert bucket counts into output offsets.
53 let mut offs = [0; 256];
55 offs[i + 1] = offs[i] + counts[i];
// Second pass: scatter entries into a scratch buffer, then copy back.
57 let mut dst = vec![YUVCode::default(); arr.len()];
58 for entry in arr.iter() {
59 let idx = match component {
60 0 | 1 | 2 | 3 => entry.y[component],
64 dst[offs[idx]] = *entry;
67 arr.copy_from_slice(dst.as_slice());
// Index of the component with the widest min..max spread — used to pick
// the split axis during median-cut style quantisation.
69 fn max_dist_component(min: &Self, max: &Self) -> usize {
// Caller is expected to pass component-wise min/max, so max >= min and
// the unsigned subtraction cannot underflow — TODO confirm at call site.
73 let d = u32::from(max.y[i]) - u32::from(min.y[i]);
79 let ud = u32::from(max.u) - u32::from(min.u);
84 let vd = u32::from(max.v) - u32::from(min.v);
// Weighted accumulator for computing cluster centroids of YUVCode entries.
100 impl VQElementSum<YUVCode> for YUVCodeSum {
101 fn zero() -> Self { Self::default() }
// Accumulate `rval` weighted by its occurrence count.
102 fn add(&mut self, rval: YUVCode, count: u64) {
104 self.ysum[i] += u64::from(rval.y[i]) * count;
106 self.usum += u64::from(rval.u) * count;
107 self.vsum += u64::from(rval.v) * count;
// Rounded average of the accumulated codes. Divides by self.count —
// presumably the caller guarantees count > 0; TODO confirm.
110 fn get_centroid(&self) -> YUVCode {
112 let mut ycode = YUVCode::default();
// `+ count / 2` implements round-to-nearest before the integer division.
114 ycode.y[i] = ((self.ysum[i] + self.count / 2) / self.count) as u8;
116 ycode.u = ((self.usum + self.count / 2) / self.count) as u8;
117 ycode.v = ((self.vsum + self.count / 2) / self.count) as u8;
// Small PRNG used to fill unused codebook entries with noise.
130 fn new() -> Self { Self { seed: 0x12345678 } }
131 fn next(&mut self) -> u8 {
132 let mut x = self.seed;
// Xorshift-style state update (the remaining shift steps are elided here).
133 x ^= x.wrapping_shl(13);
// Return the top byte of the updated state.
136 (self.seed >> 24) as u8
// Randomise every component of one codebook entry.
138 fn fill_entry(&mut self, entry: &mut YUVCode) {
139 for y in entry.y.iter_mut() {
142 entry.u = self.next();
143 entry.v = self.next();
// Single-plane 8-bit greyscale pixel format (full-range YUVJ, luma only —
// the remaining four component slots are None).
147 const GRAY_FORMAT: NAPixelFormaton = NAPixelFormaton {
148 model: ColorModel::YUV(YUVSubmodel::YUVJ),
150 comp_info: [Some(NAPixelChromaton{h_ss: 0, v_ss: 0, packed: false, depth: 8, shift: 0, comp_offs: 0, next_elem: 1}), None, None, None, None],
// Clear accumulated mask words before encoding a new strip.
171 fn reset(&mut self) {
172 self.masks.truncate(0);
// Record a V1-coded block (one codebook index covers the whole 4x4 block).
176 fn put_v1(&mut self) {
// Record a V4-coded block (four codebook indices, one per 2x2 quadrant).
183 fn put_v4(&mut self) {
// Record an inter block: the bit is set when the block is coded,
// left clear when it is skipped (`!skip as u32` is 1 or 0).
191 fn put_inter(&mut self, skip: bool) {
193 self.mask |= !skip as u32;
// Flush the current (possibly partial) mask word to the list.
199 fn flush(&mut self) {
200 self.masks.push(self.mask);
// NOTE(review): lines between are elided; the code below appears to pad
// the bit position up to a full 32-bit word — confirm against full source.
205 if self.pos == 0 { return; }
206 while self.pos < 32 {
214 #[derive(Clone,Copy,PartialEq)]
// String names match the accepted values of the "quant_mode" encoder option.
221 impl std::string::ToString for QuantMode {
222 fn to_string(&self) -> String {
224 QuantMode::ELBG => "elbg".to_string(),
225 QuantMode::Hybrid => "hybrid".to_string(),
226 QuantMode::MedianCut => "mediancut".to_string(),
// Cinepak encoder state (several fields are elided in this view).
231 struct CinepakEncoder {
232 stream: Option<NAStreamRef>,
// Last reconstructed frame, used as the reference for inter coding.
233 lastfrm: Option<NAVideoBufferRef<u8>>,
234 pkt: Option<NAPacket>,
// Per-strip training vectors: V1 = one averaged code per 4x4 block,
// V4 = four codes per 4x4 block (one per 2x2 quadrant).
240 v1_entries: Vec<YUVCode>,
241 v4_entries: Vec<YUVCode>,
// Previous (transmitted) and current (being built) 256-entry codebooks.
242 v1_cb: [YUVCode; 256],
243 v4_cb: [YUVCode; 256],
244 v1_cur_cb: [YUVCode; 256],
245 v4_cur_cb: [YUVCode; 256],
// Average of four samples; the +3 before the shift rounds upward.
256 fn avg4(a: u8, b: u8, c: u8, d: u8) -> u8 {
257 ((u16::from(a) + u16::from(b) + u16::from(c) + u16::from(d) + 3) >> 2) as u8
// Back-patch a 24-bit big-endian chunk size at `pos` (the position right
// after the chunk id byte), then restore the writer to the end of output.
260 fn patch_size(bw: &mut ByteWriter, pos: u64) -> EncoderResult<()> {
261 let size = bw.tell() - pos;
// Seek back over the payload plus the 3-byte size field itself.
262 bw.seek(SeekFrom::Current(-((size + 3) as i64)))?;
// Stored size covers the id byte and the size field too, hence +4.
263 bw.write_u24be((size + 4) as u32)?;
264 bw.seek(SeekFrom::End(0))?;
268 impl CinepakEncoder {
// Constructor fragment: default state (surrounding fields elided here).
// Median-cut is the default quantisation mode.
275 qmode: QuantMode::MedianCut,
279 v1_entries: Vec::new(),
280 v4_entries: Vec::new(),
// Codebooks start zeroed; intra encoding fills them before first use.
281 v1_cb: [YUVCode::default(); 256],
282 v4_cb: [YUVCode::default(); 256],
283 v1_cur_cb: [YUVCode::default(); 256],
284 v4_cur_cb: [YUVCode::default(); 256],
291 masks: MaskWriter::new(),
292 skip_dist: Vec::new(),
// Split strip rows [start, end) of the input frame into V1 training
// entries (one averaged YUVCode per 4x4 block) and V4 entries (four per
// block). Populates self.v1_entries / self.v4_entries.
295 fn read_strip(&mut self, in_frm: &NAVideoBuffer<u8>, start: usize, end: usize) {
296 let ystride = in_frm.get_stride(0);
297 let mut yoff = in_frm.get_offset(0) + start * ystride;
298 let ustride = in_frm.get_stride(1);
// Chroma planes are vertically subsampled by 2, hence start / 2.
299 let mut uoff = in_frm.get_offset(1) + start / 2 * ustride;
300 let vstride = in_frm.get_stride(2);
301 let mut voff = in_frm.get_offset(2) + start / 2 * vstride;
302 let (width, _) = in_frm.get_dimensions(0);
303 let data = in_frm.get_data();
304 self.v1_entries.truncate(0);
305 self.v4_entries.truncate(0);
// Walk the strip in 4x4 pixel blocks.
306 for _ in (start..end).step_by(4) {
307 for x in (0..width).step_by(4) {
308 let mut yblk = [0; 16];
// Neutral chroma defaults, used as-is for greyscale input.
309 let mut ublk = [128; 4];
310 let mut vblk = [128; 4];
// Gather the 4x4 luma block and (for colour input) the 2x2 chroma blocks.
313 yblk[i + j * 4] = data[yoff + x + i + j * ystride];
319 ublk[i + j * 2] = data[uoff + x / 2 + i + j * ustride];
320 vblk[i + j * 2] = data[voff + x / 2 + i + j * vstride];
// V1 entry: each luma sample is the average of one 2x2 quadrant.
324 self.v1_entries.push(YUVCode {
325 y: [avg4(yblk[ 0], yblk[ 1], yblk[ 4], yblk[ 5]),
326 avg4(yblk[ 2], yblk[ 3], yblk[ 6], yblk[ 7]),
327 avg4(yblk[ 8], yblk[ 9], yblk[12], yblk[13]),
328 avg4(yblk[10], yblk[11], yblk[14], yblk[15])],
329 u: avg4(ublk[0], ublk[1], ublk[2], ublk[3]),
330 v: avg4(vblk[0], vblk[1], vblk[2], vblk[3]),
// V4 entries: yidx maps quadrant index i to the top-left sample of each
// 2x2 luma sub-block within the 4x4 block.
333 let yidx = (i & 1) * 2 + (i & 2) * 4;
334 self.v4_entries.push(YUVCode {
335 y: [ yblk[yidx], yblk[yidx + 1], yblk[yidx + 4], yblk[yidx + 5] ],
// Exhaustive nearest-neighbour search over `codebook`;
// returns (best index, distortion of the best match).
346 fn find_nearest(codebook: &[YUVCode], code: YUVCode) -> (u8, u32) {
347 let mut min_dist = std::u32::MAX;
349 for (i, cw) in codebook.iter().enumerate() {
350 let dist = cw.dist(code);
359 (idx as u8, min_dist)
// Decide whether sending a partial codebook update is cheaper than a full
// reload: full cost is cb_size bytes per entry for all 256 entries; the
// update costs cb_size per changed entry plus a 64-byte change mask.
361 fn can_update_cb(new_cb: &[YUVCode; 256], old_cb: &[YUVCode; 256], cb_size: usize) -> bool {
362 let mut skip_count = 0;
363 for (new, old) in new_cb.iter().zip(old_cb.iter()) {
368 let full_size = cb_size * 256;
369 let upd_size = cb_size * (256 - skip_count) + 64;
// Serialise one codebook chunk: a full reload, or (when `update` is set) a
// masked partial update against `old_cb`. Chroma bytes are stored with the
// sign bias flipped (^ 0x80); greyscale streams omit chroma entirely.
372 fn write_cb(bw: &mut ByteWriter, mut id: u8, new_cb: &[YUVCode; 256], old_cb: &[YUVCode; 256], grayscale: bool, update: bool) -> EncoderResult<()> {
// Remember where the chunk payload begins so patch_size() can fix it up.
381 let chunk_pos = bw.tell();
// Full-reload path: every entry, back to back.
383 for entry in new_cb.iter() {
384 bw.write_buf(&entry.y)?;
386 bw.write_byte(entry.u ^ 0x80)?;
387 bw.write_byte(entry.v ^ 0x80)?;
// Update path: scan from the end to find the last changed entry ...
392 for (i, (ncw, ocw)) in new_cb.iter().rev().zip(old_cb.iter().rev()).enumerate() {
// ... then emit 32-entry groups, each preceded by a 32-bit change mask.
399 for i in (0..end).step_by(32) {
403 if new_cb[i + j] != old_cb[i + j] {
407 bw.write_u32be(mask)?;
// Only changed entries carry payload bytes.
409 if new_cb[i + j] == old_cb[i + j] { continue; }
410 bw.write_buf(&new_cb[i + j].y)?;
412 bw.write_byte(new_cb[i + j].u ^ 0x80)?;
413 bw.write_byte(new_cb[i + j].v ^ 0x80)?;
418 patch_size(bw, chunk_pos)?;
// Reconstruct strip rows [start, end) into self.lastfrm exactly as a
// decoder would, consuming the recorded masks and V1/V4 index streams.
// The result becomes the reference for subsequent inter frames.
421 fn render_stripe(&mut self, intra: bool, start: usize, end: usize) {
422 if let Some(ref mut dst_frm) = self.lastfrm {
423 let ystride = dst_frm.get_stride(0);
424 let mut yoff = dst_frm.get_offset(0) + start * ystride;
425 let ustride = dst_frm.get_stride(1);
426 let mut uoff = dst_frm.get_offset(1) + start / 2 * ustride;
427 let vstride = dst_frm.get_stride(2);
428 let mut voff = dst_frm.get_offset(2) + start / 2 * vstride;
429 let (width, _) = dst_frm.get_dimensions(0);
430 let data = dst_frm.get_data_mut().unwrap();
// Parallel iterators over mask words and the two index streams.
431 let mut miter = self.masks.masks.iter();
432 let mut v1_iter = self.v1_idx.iter();
433 let mut v4_iter = self.v4_idx.iter();
434 let mut cur_mask = 0;
436 for _ in (start..end).step_by(4) {
437 for x in (0..width).step_by(4) {
// Intra frames with V1-only coding carry no mask words; treat every
// block as V1 by synthesising an all-ones mask.
439 if !intra || !self.v1_idx.is_empty() {
440 cur_mask = *miter.next().unwrap();
442 cur_mask = 0xFFFFFFFF;
// NOTE(review): the two mask tests below sit in elided control flow —
// presumably the first covers the inter "skip block" case; confirm
// against the full source.
447 if (cur_mask & cur_bit) == 0 {
453 cur_mask = *miter.next().unwrap();
457 if (cur_mask & cur_bit) == 0 {
// V1 block: one codebook entry; each luma value fills a 2x2 quadrant,
// chroma fills the whole 2x2 chroma block.
458 let idx = *v1_iter.next().unwrap() as usize;
459 let cb = &self.v1_cur_cb[idx];
461 let mut coff = yoff + x;
462 data[coff] = cb.y[0]; data[coff + 1] = cb.y[0];
463 data[coff + 2] = cb.y[1]; data[coff + 3] = cb.y[1];
465 data[coff] = cb.y[0]; data[coff + 1] = cb.y[0];
466 data[coff + 2] = cb.y[1]; data[coff + 3] = cb.y[1];
468 data[coff] = cb.y[2]; data[coff + 1] = cb.y[2];
469 data[coff + 2] = cb.y[3]; data[coff + 3] = cb.y[3];
471 data[coff] = cb.y[2]; data[coff + 1] = cb.y[2];
472 data[coff + 2] = cb.y[3]; data[coff + 3] = cb.y[3];
475 let mut coff = uoff + x / 2;
476 data[coff] = cb.u; data[coff + 1] = cb.u;
478 data[coff] = cb.u; data[coff + 1] = cb.u;
480 let mut coff = voff + x / 2;
481 data[coff] = cb.v; data[coff + 1] = cb.v;
483 data[coff] = cb.v; data[coff + 1] = cb.v;
// V4 block: four codebook entries, one per 2x2 quadrant
// (cb0 top-left, cb1 top-right, cb2 bottom-left, cb3 bottom-right).
486 let idx0 = *v4_iter.next().unwrap() as usize;
487 let cb0 = &self.v4_cur_cb[idx0];
488 let idx1 = *v4_iter.next().unwrap() as usize;
489 let cb1 = &self.v4_cur_cb[idx1];
490 let idx2 = *v4_iter.next().unwrap() as usize;
491 let cb2 = &self.v4_cur_cb[idx2];
492 let idx3 = *v4_iter.next().unwrap() as usize;
493 let cb3 = &self.v4_cur_cb[idx3];
495 let mut coff = yoff + x;
496 data[coff] = cb0.y[0]; data[coff + 1] = cb0.y[1];
497 data[coff + 2] = cb1.y[0]; data[coff + 3] = cb1.y[1];
499 data[coff] = cb0.y[2]; data[coff + 1] = cb0.y[3];
500 data[coff + 2] = cb1.y[2]; data[coff + 3] = cb1.y[3];
502 data[coff] = cb2.y[0]; data[coff + 1] = cb2.y[1];
503 data[coff + 2] = cb3.y[0]; data[coff + 3] = cb3.y[1];
505 data[coff] = cb2.y[2]; data[coff + 1] = cb2.y[3];
506 data[coff + 2] = cb3.y[2]; data[coff + 3] = cb3.y[3];
// Chroma: one sample per quadrant entry.
509 let mut coff = uoff + x / 2;
510 data[coff] = cb0.u; data[coff + 1] = cb1.u;
512 data[coff] = cb2.u; data[coff + 1] = cb3.u;
514 let mut coff = voff + x / 2;
515 data[coff] = cb0.v; data[coff + 1] = cb1.v;
517 data[coff] = cb2.v; data[coff + 1] = cb3.v;
// Compute, for every 4x4 block in strip rows [start, end), the squared
// difference between the input frame and the previous reconstructed frame.
// The result (self.skip_dist, one value per block) is the cost of emitting
// a skip block during inter coding.
530 fn calc_skip_dist(&mut self, in_frm: &NAVideoBuffer<u8>, start: usize, end: usize) {
531 self.skip_dist.truncate(0);
532 if let Some(ref ref_frm) = self.lastfrm {
// Reference-frame plane offsets/strides (chroma vertically halved).
533 let rystride = ref_frm.get_stride(0);
534 let mut ryoff = ref_frm.get_offset(0) + start * rystride;
535 let rustride = ref_frm.get_stride(1);
536 let mut ruoff = ref_frm.get_offset(1) + start / 2 * rustride;
537 let rvstride = ref_frm.get_stride(2);
538 let mut rvoff = ref_frm.get_offset(2) + start / 2 * rvstride;
539 let (width, _) = ref_frm.get_dimensions(0);
540 let rdata = ref_frm.get_data();
// Input-frame plane offsets/strides.
542 let iystride = in_frm.get_stride(0);
543 let mut iyoff = in_frm.get_offset(0) + start * iystride;
544 let iustride = in_frm.get_stride(1);
545 let mut iuoff = in_frm.get_offset(1) + start / 2 * iustride;
546 let ivstride = in_frm.get_stride(2);
547 let mut ivoff = in_frm.get_offset(2) + start / 2 * ivstride;
548 let idata = in_frm.get_data();
550 for _ in (start..end).step_by(4) {
551 for x in (0..width).step_by(4) {
// Luma SSD over the 4x4 block.
553 let mut roff = ryoff + x;
554 let mut ioff = iyoff + x;
557 let d = i32::from(rdata[roff + i]) - i32::from(idata[ioff + i]);
// U-plane SSD over the corresponding 2x2 chroma block.
564 let mut roff = ruoff + x / 2;
565 let mut ioff = iuoff + x / 2;
566 let ud = i32::from(rdata[roff]) - i32::from(idata[ioff]);
568 let ud = i32::from(rdata[roff + 1]) - i32::from(idata[ioff + 1]);
570 roff += rustride; ioff += iustride;
571 let ud = i32::from(rdata[roff]) - i32::from(idata[ioff]);
573 let ud = i32::from(rdata[roff + 1]) - i32::from(idata[ioff + 1]);
// V-plane SSD over the corresponding 2x2 chroma block.
576 let mut roff = rvoff + x / 2;
577 let mut ioff = ivoff + x / 2;
578 let vd = i32::from(rdata[roff]) - i32::from(idata[ioff]);
580 let vd = i32::from(rdata[roff + 1]) - i32::from(idata[ioff + 1]);
582 roff += rvstride; ioff += ivstride;
583 let vd = i32::from(rdata[roff]) - i32::from(idata[ioff]);
585 let vd = i32::from(rdata[roff + 1]) - i32::from(idata[ioff + 1]);
588 self.skip_dist.push(dist as u32);
// Advance to the next block row: 4 luma rows, 2 chroma rows.
591 iyoff += iystride * 4;
592 iuoff += iustride * 2;
593 ivoff += ivstride * 2;
594 ryoff += rystride * 4;
595 ruoff += rustride * 2;
596 rvoff += rvstride * 2;
602 fn quant_vectors(&mut self) {
605 let mut elbg_v1: ELBG<YUVCode, YUVCodeSum> = ELBG::new(&self.v1_cb);
606 let mut elbg_v4: ELBG<YUVCode, YUVCodeSum> = ELBG::new(&self.v4_cb);
608 for entry in self.v1_cb.iter_mut().skip(self.v1_len) {
609 self.rng.fill_entry(entry);
611 for entry in self.v4_cb.iter_mut().skip(self.v4_len) {
612 self.rng.fill_entry(entry);
615 self.v1_len = elbg_v1.quantise(&self.v1_entries, &mut self.v1_cur_cb);
616 self.v4_len = elbg_v4.quantise(&self.v4_entries, &mut self.v4_cur_cb);
618 QuantMode::Hybrid => {
619 quantise_median_cut::<YUVCode, YUVCodeSum>(&self.v1_entries, &mut self.v1_cur_cb);
620 quantise_median_cut::<YUVCode, YUVCodeSum>(&self.v4_entries, &mut self.v4_cur_cb);
621 let mut elbg_v1: ELBG<YUVCode, YUVCodeSum> = ELBG::new(&self.v1_cur_cb);
622 let mut elbg_v4: ELBG<YUVCode, YUVCodeSum> = ELBG::new(&self.v4_cur_cb);
623 self.v1_len = elbg_v1.quantise(&self.v1_entries, &mut self.v1_cur_cb);
624 self.v4_len = elbg_v4.quantise(&self.v4_entries, &mut self.v4_cur_cb);
626 QuantMode::MedianCut => {
627 self.v1_len = quantise_median_cut::<YUVCode, YUVCodeSum>(&self.v1_entries, &mut self.v1_cur_cb);
628 self.v4_len = quantise_median_cut::<YUVCode, YUVCodeSum>(&self.v4_entries, &mut self.v4_cur_cb);
632 for e in self.v1_cur_cb.iter_mut().skip(self.v1_len) { *e = YUVCode::default(); }
633 for e in self.v4_cur_cb.iter_mut().skip(self.v4_len) { *e = YUVCode::default(); }
// Encode one intra (key) frame: frame header, then per-strip codebooks and
// V1/V4 index chunks. Returns Ok(true) on success (intra flag for the packet).
635 fn encode_intra(&mut self, bw: &mut ByteWriter, in_frm: &NAVideoBuffer<u8>) -> EncoderResult<bool> {
636 let (width, height) = in_frm.get_dimensions(0);
// Strip height rounded up to a multiple of 4 (block granularity).
637 let mut strip_h = (height / self.nstrips + 3) & !3;
642 let mut start_line = 0;
643 let mut end_line = strip_h;
// Frame header: flags byte, 24-bit size placeholder (patched later),
// dimensions and strip count.
645 bw.write_byte(0)?; // intra flag
646 bw.write_u24be(0)?; // frame size
647 let frame_data_pos = bw.tell();
648 bw.write_u16be(width as u16)?;
649 bw.write_u16be(height as u16)?;
650 bw.write_u16be(self.nstrips as u16)?;
// Randomise previous codebooks so the first strip's update decision
// compares against noise rather than stale data.
652 for entry in self.v1_cb.iter_mut() {
653 self.rng.fill_entry(entry);
655 for entry in self.v4_cb.iter_mut() {
656 self.rng.fill_entry(entry);
658 while start_line < height {
659 self.read_strip(in_frm, start_line, end_line);
661 self.quant_vectors();
// Greyscale: elided code here presumably neutralises chroma in the
// codebooks — confirm against full source.
663 for cw in self.v1_cur_cb.iter_mut() {
667 for cw in self.v4_cur_cb.iter_mut() {
673 self.v1_idx.truncate(0);
674 self.v4_idx.truncate(0);
// Per 4x4 block: choose V1 (one index) or V4 (four indices) by comparing
// total distortion of the four V4 quadrants against the V1 distortion.
677 for (v1_entry, v4_entries) in self.v1_entries.iter().zip(self.v4_entries.chunks(4)) {
678 let (v1_idx, v1_dist) = Self::find_nearest(&self.v1_cur_cb[..self.v1_len], *v1_entry);
681 self.v1_idx.push(v1_idx);
684 let (v40_idx, v40_dist) = Self::find_nearest(&self.v4_cur_cb[..self.v4_len], v4_entries[0]);
685 let (v41_idx, v41_dist) = Self::find_nearest(&self.v4_cur_cb[..self.v4_len], v4_entries[1]);
686 let (v42_idx, v42_dist) = Self::find_nearest(&self.v4_cur_cb[..self.v4_len], v4_entries[2]);
687 let (v43_idx, v43_dist) = Self::find_nearest(&self.v4_cur_cb[..self.v4_len], v4_entries[3]);
688 if v40_dist + v41_dist + v42_dist + v43_dist > v1_dist {
690 self.v4_idx.push(v40_idx);
691 self.v4_idx.push(v41_idx);
692 self.v4_idx.push(v42_idx);
693 self.v4_idx.push(v43_idx);
696 self.v1_idx.push(v1_idx);
// The first strip is always intra; later strips fall back to intra when
// neither codebook can be expressed as a cheap partial update.
701 let mut is_intra_strip = start_line == 0;
702 let (upd_v1, upd_v4) = if !is_intra_strip {
// Entry size: 4 luma bytes, +2 chroma bytes for colour streams.
703 let cb_size = if self.grayscale { 4 } else { 6 };
704 (Self::can_update_cb(&self.v1_cur_cb, &self.v1_cb, cb_size),
705 Self::can_update_cb(&self.v4_cur_cb, &self.v4_cb, cb_size))
709 if !is_intra_strip && !upd_v1 && !upd_v4 {
710 is_intra_strip = true;
// Strip header: 0x10 = intra strip, 0x11 = inter strip.
712 bw.write_byte(if is_intra_strip { 0x10 } else { 0x11 })?;
713 bw.write_u24be(0)?; // strip size
714 let strip_data_pos = bw.tell();
715 bw.write_u16be(0)?; // yoff
716 bw.write_u16be(0)?; // xoff
717 bw.write_u16be((end_line - start_line) as u16)?;
718 bw.write_u16be(width as u16)?;
// Codebook chunks: 0x20 family = V4, 0x22 family = V1.
720 Self::write_cb(bw, 0x20, &self.v4_cur_cb, &self.v4_cb, self.grayscale, upd_v4)?;
721 Self::write_cb(bw, 0x22, &self.v1_cur_cb, &self.v1_cb, self.grayscale, upd_v1)?;
// Reconstruct this strip as the decoder will see it.
723 self.render_stripe(true, start_line, end_line);
// Index chunk: 0x32 = all-V1 (no masks needed), 0x30 = mixed V1/V4
// with mask words selecting the mode per block.
725 if self.v4_idx.is_empty() {
726 bw.write_byte(0x32)?;
727 bw.write_u24be((self.v1_idx.len() + 4) as u32)?;
728 bw.write_buf(self.v1_idx.as_slice())?;
730 bw.write_byte(0x30)?;
732 let chunk_pos = bw.tell();
// Mask bit clear => V1 (one index); set => V4 (four indices).
742 for mask in self.masks.masks.iter() {
743 bw.write_u32be(*mask)?;
744 for j in (0..32).rev() {
745 if (mask & (1 << j)) == 0 {
746 bw.write_byte(self.v1_idx[v1_pos])?;
749 bw.write_byte(self.v4_idx[v4_pos])?;
750 bw.write_byte(self.v4_idx[v4_pos + 1])?;
751 bw.write_byte(self.v4_idx[v4_pos + 2])?;
752 bw.write_byte(self.v4_idx[v4_pos + 3])?;
757 patch_size(bw, chunk_pos)?;
760 patch_size(bw, strip_data_pos)?;
// Current codebooks become the reference for the next strip/frame.
762 self.v1_cb.copy_from_slice(&self.v1_cur_cb);
763 self.v4_cb.copy_from_slice(&self.v4_cur_cb);
764 start_line = end_line;
765 end_line = (end_line + strip_h).min(height);
767 patch_size(bw, frame_data_pos)?;
// Encode one inter frame: like encode_intra, but each block may also be
// skipped (copied from the reference) when the skip distortion beats the
// best V1 match. Returns Ok(false) (non-intra flag for the packet).
770 fn encode_inter(&mut self, bw: &mut ByteWriter, in_frm: &NAVideoBuffer<u8>) -> EncoderResult<bool> {
771 let (width, height) = in_frm.get_dimensions(0);
772 let mut strip_h = (height / self.nstrips + 3) & !3;
777 let mut start_line = 0;
778 let mut end_line = strip_h;
// Frame header; flags byte 1 marks a non-key frame.
780 bw.write_byte(1)?; // intra flag
781 bw.write_u24be(0)?; // frame size
782 let frame_data_pos = bw.tell();
783 bw.write_u16be(width as u16)?;
784 bw.write_u16be(height as u16)?;
785 bw.write_u16be(self.nstrips as u16)?;
787 while start_line < height {
788 self.read_strip(in_frm, start_line, end_line);
// Per-block cost of reusing the reference frame unchanged.
789 self.calc_skip_dist(in_frm, start_line, end_line);
791 self.quant_vectors();
// Greyscale chroma neutralisation — body elided here; confirm against
// the full source.
793 for cw in self.v1_cur_cb.iter_mut() {
797 for cw in self.v4_cur_cb.iter_mut() {
803 self.v1_idx.truncate(0);
804 self.v4_idx.truncate(0);
// Per block: skip if cheaper than the best V1 match, otherwise choose
// between V1 and V4 exactly as in intra coding.
807 let mut skip_iter = self.skip_dist.iter();
808 for (v1_entry, v4_entries) in self.v1_entries.iter().zip(self.v4_entries.chunks(4)) {
809 let skip_dist = *skip_iter.next().unwrap();
811 self.masks.put_inter(true);
814 let (v1_idx, v1_dist) = Self::find_nearest(&self.v1_cur_cb[..self.v1_len], *v1_entry);
815 if skip_dist < v1_dist {
816 self.masks.put_inter(true);
819 self.masks.put_inter(false);
823 self.v1_idx.push(v1_idx);
826 let (v40_idx, v40_dist) = Self::find_nearest(&self.v4_cur_cb[..self.v4_len], v4_entries[0]);
827 let (v41_idx, v41_dist) = Self::find_nearest(&self.v4_cur_cb[..self.v4_len], v4_entries[1]);
828 let (v42_idx, v42_dist) = Self::find_nearest(&self.v4_cur_cb[..self.v4_len], v4_entries[2]);
829 let (v43_idx, v43_dist) = Self::find_nearest(&self.v4_cur_cb[..self.v4_len], v4_entries[3]);
830 if v40_dist + v41_dist + v42_dist + v43_dist > v1_dist {
832 self.v4_idx.push(v40_idx);
833 self.v4_idx.push(v41_idx);
834 self.v4_idx.push(v42_idx);
835 self.v4_idx.push(v43_idx);
838 self.v1_idx.push(v1_idx);
// Inter strips always use partial-vs-full codebook cost comparison.
843 let (upd_v1, upd_v4) = {
844 let cb_size = if self.grayscale { 4 } else { 6 };
845 (Self::can_update_cb(&self.v1_cur_cb, &self.v1_cb, cb_size),
846 Self::can_update_cb(&self.v4_cur_cb, &self.v4_cb, cb_size))
// Strip header (0x11 = inter strip).
848 bw.write_byte(0x11)?;
849 bw.write_u24be(0)?; // strip size
850 let strip_data_pos = bw.tell();
851 bw.write_u16be(0)?; // yoff
852 bw.write_u16be(0)?; // xoff
853 bw.write_u16be((end_line - start_line) as u16)?;
854 bw.write_u16be(width as u16)?;
856 Self::write_cb(bw, 0x20, &self.v4_cur_cb, &self.v4_cb, self.grayscale, upd_v4)?;
857 Self::write_cb(bw, 0x22, &self.v1_cur_cb, &self.v1_cb, self.grayscale, upd_v1)?;
859 self.render_stripe(false, start_line, end_line);
// Index chunk 0x31: inter blocks with skip bits in the mask words.
861 bw.write_byte(0x31)?;
863 let chunk_pos = bw.tell();
874 for mask in self.masks.masks.iter() {
875 bw.write_u32be(*mask)?;
// All-zero mask word: every block in this word is skipped.
876 if *mask == 0 { continue; }
877 let mut bit = 1 << 31;
// NOTE(review): bit consumption order for skip vs V1/V4 flags is in
// elided lines; the visible code reads one skip bit, then a mode bit.
880 skip = (mask & bit) == 0;
883 if (mask & bit) == 0 {
884 bw.write_byte(self.v1_idx[v1_pos])?;
887 bw.write_byte(self.v4_idx[v4_pos])?;
888 bw.write_byte(self.v4_idx[v4_pos + 1])?;
889 bw.write_byte(self.v4_idx[v4_pos + 2])?;
890 bw.write_byte(self.v4_idx[v4_pos + 3])?;
898 patch_size(bw, chunk_pos)?;
900 patch_size(bw, strip_data_pos)?;
// Promote current codebooks to reference for the next strip/frame.
902 self.v1_cb.copy_from_slice(&self.v1_cur_cb);
903 self.v4_cb.copy_from_slice(&self.v4_cur_cb);
904 start_line = end_line;
905 end_line = (end_line + strip_h).min(height);
907 patch_size(bw, frame_data_pos)?;
912 impl NAEncoder for CinepakEncoder {
// Propose encoder-compatible parameters: YUV420 planar (or the private
// greyscale format), dimensions rounded up to multiples of 4.
913 fn negotiate_format(&self, encinfo: &EncodeParameters) -> EncoderResult<EncodeParameters> {
914 match encinfo.format {
915 NACodecTypeInfo::None => {
916 let mut ofmt = EncodeParameters::default();
917 ofmt.format = NACodecTypeInfo::Video(NAVideoInfo::new(0, 0, true, YUV420_FORMAT));
920 NACodecTypeInfo::Audio(_) => Err(EncoderError::FormatError),
921 NACodecTypeInfo::Video(vinfo) => {
// Keep greyscale input greyscale; everything else becomes YUV420.
922 let pix_fmt = if vinfo.format == GRAY_FORMAT { GRAY_FORMAT } else { YUV420_FORMAT };
923 let outinfo = NAVideoInfo::new((vinfo.width + 3) & !3, (vinfo.height + 3) & !3, true, pix_fmt);
924 let mut ofmt = *encinfo;
925 ofmt.format = NACodecTypeInfo::Video(outinfo);
// Validate negotiated parameters, create the output stream and allocate
// the reference-frame buffer.
930 fn init(&mut self, stream_id: u32, encinfo: EncodeParameters) -> EncoderResult<NAStreamRef> {
931 match encinfo.format {
932 NACodecTypeInfo::None => Err(EncoderError::FormatError),
933 NACodecTypeInfo::Audio(_) => Err(EncoderError::FormatError),
934 NACodecTypeInfo::Video(vinfo) => {
935 if vinfo.format != YUV420_FORMAT && vinfo.format != GRAY_FORMAT {
936 return Err(EncoderError::FormatError);
// Dimensions must be multiples of 4 and fit in the 16-bit header fields.
938 if ((vinfo.width | vinfo.height) & 3) != 0 {
939 return Err(EncoderError::FormatError);
941 if (vinfo.width | vinfo.height) >= (1 << 16) {
942 return Err(EncoderError::FormatError);
945 let out_info = NAVideoInfo::new(vinfo.width, vinfo.height, false, vinfo.format);
946 let info = NACodecInfo::new("cinepak", NACodecTypeInfo::Video(out_info), None);
947 let mut stream = NAStream::new(StreamType::Video, stream_id, info, encinfo.tb_num, encinfo.tb_den);
948 stream.set_num(stream_id as usize);
949 let stream = stream.into_ref();
951 self.stream = Some(stream.clone());
952 self.quality = encinfo.quality;
953 self.grayscale = vinfo.format != YUV420_FORMAT;
// Pre-size the work vectors from the block counts of the frame.
954 let num_blocks = vinfo.width / 2 * vinfo.height / 2;
955 self.v1_entries = Vec::with_capacity(num_blocks);
956 self.v4_entries = Vec::with_capacity(num_blocks * 4);
957 self.v1_idx = Vec::with_capacity(num_blocks);
958 self.v4_idx = Vec::with_capacity(num_blocks * 4);
959 self.skip_dist = Vec::with_capacity(vinfo.width / 4 * vinfo.height / 4);
// Reference frame buffer for reconstruction / inter prediction.
961 let buf = alloc_video_buffer(out_info, 2)?;
962 self.lastfrm = Some(buf.get_vbuf().unwrap());
// Encode one input frame into a packet: intra when the keyframe counter
// has wrapped, inter otherwise.
968 fn encode(&mut self, frm: &NAFrame) -> EncoderResult<()> {
969 let buf = frm.get_buffer();
970 if let Some(ref vbuf) = buf.get_vbuf() {
971 let mut dbuf = Vec::with_capacity(4);
972 let mut gw = GrowableMemoryWriter::new_write(&mut dbuf);
973 let mut bw = ByteWriter::new(&mut gw);
974 let is_intra = if self.frmcount == 0 {
975 self.encode_intra(&mut bw, vbuf)?
977 self.encode_inter(&mut bw, vbuf)?
979 self.pkt = Some(NAPacket::new(self.stream.clone().unwrap(), frm.ts, is_intra, dbuf));
// Reset the counter at the keyframe interval (reset code elided here).
981 if self.frmcount == self.key_int {
// Non-video input is rejected.
986 Err(EncoderError::InvalidParameters)
// Hand out the pending packet (if any), leaving None behind.
989 fn get_packet(&mut self) -> EncoderResult<Option<NAPacket>> {
991 std::mem::swap(&mut self.pkt, &mut npkt);
994 fn flush(&mut self) -> EncoderResult<()> {
// User-settable encoder options: keyframe interval, strip count and the
// vector-quantisation mode.
1000 const ENCODER_OPTS: &[NAOptionDefinition] = &[
1001 NAOptionDefinition {
1002 name: KEYFRAME_OPTION, description: KEYFRAME_OPTION_DESC,
1003 opt_type: NAOptionDefinitionType::Int(Some(0), Some(128)) },
1004 NAOptionDefinition {
1005 name: "nstrips", description: "Number of strips per frame (0 - automatic)",
1006 opt_type: NAOptionDefinitionType::Int(Some(0), Some(16)) },
1007 NAOptionDefinition {
1008 name: "quant_mode", description: "Quantisation mode",
1009 opt_type: NAOptionDefinitionType::String(Some(&["elbg", "hybrid", "mediancut"])) },
1012 impl NAOptionHandler for CinepakEncoder {
1013 fn get_supported_options(&self) -> &[NAOptionDefinition] { ENCODER_OPTS }
// Apply recognised options, silently ignoring anything that fails
// the definition check.
1014 fn set_options(&mut self, options: &[NAOption]) {
1015 for option in options.iter() {
1016 for opt_def in ENCODER_OPTS.iter() {
1017 if opt_def.check(option).is_ok() {
1019 KEYFRAME_OPTION => {
1020 if let NAValue::Int(intval) = option.value {
1021 self.key_int = intval as u8;
// "nstrips" arm — match pattern line elided in this view.
1025 if let NAValue::Int(intval) = option.value {
1026 self.nstrips = intval as usize;
// "quant_mode" arm — accepts the three names from ENCODER_OPTS.
1030 if let NAValue::String(ref str) = option.value {
1031 match str.as_str() {
1032 "elbg" => self.qmode = QuantMode::ELBG,
1033 "hybrid" => self.qmode = QuantMode::Hybrid,
1034 "mediancut" => self.qmode = QuantMode::MedianCut,
// Report current values for the supported options.
1045 fn query_option_value(&self, name: &str) -> Option<NAValue> {
1047 KEYFRAME_OPTION => Some(NAValue::Int(i64::from(self.key_int))),
1048 "nstrips" => Some(NAValue::Int(self.nstrips as i64)),
1049 "quant_mode" => Some(NAValue::String(self.qmode.to_string())),
// Public factory: boxed Cinepak encoder instance for codec registration.
1055 pub fn get_encoder() -> Box<dyn NAEncoder + Send> {
1056 Box::new(CinepakEncoder::new())
1061 use nihav_core::codecs::*;
1062 use nihav_core::demuxers::*;
1063 use nihav_core::muxers::*;
1065 use nihav_codec_support::test::enc_video::*;
// End-to-end smoke test: decode a sample AVI, re-encode with this encoder
// and mux the result to a file (requires the local asset below).
1068 fn test_cinepak_encoder() {
// Register every available demuxer/decoder/muxer/encoder.
1069 let mut dmx_reg = RegisteredDemuxers::new();
1070 generic_register_all_demuxers(&mut dmx_reg);
1071 let mut dec_reg = RegisteredDecoders::new();
1072 generic_register_all_codecs(&mut dec_reg);
1073 let mut mux_reg = RegisteredMuxers::new();
1074 generic_register_all_muxers(&mut mux_reg);
1075 let mut enc_reg = RegisteredEncoders::new();
1076 generic_register_all_encoders(&mut enc_reg);
1078 let dec_config = DecoderTestParams {
1080 in_name: "assets/Misc/TalkingHead_352x288.avi",
1081 stream_type: StreamType::Video,
1085 let enc_config = EncoderTestParams {
1087 enc_name: "cinepak",
1088 out_name: "cinepak.avi",
1091 let dst_vinfo = NAVideoInfo {
1094 format: YUV420_FORMAT,
1098 let enc_params = EncodeParameters {
1099 format: NACodecTypeInfo::Video(dst_vinfo),
1106 test_encoding_to_file(&dec_config, &enc_config, enc_params);