use nihav_core::codecs::*;
use nihav_core::io::byteio::*;
use nihav_codec_support::vq::*;
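// One vector-quantisation entry: four luma samples plus a single U/V pair.
// A V4 entry describes a 2x2 block; a V1 entry describes a whole 4x4 block
// with every component averaged down by two.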
#[derive(Default,Clone,Copy,PartialEq,Debug)]
struct YUVCode {
    y: [u8; 4],
    u: u8,
    v: u8,
}
impl VQElement for YUVCode {
    fn dist(&self, rval: Self) -> u32 {
        let mut ysum = 0;
        for (y0, y1) in self.y.iter().zip(rval.y.iter()) {
            let yd = i32::from(*y0) - i32::from(*y1);
            ysum += yd * yd;
        }
        let ud = i32::from(self.u) - i32::from(rval.u);
        let vd = i32::from(self.v) - i32::from(rval.v);
        (ysum + ud * ud + vd * vd) as u32
    }
    fn min_cw() -> Self { YUVCode { y: [0; 4], u: 0, v: 0 } }
    fn max_cw() -> Self { YUVCode { y: [255; 4], u: 255, v: 255 } }
    fn min(&self, rval: Self) -> Self {
        let mut ycode = YUVCode::default();
        for i in 0..4 {
            ycode.y[i] = self.y[i].min(rval.y[i]);
        }
        ycode.u = self.u.min(rval.u);
        ycode.v = self.v.min(rval.v);
        ycode
    }
    fn max(&self, rval: Self) -> Self {
        let mut ycode = YUVCode::default();
        for i in 0..4 {
            ycode.y[i] = self.y[i].max(rval.y[i]);
        }
        ycode.u = self.u.max(rval.u);
        ycode.v = self.v.max(rval.v);
        ycode
    }
    fn num_components() -> usize { 6 }
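    // Counting sort of the whole slice by one component (0-3: luma, 4: U, 5: V);
    // the median-cut quantiser uses this to order entries along its split axis.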
    fn sort_by_component(arr: &mut [Self], component: usize) {
        let mut counts = [0; 256];
        for entry in arr.iter() {
            let idx = match component {
                    0 | 1 | 2 | 3 => entry.y[component],
                    4 => entry.u,
                    _ => entry.v,
                } as usize;
            counts[idx] += 1;
        }
        let mut offs = [0; 256];
        for i in 0..255 {
            offs[i + 1] = offs[i] + counts[i];
        }
        let mut dst = vec![YUVCode::default(); arr.len()];
        for entry in arr.iter() {
            let idx = match component {
                    0 | 1 | 2 | 3 => entry.y[component],
                    4 => entry.u,
                    _ => entry.v,
                } as usize;
            dst[offs[idx]] = *entry;
            offs[idx] += 1;
        }
        arr.copy_from_slice(dst.as_slice());
    }
    fn max_dist_component(min: &Self, max: &Self) -> usize {
        let mut mdist = 0;
        let mut mcomp = 0;
        for i in 0..4 {
            let d = u32::from(max.y[i]) - u32::from(min.y[i]);
            if d > mdist {
                mdist = d;
                mcomp = i;
            }
        }
        let ud = u32::from(max.u) - u32::from(min.u);
        if ud > mdist {
            mdist = ud;
            mcomp = 4;
        }
        let vd = u32::from(max.v) - u32::from(min.v);
        if vd > mdist {
            mcomp = 5;
        }
        mcomp
    }
}
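// Per-component running sums used to derive cluster centroids during quantisation.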
#[derive(Default)]
struct YUVCodeSum {
    ysum:  [u64; 4],
    usum:  u64,
    vsum:  u64,
    count: u64,
}

impl VQElementSum<YUVCode> for YUVCodeSum {
    fn zero() -> Self { Self::default() }
    fn add(&mut self, rval: YUVCode, count: u64) {
        for i in 0..4 {
            self.ysum[i] += u64::from(rval.y[i]) * count;
        }
        self.usum += u64::from(rval.u) * count;
        self.vsum += u64::from(rval.v) * count;
        self.count += count;
    }
    fn get_centroid(&self) -> YUVCode {
        if self.count != 0 {
            let mut ycode = YUVCode::default();
            for i in 0..4 {
                ycode.y[i] = ((self.ysum[i] + self.count / 2) / self.count) as u8;
            }
            ycode.u = ((self.usum + self.count / 2) / self.count) as u8;
            ycode.v = ((self.vsum + self.count / 2) / self.count) as u8;
            ycode
        } else {
            YUVCode::default()
        }
    }
}
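// Small xorshift-style PRNG used to fill codebook entries that have no
// statistics yet with pseudo-random values.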
struct RNG {
    seed: u32,
}

impl RNG {
    fn new() -> Self { Self { seed: 0x12345678 } }
    fn next(&mut self) -> u8 {
        let mut x = self.seed;
        x ^= x.wrapping_shl(13);
        x ^= x >> 17;
        self.seed = x;
        (self.seed >> 24) as u8
    }
    fn fill_entry(&mut self, entry: &mut YUVCode) {
        for y in entry.y.iter_mut() {
            *y = self.next();
        }
        entry.u = self.next();
        entry.v = self.next();
    }
}
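// Pixel format accepted for greyscale input: a single full-range 8-bit luma plane.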
const GRAY_FORMAT: NAPixelFormaton = NAPixelFormaton {
    model: ColorModel::YUV(YUVSubmodel::YUVJ),
    components: 1,
    comp_info: [Some(NAPixelChromaton{h_ss: 0, v_ss: 0, packed: false, depth: 8, shift: 0, comp_offs: 0, next_elem: 1}), None, None, None, None],
    // ... (remaining format descriptor fields)
};
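// Accumulates per-block coding decisions as 32-bit flag words, MSB first.
// In intra strips a clear bit means "V1-coded" and a set bit "V4-coded";
// in inter strips the first flag per block tells whether it is coded at all.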
struct MaskWriter {
    masks: Vec<u32>,
    mask:  u32,
    pos:   u8,
}

impl MaskWriter {
    fn new() -> Self { Self { masks: Vec::new(), mask: 0, pos: 0 } }
    fn reset(&mut self) {
        self.masks.clear();
        self.mask = 0;
        self.pos  = 0;
    }
    fn put_v1(&mut self) {
        self.mask <<= 1;
        self.pos  += 1;
        if self.pos == 32 { self.flush(); }
    }
    fn put_v4(&mut self) {
        self.mask <<= 1;
        self.mask  |= 1;
        self.pos  += 1;
        if self.pos == 32 { self.flush(); }
    }
    fn put_inter(&mut self, skip: bool) {
        self.mask <<= 1;
        self.mask |= !skip as u32;
        self.pos  += 1;
        if self.pos == 32 { self.flush(); }
    }
    fn flush(&mut self) {
        self.masks.push(self.mask);
        self.mask = 0;
        self.pos  = 0;
    }
    fn end(&mut self) { // flush a partially filled flag word
        if self.pos == 0 { return; }
        while self.pos < 32 {
            self.mask <<= 1;
            self.pos  += 1;
        }
        self.masks.push(self.mask);
    }
}
#[derive(Clone,Copy,PartialEq)]
enum QuantMode {
    ELBG,
    Hybrid,
    MedianCut,
}

impl std::string::ToString for QuantMode {
    fn to_string(&self) -> String {
        match *self {
            QuantMode::ELBG      => "elbg".to_string(),
            QuantMode::Hybrid    => "hybrid".to_string(),
            QuantMode::MedianCut => "mediancut".to_string(),
        }
    }
}
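// Encoder state: output stream, the last reconstructed frame used for
// inter prediction, per-strip V1/V4 codebooks (previous and current) and
// scratch buffers for the strip being coded.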
struct CinepakEncoder {
    stream:     Option<NAStreamRef>,
    lastfrm:    Option<NAVideoBufferRef<u8>>,
    pkt:        Option<NAPacket>,
    frmcount:   u8,
    key_int:    u8,
    qmode:      QuantMode,
    quality:    u8,
    nstrips:    usize,
    cur_strip:  usize,
    v1_entries: Vec<YUVCode>,
    v4_entries: Vec<YUVCode>,
    v1_cb:      Vec<[YUVCode; 256]>,
    v4_cb:      Vec<[YUVCode; 256]>,
    v1_cur_cb:  Vec<[YUVCode; 256]>,
    v4_cur_cb:  Vec<[YUVCode; 256]>,
    v1_len:     usize,
    v4_len:     usize,
    v1_idx:     Vec<u8>,
    v4_idx:     Vec<u8>,
    grayscale:  bool,
    rng:        RNG,
    masks:      MaskWriter,
    skip_dist:  Vec<u32>,
}
fn avg4(a: u8, b: u8, c: u8, d: u8) -> u8 {
    ((u16::from(a) + u16::from(b) + u16::from(c) + u16::from(d) + 3) >> 2) as u8
}
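// Rewinds and fills in the 24-bit big-endian size field of the chunk whose
// payload starts at `pos`; the stored size includes the 4-byte chunk header.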
fn patch_size(bw: &mut ByteWriter, pos: u64) -> EncoderResult<()> {
    let size = bw.tell() - pos;
    bw.seek(SeekFrom::Current(-((size + 3) as i64)))?;
    bw.write_u24be((size + 4) as u32)?;
    bw.seek(SeekFrom::End(0))?;
    Ok(())
}
impl CinepakEncoder {
    fn new() -> Self {
        Self {
            stream:     None,
            lastfrm:    None,
            pkt:        None,
            // ... (frame counter, keyframe interval, quality and strip counters)
            qmode:      QuantMode::MedianCut,
            // ...
            v1_entries: Vec::new(),
            v4_entries: Vec::new(),
            v1_cb:      Vec::with_capacity(2),
            v4_cb:      Vec::with_capacity(2),
            v1_cur_cb:  Vec::with_capacity(2),
            v4_cur_cb:  Vec::with_capacity(2),
            // ... (index buffers, RNG and per-block skip distortions)
            masks:      MaskWriter::new(),
            skip_dist:  Vec::new(),
        }
    }
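    // Splits one strip into 4x4 blocks and builds the quantiser input:
    // one V1 vector per block (everything averaged down 2x) and four V4
    // vectors, one per 2x2 quarter of the block.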
    fn read_strip(&mut self, in_frm: &NAVideoBuffer<u8>, start: usize, end: usize) {
        let ystride = in_frm.get_stride(0);
        let mut yoff = in_frm.get_offset(0) + start * ystride;
        let ustride = in_frm.get_stride(1);
        let mut uoff = in_frm.get_offset(1) + start / 2 * ustride;
        let vstride = in_frm.get_stride(2);
        let mut voff = in_frm.get_offset(2) + start / 2 * vstride;
        let (width, _) = in_frm.get_dimensions(0);
        let data = in_frm.get_data();
        self.v1_entries.clear();
        self.v4_entries.clear();
        for _ in (start..end).step_by(4) {
            for x in (0..width).step_by(4) {
                let mut yblk = [0; 16];
                let mut ublk = [128; 4];
                let mut vblk = [128; 4];
                for j in 0..4 {
                    for i in 0..4 {
                        yblk[i + j * 4] = data[yoff + x + i + j * ystride];
                    }
                }
                if !self.grayscale {
                    for j in 0..2 {
                        for i in 0..2 {
                            ublk[i + j * 2] = data[uoff + x / 2 + i + j * ustride];
                            vblk[i + j * 2] = data[voff + x / 2 + i + j * vstride];
                        }
                    }
                }
                self.v1_entries.push(YUVCode {
                        y: [avg4(yblk[ 0], yblk[ 1], yblk[ 4], yblk[ 5]),
                            avg4(yblk[ 2], yblk[ 3], yblk[ 6], yblk[ 7]),
                            avg4(yblk[ 8], yblk[ 9], yblk[12], yblk[13]),
                            avg4(yblk[10], yblk[11], yblk[14], yblk[15])],
                        u: avg4(ublk[0], ublk[1], ublk[2], ublk[3]),
                        v: avg4(vblk[0], vblk[1], vblk[2], vblk[3]),
                    });
                for i in 0..4 {
                    let yidx = (i & 1) * 2 + (i & 2) * 4;
                    self.v4_entries.push(YUVCode {
                            y: [ yblk[yidx], yblk[yidx + 1], yblk[yidx + 4], yblk[yidx + 5] ],
                            u: ublk[i],
                            v: vblk[i],
                        });
                }
            }
            yoff += ystride * 4;
            uoff += ustride * 2;
            voff += vstride * 2;
        }
    }
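    // Exhaustive nearest-neighbour search in the codebook; returns the best
    // index and its squared distance.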
    fn find_nearest(codebook: &[YUVCode], code: YUVCode) -> (u8, u32) {
        let mut min_dist = std::u32::MAX;
        let mut idx = 0;
        for (i, cw) in codebook.iter().enumerate() {
            let dist = cw.dist(code);
            if dist < min_dist {
                min_dist = dist;
                idx = i;
                if dist == 0 {
                    break;
                }
            }
        }
        (idx as u8, min_dist)
    }
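    // Compares the cost of a partial codebook update (changed entries plus one
    // flag bit per entry) against retransmitting the codebook in full.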
    fn can_update_cb(new_cb: &[YUVCode], old_cb: &[YUVCode], cb_size: usize) -> bool {
        let mut skip_count = 0;
        for (new, old) in new_cb.iter().zip(old_cb.iter()) {
            if new == old {
                skip_count += 1;
            }
        }
        let full_size = cb_size * new_cb.len();
        let upd_size = cb_size * (new_cb.len() - skip_count) + (new_cb.len() + 31) / 32 * 4;
        upd_size < full_size
    }
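    // Writes a codebook chunk: either all `num_elem` entries in order or, for
    // an update, 32-bit change masks followed by only the entries that changed.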
    fn write_cb(bw: &mut ByteWriter, mut id: u8, new_cb: &[YUVCode], old_cb: &[YUVCode], grayscale: bool, update: bool, num_elem: usize) -> EncoderResult<()> {
        if grayscale {
            id |= 4;
        }
        if update {
            id |= 1;
        }
        bw.write_byte(id)?;
        bw.write_u24be(0)?;
        let chunk_pos = bw.tell();
        if !update {
            for entry in new_cb.iter().take(num_elem) {
                bw.write_buf(&entry.y)?;
                if !grayscale {
                    bw.write_byte(entry.u ^ 0x80)?;
                    bw.write_byte(entry.v ^ 0x80)?;
                }
            }
        } else {
            let mut end = num_elem;
            for (i, (ncw, ocw)) in new_cb.iter().zip(old_cb.iter()).enumerate().take(num_elem).rev() {
                if ncw == ocw {
                    end = i;
                } else {
                    break;
                }
            }
            for i in (0..end).step_by(32) {
                let mut mask = 0;
                for j in 0..32 {
                    if new_cb[i + j] != old_cb[i + j] {
                        mask |= 1 << (31 - j);
                    }
                }
                bw.write_u32be(mask)?;
                for j in 0..32 {
                    if new_cb[i + j] == old_cb[i + j] { continue; }
                    bw.write_buf(&new_cb[i + j].y)?;
                    if !grayscale {
                        bw.write_byte(new_cb[i + j].u ^ 0x80)?;
                        bw.write_byte(new_cb[i + j].v ^ 0x80)?;
                    }
                }
            }
        }
        patch_size(bw, chunk_pos)?;
        Ok(())
    }
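    // Renders the strip into the reference frame exactly as a decoder would,
    // so later inter frames are predicted from reconstructed (not source) data.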
    fn render_stripe(&mut self, intra: bool, start: usize, end: usize) {
        if let Some(ref mut dst_frm) = self.lastfrm {
            let ystride = dst_frm.get_stride(0);
            let mut yoff = dst_frm.get_offset(0) + start * ystride;
            let ustride = dst_frm.get_stride(1);
            let mut uoff = dst_frm.get_offset(1) + start / 2 * ustride;
            let vstride = dst_frm.get_stride(2);
            let mut voff = dst_frm.get_offset(2) + start / 2 * vstride;
            let (width, _) = dst_frm.get_dimensions(0);
            let data = dst_frm.get_data_mut().unwrap();
            let mut miter = self.masks.masks.iter();
            let mut v1_iter = self.v1_idx.iter();
            let mut v4_iter = self.v4_idx.iter();
            let mut cur_mask = 0;
            let mut cur_bit = 0;
            for _ in (start..end).step_by(4) {
                for x in (0..width).step_by(4) {
                    if cur_bit == 0 {
                        if !intra || !self.v1_idx.is_empty() {
                            cur_mask = *miter.next().unwrap();
                        } else {
                            cur_mask = 0xFFFFFFFF;
                        }
                        cur_bit = 1 << 31;
                    }
                    if !intra {
                        if (cur_mask & cur_bit) == 0 {
                            cur_bit >>= 1;
                            continue;
                        }
                        cur_bit >>= 1;
                        if cur_bit == 0 {
                            cur_mask = *miter.next().unwrap();
                            cur_bit = 1 << 31;
                        }
                    }
                    if (cur_mask & cur_bit) == 0 {
                        let idx = *v1_iter.next().unwrap() as usize;
                        let cb = &self.v1_cur_cb[self.cur_strip][idx];

                        let mut coff = yoff + x;
                        data[coff] = cb.y[0]; data[coff + 1] = cb.y[0];
                        data[coff + 2] = cb.y[1]; data[coff + 3] = cb.y[1];
                        coff += ystride;
                        data[coff] = cb.y[0]; data[coff + 1] = cb.y[0];
                        data[coff + 2] = cb.y[1]; data[coff + 3] = cb.y[1];
                        coff += ystride;
                        data[coff] = cb.y[2]; data[coff + 1] = cb.y[2];
                        data[coff + 2] = cb.y[3]; data[coff + 3] = cb.y[3];
                        coff += ystride;
                        data[coff] = cb.y[2]; data[coff + 1] = cb.y[2];
                        data[coff + 2] = cb.y[3]; data[coff + 3] = cb.y[3];

                        if !self.grayscale {
                            let mut coff = uoff + x / 2;
                            data[coff] = cb.u; data[coff + 1] = cb.u;
                            coff += ustride;
                            data[coff] = cb.u; data[coff + 1] = cb.u;

                            let mut coff = voff + x / 2;
                            data[coff] = cb.v; data[coff + 1] = cb.v;
                            coff += vstride;
                            data[coff] = cb.v; data[coff + 1] = cb.v;
                        }
                    } else {
                        let idx0 = *v4_iter.next().unwrap() as usize;
                        let cb0 = &self.v4_cur_cb[self.cur_strip][idx0];
                        let idx1 = *v4_iter.next().unwrap() as usize;
                        let cb1 = &self.v4_cur_cb[self.cur_strip][idx1];
                        let idx2 = *v4_iter.next().unwrap() as usize;
                        let cb2 = &self.v4_cur_cb[self.cur_strip][idx2];
                        let idx3 = *v4_iter.next().unwrap() as usize;
                        let cb3 = &self.v4_cur_cb[self.cur_strip][idx3];

                        let mut coff = yoff + x;
                        data[coff] = cb0.y[0]; data[coff + 1] = cb0.y[1];
                        data[coff + 2] = cb1.y[0]; data[coff + 3] = cb1.y[1];
                        coff += ystride;
                        data[coff] = cb0.y[2]; data[coff + 1] = cb0.y[3];
                        data[coff + 2] = cb1.y[2]; data[coff + 3] = cb1.y[3];
                        coff += ystride;
                        data[coff] = cb2.y[0]; data[coff + 1] = cb2.y[1];
                        data[coff + 2] = cb3.y[0]; data[coff + 3] = cb3.y[1];
                        coff += ystride;
                        data[coff] = cb2.y[2]; data[coff + 1] = cb2.y[3];
                        data[coff + 2] = cb3.y[2]; data[coff + 3] = cb3.y[3];

                        if !self.grayscale {
                            let mut coff = uoff + x / 2;
                            data[coff] = cb0.u; data[coff + 1] = cb1.u;
                            coff += ustride;
                            data[coff] = cb2.u; data[coff + 1] = cb3.u;

                            let mut coff = voff + x / 2;
                            data[coff] = cb0.v; data[coff + 1] = cb1.v;
                            coff += vstride;
                            data[coff] = cb2.v; data[coff + 1] = cb3.v;
                        }
                    }
                    cur_bit >>= 1;
                }
                yoff += ystride * 4;
                uoff += ustride * 2;
                voff += vstride * 2;
            }
        }
    }
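    // For every 4x4 block, sums the squared differences against the previous
    // reconstructed frame; these per-block costs drive the skip decision in
    // inter frames.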
    fn calc_skip_dist(&mut self, in_frm: &NAVideoBuffer<u8>, start: usize, end: usize) {
        self.skip_dist.clear();
        if let Some(ref ref_frm) = self.lastfrm {
            let rystride = ref_frm.get_stride(0);
            let mut ryoff = ref_frm.get_offset(0) + start * rystride;
            let rustride = ref_frm.get_stride(1);
            let mut ruoff = ref_frm.get_offset(1) + start / 2 * rustride;
            let rvstride = ref_frm.get_stride(2);
            let mut rvoff = ref_frm.get_offset(2) + start / 2 * rvstride;
            let (width, _) = ref_frm.get_dimensions(0);
            let rdata = ref_frm.get_data();

            let iystride = in_frm.get_stride(0);
            let mut iyoff = in_frm.get_offset(0) + start * iystride;
            let iustride = in_frm.get_stride(1);
            let mut iuoff = in_frm.get_offset(1) + start / 2 * iustride;
            let ivstride = in_frm.get_stride(2);
            let mut ivoff = in_frm.get_offset(2) + start / 2 * ivstride;
            let idata = in_frm.get_data();

            for _ in (start..end).step_by(4) {
                for x in (0..width).step_by(4) {
                    let mut dist = 0;
                    let mut roff = ryoff + x;
                    let mut ioff = iyoff + x;
                    for _ in 0..4 {
                        for i in 0..4 {
                            let d = i32::from(rdata[roff + i]) - i32::from(idata[ioff + i]);
                            dist += d * d;
                        }
                        roff += rystride;
                        ioff += iystride;
                    }

                    let mut roff = ruoff + x / 2;
                    let mut ioff = iuoff + x / 2;
                    let ud = i32::from(rdata[roff]) - i32::from(idata[ioff]);
                    dist += ud * ud;
                    let ud = i32::from(rdata[roff + 1]) - i32::from(idata[ioff + 1]);
                    dist += ud * ud;
                    roff += rustride; ioff += iustride;
                    let ud = i32::from(rdata[roff]) - i32::from(idata[ioff]);
                    dist += ud * ud;
                    let ud = i32::from(rdata[roff + 1]) - i32::from(idata[ioff + 1]);
                    dist += ud * ud;

                    let mut roff = rvoff + x / 2;
                    let mut ioff = ivoff + x / 2;
                    let vd = i32::from(rdata[roff]) - i32::from(idata[ioff]);
                    dist += vd * vd;
                    let vd = i32::from(rdata[roff + 1]) - i32::from(idata[ioff + 1]);
                    dist += vd * vd;
                    roff += rvstride; ioff += ivstride;
                    let vd = i32::from(rdata[roff]) - i32::from(idata[ioff]);
                    dist += vd * vd;
                    let vd = i32::from(rdata[roff + 1]) - i32::from(idata[ioff + 1]);
                    dist += vd * vd;

                    self.skip_dist.push(dist as u32);
                }

                iyoff += iystride * 4;
                iuoff += iustride * 2;
                ivoff += ivstride * 2;
                ryoff += rystride * 4;
                ruoff += rustride * 2;
                rvoff += rvstride * 2;
            }
        }
    }
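    // Builds the codebooks for the current strip from the collected vectors,
    // using the configured quantiser: ELBG, median cut, or median cut refined
    // by ELBG ("hybrid").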
    fn quant_vectors(&mut self) {
        match self.qmode {
            QuantMode::ELBG => {
                let mut elbg_v1: ELBG<YUVCode, YUVCodeSum> = ELBG::new(&self.v1_cb[self.cur_strip]);
                let mut elbg_v4: ELBG<YUVCode, YUVCodeSum> = ELBG::new(&self.v4_cb[self.cur_strip]);

                for entry in self.v1_cb[self.cur_strip].iter_mut().skip(self.v1_len) {
                    self.rng.fill_entry(entry);
                }
                for entry in self.v4_cb[self.cur_strip].iter_mut().skip(self.v4_len) {
                    self.rng.fill_entry(entry);
                }

                self.v1_len = elbg_v1.quantise(&self.v1_entries, &mut self.v1_cur_cb[self.cur_strip]);
                self.v4_len = elbg_v4.quantise(&self.v4_entries, &mut self.v4_cur_cb[self.cur_strip]);
            },
            QuantMode::Hybrid => {
                quantise_median_cut::<YUVCode, YUVCodeSum>(&self.v1_entries, &mut self.v1_cur_cb[self.cur_strip]);
                quantise_median_cut::<YUVCode, YUVCodeSum>(&self.v4_entries, &mut self.v4_cur_cb[self.cur_strip]);
                let mut elbg_v1: ELBG<YUVCode, YUVCodeSum> = ELBG::new(&self.v1_cur_cb[self.cur_strip]);
                let mut elbg_v4: ELBG<YUVCode, YUVCodeSum> = ELBG::new(&self.v4_cur_cb[self.cur_strip]);
                self.v1_len = elbg_v1.quantise(&self.v1_entries, &mut self.v1_cur_cb[self.cur_strip]);
                self.v4_len = elbg_v4.quantise(&self.v4_entries, &mut self.v4_cur_cb[self.cur_strip]);
            },
            QuantMode::MedianCut => {
                self.v1_len = quantise_median_cut::<YUVCode, YUVCodeSum>(&self.v1_entries, &mut self.v1_cur_cb[self.cur_strip]);
                self.v4_len = quantise_median_cut::<YUVCode, YUVCodeSum>(&self.v4_entries, &mut self.v4_cur_cb[self.cur_strip]);
            },
        }

        for e in self.v1_cur_cb[self.cur_strip].iter_mut().skip(self.v1_len) { *e = YUVCode::default(); }
        for e in self.v4_cur_cb[self.cur_strip].iter_mut().skip(self.v4_len) { *e = YUVCode::default(); }
    }
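    // Key frame: writes the frame header, then per strip the V4/V1 codebooks
    // followed by either a V1-only index chunk (0x32) or a mixed chunk (0x30)
    // where flag words select V1 or V4 coding per block.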
    fn encode_intra(&mut self, bw: &mut ByteWriter, in_frm: &NAVideoBuffer<u8>) -> EncoderResult<bool> {
        let (width, height) = in_frm.get_dimensions(0);
        let mut strip_h = (height / self.nstrips + 3) & !3;
        // ... (make sure the strip height is valid and covers the whole frame)
        let mut start_line = 0;
        let mut end_line = strip_h;

        bw.write_byte(0)?; // intra flag
        bw.write_u24be(0)?; // frame size
        let frame_data_pos = bw.tell();
        bw.write_u16be(width as u16)?;
        bw.write_u16be(height as u16)?;
        bw.write_u16be(self.nstrips as u16)?;

        self.cur_strip = 0;
        for entry in self.v1_cb[self.cur_strip].iter_mut() {
            self.rng.fill_entry(entry);
        }
        for entry in self.v4_cb[self.cur_strip].iter_mut() {
            self.rng.fill_entry(entry);
        }
        while start_line < height {
            self.read_strip(in_frm, start_line, end_line);

            if self.cur_strip > 0 {
                self.v1_cb[self.cur_strip] = self.v1_cb[self.cur_strip - 1];
                self.v4_cb[self.cur_strip] = self.v4_cb[self.cur_strip - 1];
            }
            self.quant_vectors();
            if self.grayscale {
                for cw in self.v1_cur_cb[self.cur_strip].iter_mut() {
                    cw.u = 0x80;
                    cw.v = 0x80;
                }
                for cw in self.v4_cur_cb[self.cur_strip].iter_mut() {
                    cw.u = 0x80;
                    cw.v = 0x80;
                }
            }

            self.v1_idx.clear();
            self.v4_idx.clear();
            self.masks.reset();

            for (v1_entry, v4_entries) in self.v1_entries.iter().zip(self.v4_entries.chunks(4)) {
                let (v1_idx, v1_dist) = Self::find_nearest(&self.v1_cur_cb[self.cur_strip][..self.v1_len], *v1_entry);
                if v1_dist == 0 {
                    self.masks.put_v1();
                    self.v1_idx.push(v1_idx);
                    continue;
                }
                let (v40_idx, v40_dist) = Self::find_nearest(&self.v4_cur_cb[self.cur_strip][..self.v4_len], v4_entries[0]);
                let (v41_idx, v41_dist) = Self::find_nearest(&self.v4_cur_cb[self.cur_strip][..self.v4_len], v4_entries[1]);
                let (v42_idx, v42_dist) = Self::find_nearest(&self.v4_cur_cb[self.cur_strip][..self.v4_len], v4_entries[2]);
                let (v43_idx, v43_dist) = Self::find_nearest(&self.v4_cur_cb[self.cur_strip][..self.v4_len], v4_entries[3]);
                if v40_dist + v41_dist + v42_dist + v43_dist > v1_dist {
                    self.masks.put_v4();
                    self.v4_idx.push(v40_idx);
                    self.v4_idx.push(v41_idx);
                    self.v4_idx.push(v42_idx);
                    self.v4_idx.push(v43_idx);
                } else {
                    self.masks.put_v1();
                    self.v1_idx.push(v1_idx);
                }
            }
            self.masks.end();

            let mut is_intra_strip = start_line == 0;
            let (upd_v1, upd_v4) = if !is_intra_strip {
                    let cb_size = if self.grayscale { 4 } else { 6 };
                    (Self::can_update_cb(&self.v1_cur_cb[self.cur_strip][..self.v1_len], &self.v1_cb[self.cur_strip][..self.v1_len], cb_size),
                     Self::can_update_cb(&self.v4_cur_cb[self.cur_strip][..self.v4_len], &self.v4_cb[self.cur_strip][..self.v4_len], cb_size))
                } else {
                    (false, false)
                };
            if !is_intra_strip && !upd_v1 && !upd_v4 {
                is_intra_strip = true;
            }
            bw.write_byte(if is_intra_strip { 0x10 } else { 0x11 })?;
            bw.write_u24be(0)?; // strip size
            let strip_data_pos = bw.tell();
            bw.write_u16be(0)?; // yoff
            bw.write_u16be(0)?; // xoff
            bw.write_u16be((end_line - start_line) as u16)?;
            bw.write_u16be(width as u16)?;

            Self::write_cb(bw, 0x20, &self.v4_cur_cb[self.cur_strip], &self.v4_cb[self.cur_strip], self.grayscale, upd_v4, self.v4_len)?;
            Self::write_cb(bw, 0x22, &self.v1_cur_cb[self.cur_strip], &self.v1_cb[self.cur_strip], self.grayscale, upd_v1, self.v1_len)?;

            self.render_stripe(true, start_line, end_line);

            if self.v4_idx.is_empty() {
                bw.write_byte(0x32)?;
                bw.write_u24be((self.v1_idx.len() + 4) as u32)?;
                bw.write_buf(self.v1_idx.as_slice())?;
            } else {
                bw.write_byte(0x30)?;
                bw.write_u24be(0)?;
                let chunk_pos = bw.tell();
                let mut v1_pos = 0;
                let mut v4_pos = 0;
                // ...
                for mask in self.masks.masks.iter() {
                    bw.write_u32be(*mask)?;
                    for j in (0..32).rev() {
                        if (mask & (1 << j)) == 0 {
                            bw.write_byte(self.v1_idx[v1_pos])?;
                            v1_pos += 1;
                        } else {
                            bw.write_byte(self.v4_idx[v4_pos])?;
                            bw.write_byte(self.v4_idx[v4_pos + 1])?;
                            bw.write_byte(self.v4_idx[v4_pos + 2])?;
                            bw.write_byte(self.v4_idx[v4_pos + 3])?;
                            v4_pos += 4;
                        }
                    }
                }
                patch_size(bw, chunk_pos)?;
            }

            patch_size(bw, strip_data_pos)?;

            self.v1_cb[self.cur_strip].copy_from_slice(&self.v1_cur_cb[self.cur_strip]);
            self.v4_cb[self.cur_strip].copy_from_slice(&self.v4_cur_cb[self.cur_strip]);
            start_line = end_line;
            end_line = (end_line + strip_h).min(height);
            self.cur_strip += 1;
        }
        patch_size(bw, frame_data_pos)?;
        Ok(true)
    }
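    // Inter frame: every block may be skipped (kept from the previous frame),
    // V1-coded or V4-coded; indices and skip/mode flag words go into a 0x31
    // chunk, and codebooks are sent as updates when that is cheaper.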
    fn encode_inter(&mut self, bw: &mut ByteWriter, in_frm: &NAVideoBuffer<u8>) -> EncoderResult<bool> {
        let (width, height) = in_frm.get_dimensions(0);
        let mut strip_h = (height / self.nstrips + 3) & !3;
        // ... (make sure the strip height is valid and covers the whole frame)
        let mut start_line = 0;
        let mut end_line = strip_h;

        bw.write_byte(1)?; // intra flag
        bw.write_u24be(0)?; // frame size
        let frame_data_pos = bw.tell();
        bw.write_u16be(width as u16)?;
        bw.write_u16be(height as u16)?;
        bw.write_u16be(self.nstrips as u16)?;

        self.cur_strip = 0;
        while start_line < height {
            self.read_strip(in_frm, start_line, end_line);
            self.calc_skip_dist(in_frm, start_line, end_line);

            self.quant_vectors();
            if self.grayscale {
                for cw in self.v1_cur_cb[self.cur_strip].iter_mut() {
                    cw.u = 0x80;
                    cw.v = 0x80;
                }
                for cw in self.v4_cur_cb[self.cur_strip].iter_mut() {
                    cw.u = 0x80;
                    cw.v = 0x80;
                }
            }

            self.v1_idx.clear();
            self.v4_idx.clear();
            self.masks.reset();

            let mut skip_iter = self.skip_dist.iter();
            for (v1_entry, v4_entries) in self.v1_entries.iter().zip(self.v4_entries.chunks(4)) {
                let skip_dist = *skip_iter.next().unwrap();
                if skip_dist == 0 {
                    self.masks.put_inter(true);
                    continue;
                }
                let (v1_idx, v1_dist) = Self::find_nearest(&self.v1_cur_cb[self.cur_strip][..self.v1_len], *v1_entry);
                if skip_dist < v1_dist {
                    self.masks.put_inter(true);
                    continue;
                }
                self.masks.put_inter(false);
                if v1_dist == 0 {
                    self.masks.put_v1();
                    self.v1_idx.push(v1_idx);
                    continue;
                }
                let (v40_idx, v40_dist) = Self::find_nearest(&self.v4_cur_cb[self.cur_strip][..self.v4_len], v4_entries[0]);
                let (v41_idx, v41_dist) = Self::find_nearest(&self.v4_cur_cb[self.cur_strip][..self.v4_len], v4_entries[1]);
                let (v42_idx, v42_dist) = Self::find_nearest(&self.v4_cur_cb[self.cur_strip][..self.v4_len], v4_entries[2]);
                let (v43_idx, v43_dist) = Self::find_nearest(&self.v4_cur_cb[self.cur_strip][..self.v4_len], v4_entries[3]);
                if v40_dist + v41_dist + v42_dist + v43_dist > v1_dist {
                    self.masks.put_v4();
                    self.v4_idx.push(v40_idx);
                    self.v4_idx.push(v41_idx);
                    self.v4_idx.push(v42_idx);
                    self.v4_idx.push(v43_idx);
                } else {
                    self.masks.put_v1();
                    self.v1_idx.push(v1_idx);
                }
            }
            self.masks.end();

            let (upd_v1, upd_v4) = {
                    let cb_size = if self.grayscale { 4 } else { 6 };
                    (Self::can_update_cb(&self.v1_cur_cb[self.cur_strip][..self.v1_len], &self.v1_cb[self.cur_strip][..self.v1_len], cb_size),
                     Self::can_update_cb(&self.v4_cur_cb[self.cur_strip][..self.v4_len], &self.v4_cb[self.cur_strip][..self.v4_len], cb_size))
                };
            bw.write_byte(0x11)?;
            bw.write_u24be(0)?; // strip size
            let strip_data_pos = bw.tell();
            bw.write_u16be(0)?; // yoff
            bw.write_u16be(0)?; // xoff
            bw.write_u16be((end_line - start_line) as u16)?;
            bw.write_u16be(width as u16)?;

            Self::write_cb(bw, 0x20, &self.v4_cur_cb[self.cur_strip], &self.v4_cb[self.cur_strip], self.grayscale, upd_v4, self.v4_len)?;
            Self::write_cb(bw, 0x22, &self.v1_cur_cb[self.cur_strip], &self.v1_cb[self.cur_strip], self.grayscale, upd_v1, self.v1_len)?;

            self.render_stripe(false, start_line, end_line);

            bw.write_byte(0x31)?;
            bw.write_u24be(0)?;
            let chunk_pos = bw.tell();
            let mut v1_pos = 0;
            let mut v4_pos = 0;
            let mut skip = true;
            // ...
            for mask in self.masks.masks.iter() {
                bw.write_u32be(*mask)?;
                if *mask == 0 && skip { continue; }
                let mut bit = 1 << 31;
                while bit > 0 {
                    if skip {
                        skip = (mask & bit) == 0;
                        bit >>= 1;
                    } else {
                        if (mask & bit) == 0 {
                            bw.write_byte(self.v1_idx[v1_pos])?;
                            v1_pos += 1;
                        } else {
                            bw.write_byte(self.v4_idx[v4_pos])?;
                            bw.write_byte(self.v4_idx[v4_pos + 1])?;
                            bw.write_byte(self.v4_idx[v4_pos + 2])?;
                            bw.write_byte(self.v4_idx[v4_pos + 3])?;
                            v4_pos += 4;
                        }
                        bit >>= 1;
                        skip = true;
                    }
                }
            }
            patch_size(bw, chunk_pos)?;

            patch_size(bw, strip_data_pos)?;

            self.v1_cb[self.cur_strip].copy_from_slice(&self.v1_cur_cb[self.cur_strip]);
            self.v4_cb[self.cur_strip].copy_from_slice(&self.v4_cur_cb[self.cur_strip]);
            start_line = end_line;
            end_line = (end_line + strip_h).min(height);
            self.cur_strip += 1;
        }
        patch_size(bw, frame_data_pos)?;
        Ok(false)
    }
}
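// Standard encoder plumbing: format negotiation, stream setup, frame
// submission and packet retrieval.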
impl NAEncoder for CinepakEncoder {
    fn negotiate_format(&self, encinfo: &EncodeParameters) -> EncoderResult<EncodeParameters> {
        match encinfo.format {
            NACodecTypeInfo::None => {
                Ok(EncodeParameters {
                    format: NACodecTypeInfo::Video(NAVideoInfo::new(0, 0, true, YUV420_FORMAT)),
                    ..Default::default()
                })
            },
            NACodecTypeInfo::Audio(_) => Err(EncoderError::FormatError),
            NACodecTypeInfo::Video(vinfo) => {
                let pix_fmt = if vinfo.format == GRAY_FORMAT { GRAY_FORMAT } else { YUV420_FORMAT };
                let outinfo = NAVideoInfo::new((vinfo.width + 3) & !3, (vinfo.height + 3) & !3, false, pix_fmt);
                let mut ofmt = *encinfo;
                ofmt.format = NACodecTypeInfo::Video(outinfo);
                Ok(ofmt)
            }
        }
    }
    fn init(&mut self, stream_id: u32, encinfo: EncodeParameters) -> EncoderResult<NAStreamRef> {
        match encinfo.format {
            NACodecTypeInfo::None => Err(EncoderError::FormatError),
            NACodecTypeInfo::Audio(_) => Err(EncoderError::FormatError),
            NACodecTypeInfo::Video(vinfo) => {
                if vinfo.format != YUV420_FORMAT && vinfo.format != GRAY_FORMAT {
                    return Err(EncoderError::FormatError);
                }
                if ((vinfo.width | vinfo.height) & 3) != 0 {
                    return Err(EncoderError::FormatError);
                }
                if (vinfo.width | vinfo.height) >= (1 << 16) {
                    return Err(EncoderError::FormatError);
                }

                let out_info = NAVideoInfo::new(vinfo.width, vinfo.height, false, vinfo.format);
                let info = NACodecInfo::new("cinepak", NACodecTypeInfo::Video(out_info), None);
                let mut stream = NAStream::new(StreamType::Video, stream_id, info, encinfo.tb_num, encinfo.tb_den, 0);
                stream.set_num(stream_id as usize);
                let stream = stream.into_ref();

                self.stream = Some(stream.clone());
                self.quality = encinfo.quality;
                self.grayscale = vinfo.format != YUV420_FORMAT;
                let num_blocks = vinfo.width / 2 * vinfo.height / 2;
                self.v1_entries = Vec::with_capacity(num_blocks);
                self.v4_entries = Vec::with_capacity(num_blocks * 4);
                self.v1_idx = Vec::with_capacity(num_blocks);
                self.v4_idx = Vec::with_capacity(num_blocks * 4);
                self.skip_dist = Vec::with_capacity(vinfo.width / 4 * vinfo.height / 4);

                let buf = alloc_video_buffer(out_info, 2)?;
                self.lastfrm = Some(buf.get_vbuf().unwrap());

                Ok(stream)
            }
        }
    }
    fn encode(&mut self, frm: &NAFrame) -> EncoderResult<()> {
        let buf = frm.get_buffer();
        if let Some(ref vbuf) = buf.get_vbuf() {
            let cur_strips = self.v1_cb.len();
            if cur_strips != self.nstrips {
                self.frmcount = 0;
            }
            if cur_strips < self.nstrips {
                for _ in cur_strips..self.nstrips {
                    self.v1_cb.push([YUVCode::default(); 256]);
                    self.v4_cb.push([YUVCode::default(); 256]);
                    self.v1_cur_cb.push([YUVCode::default(); 256]);
                    self.v4_cur_cb.push([YUVCode::default(); 256]);
                }
            }

            let mut dbuf = Vec::with_capacity(4);
            let mut gw = GrowableMemoryWriter::new_write(&mut dbuf);
            let mut bw = ByteWriter::new(&mut gw);
            let is_intra = if self.frmcount == 0 {
                    self.encode_intra(&mut bw, vbuf)?
                } else {
                    self.encode_inter(&mut bw, vbuf)?
                };
            self.pkt = Some(NAPacket::new(self.stream.clone().unwrap(), frm.ts, is_intra, dbuf));
            self.frmcount += 1;
            if self.frmcount == self.key_int {
                self.frmcount = 0;
            }
            Ok(())
        } else {
            Err(EncoderError::InvalidParameters)
        }
    }
    fn get_packet(&mut self) -> EncoderResult<Option<NAPacket>> {
        let mut npkt = None;
        std::mem::swap(&mut self.pkt, &mut npkt);
        Ok(npkt)
    }
    fn flush(&mut self) -> EncoderResult<()> {
        self.frmcount = 0;
        Ok(())
    }
}
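// User-visible options: keyframe interval, number of strips and the
// quantisation mode.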
const ENCODER_OPTS: &[NAOptionDefinition] = &[
    NAOptionDefinition {
        name: KEYFRAME_OPTION, description: KEYFRAME_OPTION_DESC,
        opt_type: NAOptionDefinitionType::Int(Some(0), Some(128)) },
    NAOptionDefinition {
        name: "nstrips", description: "Number of strips per frame (0 - automatic)",
        opt_type: NAOptionDefinitionType::Int(Some(0), Some(16)) },
    NAOptionDefinition {
        name: "quant_mode", description: "Quantisation mode",
        opt_type: NAOptionDefinitionType::String(Some(&["elbg", "hybrid", "mediancut"])) },
];
impl NAOptionHandler for CinepakEncoder {
    fn get_supported_options(&self) -> &[NAOptionDefinition] { ENCODER_OPTS }
    fn set_options(&mut self, options: &[NAOption]) {
        for option in options.iter() {
            for opt_def in ENCODER_OPTS.iter() {
                if opt_def.check(option).is_ok() {
                    match option.name {
                        KEYFRAME_OPTION => {
                            if let NAValue::Int(intval) = option.value {
                                self.key_int = intval as u8;
                            }
                        },
                        "nstrips" => {
                            if let NAValue::Int(intval) = option.value {
                                self.nstrips = intval as usize;
                            }
                        },
                        "quant_mode" => {
                            if let NAValue::String(ref strval) = option.value {
                                match strval.as_str() {
                                    "elbg" => self.qmode = QuantMode::ELBG,
                                    "hybrid" => self.qmode = QuantMode::Hybrid,
                                    "mediancut" => self.qmode = QuantMode::MedianCut,
                                    _ => {},
                                }
                            }
                        },
                        _ => {},
                    }
                }
            }
        }
    }
    fn query_option_value(&self, name: &str) -> Option<NAValue> {
        match name {
            KEYFRAME_OPTION => Some(NAValue::Int(i64::from(self.key_int))),
            "nstrips" => Some(NAValue::Int(self.nstrips as i64)),
            "quant_mode" => Some(NAValue::String(self.qmode.to_string())),
            _ => None,
        }
    }
}
pub fn get_encoder() -> Box<dyn NAEncoder + Send> {
    Box::new(CinepakEncoder::new())
}
#[cfg(test)]
mod test {
    use nihav_core::codecs::*;
    use nihav_core::demuxers::*;
    use nihav_core::muxers::*;
    // ... (crate-local registration imports)
    use nihav_codec_support::test::enc_video::*;

    #[test]
    fn test_cinepak_encoder() {
        let mut dmx_reg = RegisteredDemuxers::new();
        generic_register_all_demuxers(&mut dmx_reg);
        let mut dec_reg = RegisteredDecoders::new();
        generic_register_all_decoders(&mut dec_reg);
        let mut mux_reg = RegisteredMuxers::new();
        generic_register_all_muxers(&mut mux_reg);
        let mut enc_reg = RegisteredEncoders::new();
        generic_register_all_encoders(&mut enc_reg);

        // sample: https://samples.mplayerhq.hu/V-codecs/UCOD/TalkingHead_352x288.avi
        let dec_config = DecoderTestParams {
                demuxer:        "avi",
                in_name:        "assets/Misc/TalkingHead_352x288.avi",
                stream_type:    StreamType::Video,
                // ... (frame limit and the demuxer/decoder registries)
            };
        let enc_config = EncoderTestParams {
                muxer:          "avi",
                enc_name:       "cinepak",
                out_name:       "cinepak.avi",
                // ... (muxer/encoder registries)
            };
        let dst_vinfo = NAVideoInfo {
                // ... (width and height are taken from the source)
                format: YUV420_FORMAT,
                // ... (flipped flag and bit depth)
            };
        let enc_params = EncodeParameters {
                format: NACodecTypeInfo::Video(dst_vinfo),
                // ... (quality, bitrate, timebase and flags)
            };
        //test_encoding_to_file(&dec_config, &enc_config, enc_params, &[]);
        test_encoding_md5(&dec_config, &enc_config, enc_params, &[],
                          &[0x1d4690c8, 0x3b15b4b3, 0xc2df3c7b, 0x1a25b159]);
    }
}