1 use nihav_core::codecs::*;
2 use nihav_core::io::byteio::*;
3 use nihav_codec_support::vq::*;
5 #[derive(Default,Clone,Copy,PartialEq,Debug)]
// Vector-quantiser element implementation for a YUVCode: four luma
// samples (one 2x2 quadrant each at V1 scale) plus one U and one V.
// Drives the codebook generation (median cut) used by the encoder below.
11 impl VQElement for YUVCode {
// Squared Euclidean distance over all six components (4x Y + U + V).
12 fn dist(&self, rval: Self) -> u32 {
14 for (y0, y1) in self.y.iter().zip(rval.y.iter()) {
15 let yd = i32::from(*y0) - i32::from(*y1);
18 let ud = i32::from(self.u) - i32::from(rval.u);
19 let vd = i32::from(self.v) - i32::from(rval.v);
// Non-negative sum of squared byte differences, so the cast is safe.
20 (ysum + ud * ud + vd * vd) as u32
// Extreme codewords: the corners of the component-space bounding box.
22 fn min_cw() -> Self { YUVCode { y: [0; 4], u: 0, v: 0 } }
23 fn max_cw() -> Self { YUVCode { y: [255; 4], u: 255, v: 255 } }
// Componentwise minimum of two codewords.
24 fn min(&self, rval: Self) -> Self {
25 let mut ycode = YUVCode::default();
27 ycode.y[i] = self.y[i].min(rval.y[i]);
29 ycode.u = self.u.min(rval.u);
30 ycode.v = self.v.min(rval.v);
// Componentwise maximum of two codewords.
33 fn max(&self, rval: Self) -> Self {
34 let mut ycode = YUVCode::default();
36 ycode.y[i] = self.y[i].max(rval.y[i]);
38 ycode.u = self.u.max(rval.u);
39 ycode.v = self.v.max(rval.v);
// Components 0..3 select the Y samples, 4 = U, 5 = V (see matches below).
42 fn num_components() -> usize { 6 }
// Counting sort (radix 256) by one component: O(n), stable, no compares —
// every component is a single byte.
43 fn sort_by_component(arr: &mut [Self], component: usize) {
// First pass: histogram of the selected component.
44 let mut counts = [0; 256];
45 for entry in arr.iter() {
46 let idx = match component {
47 0 | 1 | 2 | 3 => entry.y[component],
// Prefix sums turn the histogram into starting offsets per byte value.
53 let mut offs = [0; 256];
55 offs[i + 1] = offs[i] + counts[i];
// Second pass: scatter entries into a scratch buffer in sorted order.
57 let mut dst = vec![YUVCode::default(); arr.len()];
58 for entry in arr.iter() {
59 let idx = match component {
60 0 | 1 | 2 | 3 => entry.y[component],
64 dst[offs[idx]] = *entry;
67 arr.copy_from_slice(dst.as_slice());
// Index of the component with the widest spread between the elementwise
// minimum and maximum codewords (assumes min <= max per component, as
// produced by min()/max() above — the subtraction would wrap otherwise).
69 fn max_dist_component(min: &Self, max: &Self) -> usize {
73 let d = u32::from(max.y[i]) - u32::from(min.y[i]);
79 let ud = u32::from(max.u) - u32::from(min.u);
84 let vd = u32::from(max.v) - u32::from(min.v);
// Weighted-sum accumulator used to compute cluster centroids during
// codebook quantisation.
100 impl VQCodeSumTrait // (original: impl VQElementSum<YUVCode> for YUVCodeSum) — see next line
// Simple PRNG used to seed the initial intra codebooks with noise.
130 fn new() -> Self { Self { seed: 0x12345678 } }
// Xorshift-style update (x ^= x << 13, remaining steps elided here);
// returns the top byte of the new seed.
131 fn next(&mut self) -> u8 {
132 let mut x = self.seed;
133 x ^= x.wrapping_shl(13);
136 (self.seed >> 24) as u8
// Fill all six components of a codebook entry with random bytes.
138 fn fill_entry(&mut self, entry: &mut YUVCode) {
139 for y in entry.y.iter_mut() {
142 entry.u = self.next();
143 entry.v = self.next();
// Single-plane 8-bit greyscale format (full-range YUVJ luma only,
// no chroma components) accepted as an alternative encoder input.
147 const GRAY_FORMAT: NAPixelFormaton = NAPixelFormaton {
148 model: ColorModel::YUV(YUVSubmodel::YUVJ),
150 comp_info: [Some(NAPixelChromaton{h_ss: 0, v_ss: 0, packed: false, depth: 8, shift: 0, comp_offs: 0, next_elem: 1}), None, None, None, None],
// Clear accumulated block-coding masks before encoding a new strip.
171 fn reset(&mut self) {
172 self.masks.truncate(0);
// Record a macroblock coded with a single V1 codebook index.
176 fn put_v1(&mut self) {
// Record a macroblock coded with four V4 codebook indices.
183 fn put_v4(&mut self) {
// Record an inter macroblock: skip == true means "copy from previous
// frame" (bit left clear); a coded block sets the bit.
191 fn put_inter(&mut self, skip: bool) {
193 self.mask |= !skip as u32;
// Push pending mask bits out to the masks vector.
199 fn flush(&mut self) {
200 self.masks.push(self.mask);
// Pad the partially filled 32-bit word with zero bits (no-op when the
// bit position is already at a word boundary).
205 if self.pos == 0 { return; }
206 while self.pos < 32 {
// Cinepak encoder state.
214 struct CinepakEncoder {
215 stream: Option<NAStreamRef>,
// Reconstructed previous frame — the reference for skip/inter decisions.
216 lastfrm: Option<NAVideoBufferRef<u8>>,
// Pending output packet handed out by get_packet().
217 pkt: Option<NAPacket>,
// Per-strip VQ input: one V1 (2x2-averaged) entry and four V4 entries
// per 4x4 luma macroblock.
221 v1_entries: Vec<YUVCode>,
222 v4_entries: Vec<YUVCode>,
// Previous codebooks (reference for partial codebook updates)...
223 v1_cb: [YUVCode; 256],
224 v4_cb: [YUVCode; 256],
// ...and the codebooks quantised for the current strip.
225 v1_cur_cb: [YUVCode; 256],
226 v4_cur_cb: [YUVCode; 256],
// Average of four pixel values with upward rounding (the +3 bias makes
// the >> 2 round up rather than truncate).
235 fn avg4(a: u8, b: u8, c: u8, d: u8) -> u8 {
236 ((u16::from(a) + u16::from(b) + u16::from(c) + u16::from(d) + 3) >> 2) as u8
// Patch a 24-bit big-endian chunk size field in place. `pos` is the
// writer position captured right after the size placeholder was written;
// the stored size includes the 4-byte chunk header (1 id byte + 3 size
// bytes), hence the +4.
239 fn patch_size(bw: &mut ByteWriter, pos: u64) -> EncoderResult<()> {
240 let size = bw.tell() - pos;
// Seek back over the payload plus the 3-byte size field itself.
241 bw.seek(SeekFrom::Current(-((size + 3) as i64)))?;
242 bw.write_u24be((size + 4) as u32)?;
// Return to the end so writing can continue where it left off.
243 bw.seek(SeekFrom::End(0))?;
247 impl CinepakEncoder {
// Fresh encoder state: empty entry/index buffers and zeroed codebooks
// (the intra path re-seeds the codebooks with random entries anyway).
256 v1_entries: Vec::new(),
257 v4_entries: Vec::new(),
258 v1_cb: [YUVCode::default(); 256],
259 v4_cb: [YUVCode::default(); 256],
260 v1_cur_cb: [YUVCode::default(); 256],
261 v4_cur_cb: [YUVCode::default(); 256],
266 masks: MaskWriter::new(),
267 skip_dist: Vec::new(),
// Collect VQ input entries for one horizontal strip [start, end) of the
// input frame: for every 4x4 luma macroblock, one averaged V1 entry and
// four V4 entries (one per 2x2 quadrant).
270 fn read_strip(&mut self, in_frm: &NAVideoBuffer<u8>, start: usize, end: usize) {
271 let ystride = in_frm.get_stride(0);
272 let mut yoff = in_frm.get_offset(0) + start * ystride;
273 let ustride = in_frm.get_stride(1);
// Chroma planes are vertically subsampled 2x, hence start / 2.
274 let mut uoff = in_frm.get_offset(1) + start / 2 * ustride;
275 let vstride = in_frm.get_stride(2);
276 let mut voff = in_frm.get_offset(2) + start / 2 * vstride;
277 let (width, _) = in_frm.get_dimensions(0);
278 let data = in_frm.get_data();
279 self.v1_entries.truncate(0);
280 self.v4_entries.truncate(0);
// Walk the strip in 4x4 luma macroblocks.
281 for _ in (start..end).step_by(4) {
282 for x in (0..width).step_by(4) {
283 let mut yblk = [0; 16];
// Neutral chroma defaults — presumably kept as-is for greyscale
// input where no chroma planes are read. TODO confirm.
284 let mut ublk = [128; 4];
285 let mut vblk = [128; 4];
288 yblk[i + j * 4] = data[yoff + x + i + j * ystride];
294 ublk[i + j * 2] = data[uoff + x / 2 + i + j * ustride];
295 vblk[i + j * 2] = data[voff + x / 2 + i + j * vstride];
// V1 entry: each Y is the average of one 2x2 luma quadrant; chroma
// is the average of the whole 2x2 chroma block.
299 self.v1_entries.push(YUVCode {
300 y: [avg4(yblk[ 0], yblk[ 1], yblk[ 4], yblk[ 5]),
301 avg4(yblk[ 2], yblk[ 3], yblk[ 6], yblk[ 7]),
302 avg4(yblk[ 8], yblk[ 9], yblk[12], yblk[13]),
303 avg4(yblk[10], yblk[11], yblk[14], yblk[15])],
304 u: avg4(ublk[0], ublk[1], ublk[2], ublk[3]),
305 v: avg4(vblk[0], vblk[1], vblk[2], vblk[3]),
// V4 entries: the four 2x2 luma quadrants. yidx is the top-left
// offset of quadrant i within the 4x4 block (i bit0 → x, bit1 → y).
308 let yidx = (i & 1) * 2 + (i & 2) * 4;
309 self.v4_entries.push(YUVCode {
310 y: [ yblk[yidx], yblk[yidx + 1], yblk[yidx + 4], yblk[yidx + 5] ],
// Exhaustive nearest-neighbour search over the 256-entry codebook:
// returns the index of the closest codeword and its distance.
321 fn find_nearest(codebook: &[YUVCode; 256], code: YUVCode) -> (u8, u32) {
322 let mut min_dist = std::u32::MAX;
324 for (i, cw) in codebook.iter().enumerate() {
325 let dist = cw.dist(code);
// idx always fits in u8: the codebook has exactly 256 entries.
334 (idx as u8, min_dist)
// Decide whether sending a partial codebook update beats rewriting the
// whole codebook. `cb_size` is the per-entry byte cost (4 for greyscale,
// 6 for colour); the update form pays a fixed overhead of 64 bytes —
// presumably the worst-case mask cost, TODO confirm against write_cb.
336 fn can_update_cb(new_cb: &[YUVCode; 256], old_cb: &[YUVCode; 256], cb_size: usize) -> bool {
337 let mut skip_count = 0;
// Count entries unchanged since the previous codebook; an update only
// has to transmit the remaining (256 - skip_count) entries.
338 for (new, old) in new_cb.iter().zip(old_cb.iter()) {
343 let full_size = cb_size * 256;
344 let upd_size = cb_size * (256 - skip_count) + 64;
// Write one codebook chunk. `id` is the base chunk id (0x20 for V4,
// 0x22 for V1 as used by the callers); greyscale books omit the chroma
// bytes, and when `update` is set only changed entries are written,
// preceded by presence masks. Chroma bytes are XORed with 0x80, i.e.
// converted to the offset-binary form the bitstream stores.
347 fn write_cb(bw: &mut ByteWriter, mut id: u8, new_cb: &[YUVCode; 256], old_cb: &[YUVCode; 256], grayscale: bool, update: bool) -> EncoderResult<()> {
356 let chunk_pos = bw.tell();
// Full-codebook path: dump every entry in order.
358 for entry in new_cb.iter() {
359 bw.write_buf(&entry.y)?;
361 bw.write_byte(entry.u ^ 0x80)?;
362 bw.write_byte(entry.v ^ 0x80)?;
// Update path: scan from the end to find the last changed entry so
// trailing unchanged entries need no mask words at all.
367 for (i, (ncw, ocw)) in new_cb.iter().rev().zip(old_cb.iter().rev()).enumerate() {
// One 32-bit big-endian presence mask per group of 32 entries,
// followed by the changed entries themselves.
374 for i in (0..end).step_by(32) {
378 if new_cb[i + j] != old_cb[i + j] {
382 bw.write_u32be(mask)?;
384 if new_cb[i + j] == old_cb[i + j] { continue; }
385 bw.write_buf(&new_cb[i + j].y)?;
387 bw.write_byte(new_cb[i + j].u ^ 0x80)?;
388 bw.write_byte(new_cb[i + j].v ^ 0x80)?;
// Now that the payload length is known, patch the chunk size field.
393 patch_size(bw, chunk_pos)?;
// Reconstruct the strip into `lastfrm` exactly as a decoder would, so
// the next inter frame is measured against decoded output rather than
// the pristine source. `intra` selects how the mask bits are
// interpreted (V1/V4 only vs skip/V1/V4).
396 fn render_stripe(&mut self, intra: bool, start: usize, end: usize) {
397 if let Some(ref mut dst_frm) = self.lastfrm {
398 let ystride = dst_frm.get_stride(0);
399 let mut yoff = dst_frm.get_offset(0) + start * ystride;
400 let ustride = dst_frm.get_stride(1);
401 let mut uoff = dst_frm.get_offset(1) + start / 2 * ustride;
402 let vstride = dst_frm.get_stride(2);
403 let mut voff = dst_frm.get_offset(2) + start / 2 * vstride;
404 let (width, _) = dst_frm.get_dimensions(0);
405 let data = dst_frm.get_data_mut().unwrap();
// Consume the same mask/index streams the bitstream writer emits.
406 let mut miter = self.masks.masks.iter();
407 let mut v1_iter = self.v1_idx.iter();
408 let mut v4_iter = self.v4_idx.iter();
409 let mut cur_mask = 0;
411 for _ in (start..end).step_by(4) {
412 for x in (0..width).step_by(4) {
// Refill the 32-bit mask word; an intra strip with no V1 blocks has
// no mask data, so every block is treated as coded (all bits set).
414 if !intra || self.v1_idx.len() > 0 {
415 cur_mask = *miter.next().unwrap();
417 cur_mask = 0xFFFFFFFF;
// Inter mode: a clear bit means the block is skipped (pixels kept).
422 if (cur_mask & cur_bit) == 0 {
428 cur_mask = *miter.next().unwrap();
// Clear bit selects V1: one codeword paints the 4x4 block, each Y
// covering a 2x2 quadrant.
432 if (cur_mask & cur_bit) == 0 {
433 let idx = *v1_iter.next().unwrap() as usize;
434 let cb = &self.v1_cur_cb[idx];
436 let mut coff = yoff + x;
437 data[coff] = cb.y[0]; data[coff + 1] = cb.y[0];
438 data[coff + 2] = cb.y[1]; data[coff + 3] = cb.y[1];
440 data[coff] = cb.y[0]; data[coff + 1] = cb.y[0];
441 data[coff + 2] = cb.y[1]; data[coff + 3] = cb.y[1];
443 data[coff] = cb.y[2]; data[coff + 1] = cb.y[2];
444 data[coff + 2] = cb.y[3]; data[coff + 3] = cb.y[3];
446 data[coff] = cb.y[2]; data[coff + 1] = cb.y[2];
447 data[coff + 2] = cb.y[3]; data[coff + 3] = cb.y[3];
// Chroma planes are half resolution: 2x2 samples per macroblock.
450 let mut coff = uoff + x / 2;
451 data[coff] = cb.u; data[coff + 1] = cb.u;
453 data[coff] = cb.u; data[coff + 1] = cb.u;
455 let mut coff = voff + x / 2;
456 data[coff] = cb.v; data[coff + 1] = cb.v;
458 data[coff] = cb.v; data[coff + 1] = cb.v;
// Set bit selects V4: four codewords, one per 2x2 quadrant.
461 let idx0 = *v4_iter.next().unwrap() as usize;
462 let cb0 = &self.v4_cur_cb[idx0];
463 let idx1 = *v4_iter.next().unwrap() as usize;
464 let cb1 = &self.v4_cur_cb[idx1];
465 let idx2 = *v4_iter.next().unwrap() as usize;
466 let cb2 = &self.v4_cur_cb[idx2];
467 let idx3 = *v4_iter.next().unwrap() as usize;
468 let cb3 = &self.v4_cur_cb[idx3];
470 let mut coff = yoff + x;
471 data[coff] = cb0.y[0]; data[coff + 1] = cb0.y[1];
472 data[coff + 2] = cb1.y[0]; data[coff + 3] = cb1.y[1];
474 data[coff] = cb0.y[2]; data[coff + 1] = cb0.y[3];
475 data[coff + 2] = cb1.y[2]; data[coff + 3] = cb1.y[3];
477 data[coff] = cb2.y[0]; data[coff + 1] = cb2.y[1];
478 data[coff + 2] = cb3.y[0]; data[coff + 3] = cb3.y[1];
480 data[coff] = cb2.y[2]; data[coff + 1] = cb2.y[3];
481 data[coff + 2] = cb3.y[2]; data[coff + 3] = cb3.y[3];
// Each quadrant's codeword contributes one chroma sample.
484 let mut coff = uoff + x / 2;
485 data[coff] = cb0.u; data[coff + 1] = cb1.u;
487 data[coff] = cb2.u; data[coff + 1] = cb3.u;
489 let mut coff = voff + x / 2;
490 data[coff] = cb0.v; data[coff + 1] = cb1.v;
492 data[coff] = cb2.v; data[coff + 1] = cb3.v;
// For every 4x4 macroblock in the strip, compute the sum of squared
// differences between the input frame and the previous reconstructed
// frame. The inter coder uses these distances to decide when a block
// can simply be skipped.
505 fn calc_skip_dist(&mut self, in_frm: &NAVideoBuffer<u8>, start: usize, end: usize) {
506 self.skip_dist.truncate(0);
507 if let Some(ref ref_frm) = self.lastfrm {
// Reference (previously reconstructed) frame plane offsets/strides.
508 let rystride = ref_frm.get_stride(0);
509 let mut ryoff = ref_frm.get_offset(0) + start * rystride;
510 let rustride = ref_frm.get_stride(1);
511 let mut ruoff = ref_frm.get_offset(1) + start / 2 * rustride;
512 let rvstride = ref_frm.get_stride(2);
513 let mut rvoff = ref_frm.get_offset(2) + start / 2 * rvstride;
514 let (width, _) = ref_frm.get_dimensions(0);
515 let rdata = ref_frm.get_data();
// Input frame plane offsets/strides.
517 let iystride = in_frm.get_stride(0);
518 let mut iyoff = in_frm.get_offset(0) + start * iystride;
519 let iustride = in_frm.get_stride(1);
520 let mut iuoff = in_frm.get_offset(1) + start / 2 * iustride;
521 let ivstride = in_frm.get_stride(2);
522 let mut ivoff = in_frm.get_offset(2) + start / 2 * ivstride;
523 let idata = in_frm.get_data();
525 for _ in (start..end).step_by(4) {
526 for x in (0..width).step_by(4) {
// Luma contribution: squared differences over the 4x4 block.
528 let mut roff = ryoff + x;
529 let mut ioff = iyoff + x;
532 let d = i32::from(rdata[roff + i]) - i32::from(idata[ioff + i]);
// Chroma contribution: the 2x2 U block...
539 let mut roff = ruoff + x / 2;
540 let mut ioff = iuoff + x / 2;
541 let ud = i32::from(rdata[roff]) - i32::from(idata[ioff]);
543 let ud = i32::from(rdata[roff + 1]) - i32::from(idata[ioff + 1]);
545 roff += rustride; ioff += iustride;
546 let ud = i32::from(rdata[roff]) - i32::from(idata[ioff]);
548 let ud = i32::from(rdata[roff + 1]) - i32::from(idata[ioff + 1]);
// ...and the 2x2 V block.
551 let mut roff = rvoff + x / 2;
552 let mut ioff = ivoff + x / 2;
553 let vd = i32::from(rdata[roff]) - i32::from(idata[ioff]);
555 let vd = i32::from(rdata[roff + 1]) - i32::from(idata[ioff + 1]);
557 roff += rvstride; ioff += ivstride;
558 let vd = i32::from(rdata[roff]) - i32::from(idata[ioff]);
560 let vd = i32::from(rdata[roff + 1]) - i32::from(idata[ioff + 1]);
// One accumulated distance per 4x4 block.
563 self.skip_dist.push(dist as u32);
// Advance to the next 4-line band (2 lines in the chroma planes).
566 iyoff += iystride * 4;
567 iuoff += iustride * 2;
568 ivoff += ivstride * 2;
569 ryoff += rystride * 4;
570 ruoff += rustride * 2;
571 rvoff += rvstride * 2;
// Encode a keyframe: each strip gets freshly quantised codebooks
// (seeded with random entries) and every macroblock is coded as either
// V1 or V4 — no skip blocks. Returns true (the frame is intra).
577 fn encode_intra(&mut self, bw: &mut ByteWriter, in_frm: &NAVideoBuffer<u8>) -> EncoderResult<bool> {
578 let (width, height) = in_frm.get_dimensions(0);
// Strip height rounded up to a multiple of 4 (macroblock rows).
579 let mut strip_h = (height / self.nstrips + 3) & !3;
584 let mut start_line = 0;
585 let mut end_line = strip_h;
// Frame header: flags byte, 24-bit size placeholder (patched at the
// end), dimensions and the strip count.
587 bw.write_byte(0)?; // intra flag
588 bw.write_u24be(0)?; // frame size
589 let frame_data_pos = bw.tell();
590 bw.write_u16be(width as u16)?;
591 bw.write_u16be(height as u16)?;
592 bw.write_u16be(self.nstrips as u16)?;
// Seed both reference codebooks with random entries before the first
// quantisation pass.
594 for entry in self.v1_cb.iter_mut() {
595 self.rng.fill_entry(entry);
597 for entry in self.v4_cb.iter_mut() {
598 self.rng.fill_entry(entry);
600 while start_line < height {
601 self.read_strip(in_frm, start_line, end_line);
// ELBG quantisation kept for reference; median cut is used instead.
603 // let mut elbg_v1: ELBG<YUVCode, YUVCodeSum> = ELBG::new(&self.v1_cb);
604 // let mut elbg_v4: ELBG<YUVCode, YUVCodeSum> = ELBG::new(&self.v4_cb);
605 // elbg_v1.quantise(&self.v1_entries, &mut self.v1_cur_cb);
606 // elbg_v4.quantise(&self.v4_entries, &mut self.v4_cur_cb);
607 quantise_median_cut::<YUVCode, YUVCodeSum>(&self.v1_entries, &mut self.v1_cur_cb);
608 quantise_median_cut::<YUVCode, YUVCodeSum>(&self.v4_entries, &mut self.v4_cur_cb);
// Post-process the quantised codewords (loop bodies not shown here —
// presumably greyscale chroma neutralisation; TODO confirm).
610 for cw in self.v1_cur_cb.iter_mut() {
614 for cw in self.v4_cur_cb.iter_mut() {
620 self.v1_idx.truncate(0);
621 self.v4_idx.truncate(0);
// Per-macroblock mode decision: compare the single V1 codeword error
// against the summed error of the four V4 quadrant codewords.
624 for (v1_entry, v4_entries) in self.v1_entries.iter().zip(self.v4_entries.chunks(4)) {
625 let (v1_idx, v1_dist) = Self::find_nearest(&self.v1_cur_cb, *v1_entry);
628 self.v1_idx.push(v1_idx);
631 let (v40_idx, v40_dist) = Self::find_nearest(&self.v4_cur_cb, v4_entries[0]);
632 let (v41_idx, v41_dist) = Self::find_nearest(&self.v4_cur_cb, v4_entries[1]);
633 let (v42_idx, v42_dist) = Self::find_nearest(&self.v4_cur_cb, v4_entries[2]);
634 let (v43_idx, v43_dist) = Self::find_nearest(&self.v4_cur_cb, v4_entries[3]);
635 if v40_dist + v41_dist + v42_dist + v43_dist > v1_dist {
637 self.v4_idx.push(v40_idx);
638 self.v4_idx.push(v41_idx);
639 self.v4_idx.push(v42_idx);
640 self.v4_idx.push(v43_idx);
643 self.v1_idx.push(v1_idx);
// The first strip is always intra; later strips may reuse the
// previous strip's codebooks via partial updates when cheaper.
648 let mut is_intra_strip = start_line == 0;
649 let (upd_v1, upd_v4) = if !is_intra_strip {
650 let cb_size = if self.grayscale { 4 } else { 6 };
651 (Self::can_update_cb(&self.v1_cur_cb, &self.v1_cb, cb_size),
652 Self::can_update_cb(&self.v4_cur_cb, &self.v4_cb, cb_size))
// If neither codebook benefits from an update, send a full intra strip.
656 if !is_intra_strip && !upd_v1 && !upd_v4 {
657 is_intra_strip = true;
// Strip header: 0x10 = intra strip, 0x11 = inter strip.
659 bw.write_byte(if is_intra_strip { 0x10 } else { 0x11 })?;
660 bw.write_u24be(0)?; // strip size
661 let strip_data_pos = bw.tell();
662 bw.write_u16be(0)?; // yoff
663 bw.write_u16be(0)?; // xoff
664 bw.write_u16be((end_line - start_line) as u16)?;
665 bw.write_u16be(width as u16)?;
667 Self::write_cb(bw, 0x20, &self.v4_cur_cb, &self.v4_cb, self.grayscale, upd_v4)?;
668 Self::write_cb(bw, 0x22, &self.v1_cur_cb, &self.v1_cb, self.grayscale, upd_v1)?;
// Reconstruct the strip so it can serve as the next inter reference.
670 self.render_stripe(true, start_line, end_line);
// 0x32 = V4-only chunk (plain index bytes, 4-byte header included in
// the size); 0x30 = mixed chunk with per-block V1/V4 mask bits.
672 if self.v1_idx.len() == 0 {
673 bw.write_byte(0x32)?;
674 bw.write_u24be((self.v4_idx.len() + 4) as u32)?;
675 bw.write_buf(self.v4_idx.as_slice())?;
677 bw.write_byte(0x30)?;
679 let chunk_pos = bw.tell();
// Mask bit clear = V1 block (one index); set = V4 (four indices).
689 for mask in self.masks.masks.iter() {
690 bw.write_u32be(*mask)?;
691 for j in (0..32).rev() {
692 if (mask & (1 << j)) == 0 {
693 bw.write_byte(self.v1_idx[v1_pos])?;
696 bw.write_byte(self.v4_idx[v4_pos])?;
697 bw.write_byte(self.v4_idx[v4_pos + 1])?;
698 bw.write_byte(self.v4_idx[v4_pos + 2])?;
699 bw.write_byte(self.v4_idx[v4_pos + 3])?;
704 patch_size(bw, chunk_pos)?;
707 patch_size(bw, strip_data_pos)?;
// Current codebooks become the reference for the next strip / frame.
709 self.v1_cb.copy_from_slice(&self.v1_cur_cb);
710 self.v4_cb.copy_from_slice(&self.v4_cur_cb);
711 start_line = end_line;
712 end_line = (end_line + strip_h).min(height);
714 patch_size(bw, frame_data_pos)?;
// Encode an inter frame: macroblocks sufficiently close to the previous
// reconstructed frame are skipped; the rest are coded as V1 or V4.
// Returns false (the frame is not intra).
717 fn encode_inter(&mut self, bw: &mut ByteWriter, in_frm: &NAVideoBuffer<u8>) -> EncoderResult<bool> {
718 let (width, height) = in_frm.get_dimensions(0);
// Strip height rounded up to a multiple of 4 (macroblock rows).
719 let mut strip_h = (height / self.nstrips + 3) & !3;
724 let mut start_line = 0;
725 let mut end_line = strip_h;
// Frame header: flags byte 1 marks an inter frame; size patched later.
727 bw.write_byte(1)?; // intra flag
728 bw.write_u24be(0)?; // frame size
729 let frame_data_pos = bw.tell();
730 bw.write_u16be(width as u16)?;
731 bw.write_u16be(height as u16)?;
732 bw.write_u16be(self.nstrips as u16)?;
734 while start_line < height {
735 self.read_strip(in_frm, start_line, end_line);
736 self.calc_skip_dist(in_frm, start_line, end_line);
// ELBG quantisation kept for reference; median cut is used instead.
738 // let mut elbg_v1: ELBG<YUVCode, YUVCodeSum> = ELBG::new(&self.v1_cb);
739 // let mut elbg_v4: ELBG<YUVCode, YUVCodeSum> = ELBG::new(&self.v4_cb);
740 // elbg_v1.quantise(&self.v1_entries, &mut self.v1_cur_cb);
741 // elbg_v4.quantise(&self.v4_entries, &mut self.v4_cur_cb);
742 quantise_median_cut::<YUVCode, YUVCodeSum>(&self.v1_entries, &mut self.v1_cur_cb);
743 quantise_median_cut::<YUVCode, YUVCodeSum>(&self.v4_entries, &mut self.v4_cur_cb);
// Post-process the quantised codewords (bodies not shown here).
745 for cw in self.v1_cur_cb.iter_mut() {
749 for cw in self.v4_cur_cb.iter_mut() {
755 self.v1_idx.truncate(0);
756 self.v4_idx.truncate(0);
// Per-block mode decision: skip when the previous-frame distance beats
// the best V1 match; otherwise choose V1 vs V4 by total error.
759 let mut skip_iter = self.skip_dist.iter();
760 for (v1_entry, v4_entries) in self.v1_entries.iter().zip(self.v4_entries.chunks(4)) {
761 let skip_dist = *skip_iter.next().unwrap();
763 self.masks.put_inter(true);
766 let (v1_idx, v1_dist) = Self::find_nearest(&self.v1_cur_cb, *v1_entry);
767 if skip_dist < v1_dist {
768 self.masks.put_inter(true);
771 self.masks.put_inter(false);
775 self.v1_idx.push(v1_idx);
778 let (v40_idx, v40_dist) = Self::find_nearest(&self.v4_cur_cb, v4_entries[0]);
779 let (v41_idx, v41_dist) = Self::find_nearest(&self.v4_cur_cb, v4_entries[1]);
780 let (v42_idx, v42_dist) = Self::find_nearest(&self.v4_cur_cb, v4_entries[2]);
781 let (v43_idx, v43_dist) = Self::find_nearest(&self.v4_cur_cb, v4_entries[3]);
782 if v40_dist + v41_dist + v42_dist + v43_dist > v1_dist {
784 self.v4_idx.push(v40_idx);
785 self.v4_idx.push(v41_idx);
786 self.v4_idx.push(v42_idx);
787 self.v4_idx.push(v43_idx);
790 self.v1_idx.push(v1_idx);
// Decide whether the codebooks should be sent as partial updates.
795 let (upd_v1, upd_v4) = {
796 let cb_size = if self.grayscale { 4 } else { 6 };
797 (Self::can_update_cb(&self.v1_cur_cb, &self.v1_cb, cb_size),
798 Self::can_update_cb(&self.v4_cur_cb, &self.v4_cb, cb_size))
// Inter strips always use strip id 0x11.
800 bw.write_byte(0x11)?;
801 bw.write_u24be(0)?; // strip size
802 let strip_data_pos = bw.tell();
803 bw.write_u16be(0)?; // yoff
804 bw.write_u16be(0)?; // xoff
805 bw.write_u16be((end_line - start_line) as u16)?;
806 bw.write_u16be(width as u16)?;
808 Self::write_cb(bw, 0x20, &self.v4_cur_cb, &self.v4_cb, self.grayscale, upd_v4)?;
809 Self::write_cb(bw, 0x22, &self.v1_cur_cb, &self.v1_cb, self.grayscale, upd_v1)?;
// Reconstruct the strip as the decoder would (inter interpretation).
811 self.render_stripe(false, start_line, end_line);
// 0x31 = inter block data chunk with skip/coded mask bits.
813 bw.write_byte(0x31)?;
815 let chunk_pos = bw.tell();
// For each mask word: a clear bit means the block is skipped; coded
// blocks consume further bits/indices (V1 = one index, V4 = four).
826 for mask in self.masks.masks.iter() {
827 bw.write_u32be(*mask)?;
828 if *mask == 0 { continue; }
829 let mut bit = 1 << 31;
832 skip = (mask & bit) == 0;
835 if (mask & bit) == 0 {
836 bw.write_byte(self.v1_idx[v1_pos])?;
839 bw.write_byte(self.v4_idx[v4_pos])?;
840 bw.write_byte(self.v4_idx[v4_pos + 1])?;
841 bw.write_byte(self.v4_idx[v4_pos + 2])?;
842 bw.write_byte(self.v4_idx[v4_pos + 3])?;
850 patch_size(bw, chunk_pos)?;
852 patch_size(bw, strip_data_pos)?;
// Current codebooks become the reference for the next strip / frame.
854 self.v1_cb.copy_from_slice(&self.v1_cur_cb);
855 self.v4_cb.copy_from_slice(&self.v4_cur_cb);
856 start_line = end_line;
857 end_line = (end_line + strip_h).min(height);
859 patch_size(bw, frame_data_pos)?;
864 impl NAEncoder for CinepakEncoder {
// Negotiate an output format: snap dimensions up to multiples of four
// and use YUV420 unless the caller explicitly asked for greyscale.
865 fn negotiate_format(&self, encinfo: &EncodeParameters) -> EncoderResult<EncodeParameters> {
866 match encinfo.format {
867 NACodecTypeInfo::None => {
868 let mut ofmt = EncodeParameters::default();
869 ofmt.format = NACodecTypeInfo::Video(NAVideoInfo::new(0, 0, true, YUV420_FORMAT));
872 NACodecTypeInfo::Audio(_) => return Err(EncoderError::FormatError),
873 NACodecTypeInfo::Video(vinfo) => {
874 let pix_fmt = if vinfo.format == GRAY_FORMAT { GRAY_FORMAT } else { YUV420_FORMAT };
875 let outinfo = NAVideoInfo::new((vinfo.width + 3) & !3, (vinfo.height + 3) & !3, true, pix_fmt);
876 let mut ofmt = *encinfo;
877 ofmt.format = NACodecTypeInfo::Video(outinfo);
// Validate the negotiated parameters, create the output stream and
// preallocate the working buffers plus the reconstruction frame.
882 fn init(&mut self, stream_id: u32, encinfo: EncodeParameters) -> EncoderResult<NAStreamRef> {
883 match encinfo.format {
884 NACodecTypeInfo::None => Err(EncoderError::FormatError),
885 NACodecTypeInfo::Audio(_) => Err(EncoderError::FormatError),
886 NACodecTypeInfo::Video(vinfo) => {
887 if vinfo.format != YUV420_FORMAT && vinfo.format != GRAY_FORMAT {
888 return Err(EncoderError::FormatError);
// Dimensions must be multiples of four and fit the 16-bit header fields.
890 if ((vinfo.width | vinfo.height) & 3) != 0 {
891 return Err(EncoderError::FormatError);
893 if (vinfo.width | vinfo.height) >= (1 << 16) {
894 return Err(EncoderError::FormatError);
897 let out_info = NAVideoInfo::new(vinfo.width, vinfo.height, false, vinfo.format);
898 let info = NACodecInfo::new("cinepak", NACodecTypeInfo::Video(out_info.clone()), None);
899 let stream = NAStream::new(StreamType::Video, stream_id, info, encinfo.tb_num, encinfo.tb_den).into_ref();
901 self.stream = Some(stream.clone());
902 self.quality = encinfo.quality;
903 self.grayscale = vinfo.format != YUV420_FORMAT;
// NOTE(review): evaluates left-to-right as (width / 2) * height / 2,
// i.e. the number of 2x2 blocks in the frame.
904 let num_blocks = vinfo.width / 2 * vinfo.height / 2;
905 self.v1_entries = Vec::with_capacity(num_blocks);
906 self.v4_entries = Vec::with_capacity(num_blocks * 4);
907 self.v1_idx = Vec::with_capacity(num_blocks);
908 self.v4_idx = Vec::with_capacity(num_blocks * 4);
// One skip distance per 4x4 macroblock.
909 self.skip_dist = Vec::with_capacity(vinfo.width / 4 * vinfo.height / 4);
// Reconstruction buffer, doubling as the inter reference frame.
911 let buf = alloc_video_buffer(out_info, 2)?;
912 self.lastfrm = Some(buf.get_vbuf().unwrap());
// Encode one frame; frmcount == 0 forces a keyframe.
918 fn encode(&mut self, frm: &NAFrame) -> EncoderResult<()> {
919 let buf = frm.get_buffer();
920 if let Some(ref vbuf) = buf.get_vbuf() {
921 let mut dbuf = Vec::with_capacity(4);
922 let mut gw = GrowableMemoryWriter::new_write(&mut dbuf);
923 let mut bw = ByteWriter::new(&mut gw);
924 let is_intra = if self.frmcount == 0 {
925 self.encode_intra(&mut bw, vbuf)?
927 self.encode_inter(&mut bw, vbuf)?
929 self.pkt = Some(NAPacket::new(self.stream.clone().unwrap(), frm.ts, is_intra, dbuf));
// Fixed keyframe interval of 25 frames — presumably the counter is
// reset to 0 here (body elided); TODO confirm.
931 if self.frmcount == 25 {
936 Err(EncoderError::InvalidParameters)
// Hand out the pending packet (if any), leaving None in its place.
939 fn get_packet(&mut self) -> EncoderResult<Option<NAPacket>> {
941 std::mem::swap(&mut self.pkt, &mut npkt);
944 fn flush(&mut self) -> EncoderResult<()> {
// The encoder currently exposes no configurable options.
950 impl NAOptionHandler for CinepakEncoder {
951 fn get_supported_options(&self) -> &[NAOptionDefinition] { &[] }
952 fn set_options(&mut self, _options: &[NAOption]) { }
953 fn query_option_value(&self, _name: &str) -> Option<NAValue> { None }
// Public constructor used by the codec registration machinery.
956 pub fn get_encoder() -> Box<dyn NAEncoder + Send> {
957 Box::new(CinepakEncoder::new())
// Round-trip smoke test: decode a sample AVI, re-encode it with this
// Cinepak encoder and mux the result out to a file.
962 use nihav_core::codecs::*;
963 use nihav_core::demuxers::*;
964 use nihav_core::muxers::*;
966 use nihav_codec_support::test::enc_video::*;
969 fn test_cinepak_encoder() {
// Register every demuxer/decoder/muxer/encoder needed for the chain.
970 let mut dmx_reg = RegisteredDemuxers::new();
971 generic_register_all_demuxers(&mut dmx_reg);
972 let mut dec_reg = RegisteredDecoders::new();
973 generic_register_all_codecs(&mut dec_reg);
974 let mut mux_reg = RegisteredMuxers::new();
975 generic_register_all_muxers(&mut mux_reg);
976 let mut enc_reg = RegisteredEncoders::new();
977 generic_register_all_encoders(&mut enc_reg);
// Input: the video stream of a bundled test asset.
979 let dec_config = DecoderTestParams {
981 in_name: "assets/Misc/TalkingHead_352x288.avi",
982 stream_type: StreamType::Video,
986 let enc_config = EncoderTestParams {
989 out_name: "cinepak.avi",
// Target format handed to the encoder under test.
992 let dst_vinfo = NAVideoInfo {
995 format: YUV420_FORMAT,
998 let enc_params = EncodeParameters {
999 format: NACodecTypeInfo::Video(dst_vinfo),
1006 test_encoding_to_file(&dec_config, &enc_config, enc_params);