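//! Cinepak video encoder.
//!
//! The encoder splits every frame into strips, builds V1 and V4 vector
//! quantisation codebooks per strip (via median cut) and codes each 4x4
//! block either as a single V1 index, as four V4 indices or, in inter
//! frames, as a skipped block copied from the previous frame.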
use nihav_core::codecs::*;
use nihav_core::io::byteio::*;
use nihav_codec_support::vq::*;

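/// Single VQ element: four luma samples plus one chroma pair.
/// V4 entries hold a 2x2 luma block, V1 entries hold the four
/// averaged quadrants of a 4x4 block.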
#[derive(Default,Clone,Copy,PartialEq,Debug)]
struct YUVCode {
    y: [u8; 4],
    u: u8,
    v: u8,
}
impl VQElement for YUVCode {
    fn dist(&self, rval: Self) -> u32 {
        let mut ysum = 0;
        for (y0, y1) in self.y.iter().zip(rval.y.iter()) {
            let yd = i32::from(*y0) - i32::from(*y1);
            ysum += yd * yd;
        }
        let ud = i32::from(self.u) - i32::from(rval.u);
        let vd = i32::from(self.v) - i32::from(rval.v);
        (ysum + ud * ud + vd * vd) as u32
    }
    fn min_cw() -> Self { YUVCode { y: [0; 4], u: 0, v: 0 } }
    fn max_cw() -> Self { YUVCode { y: [255; 4], u: 255, v: 255 } }
    fn min(&self, rval: Self) -> Self {
        let mut ycode = YUVCode::default();
        for i in 0..4 {
            ycode.y[i] = self.y[i].min(rval.y[i]);
        }
        ycode.u = self.u.min(rval.u);
        ycode.v = self.v.min(rval.v);
        ycode
    }
    fn max(&self, rval: Self) -> Self {
        let mut ycode = YUVCode::default();
        for i in 0..4 {
            ycode.y[i] = self.y[i].max(rval.y[i]);
        }
        ycode.u = self.u.max(rval.u);
        ycode.v = self.v.max(rval.v);
        ycode
    }
    fn num_components() -> usize { 6 }
    fn sort_by_component(arr: &mut [Self], component: usize) {
        // stable counting sort on the requested component
        let mut counts = [0; 256];
        for entry in arr.iter() {
            let idx = match component {
                0 | 1 | 2 | 3 => entry.y[component],
                4 => entry.u,
                _ => entry.v,
            } as usize;
            counts[idx] += 1;
        }
        let mut offs = [0; 256];
        for i in 0..255 {
            offs[i + 1] = offs[i] + counts[i];
        }
        let mut dst = vec![YUVCode::default(); arr.len()];
        for entry in arr.iter() {
            let idx = match component {
                0 | 1 | 2 | 3 => entry.y[component],
                4 => entry.u,
                _ => entry.v,
            } as usize;
            dst[offs[idx]] = *entry;
            offs[idx] += 1;
        }
        arr.copy_from_slice(dst.as_slice());
    }
    fn max_dist_component(min: &Self, max: &Self) -> usize {
        let mut comp = 0;
        let mut diff = 0;
        for i in 0..4 {
            let d = u32::from(max.y[i]) - u32::from(min.y[i]);
            if d > diff {
                diff = d;
                comp = i;
            }
        }
        let ud = u32::from(max.u) - u32::from(min.u);
        if ud > diff {
            diff = ud;
            comp = 4;
        }
        let vd = u32::from(max.v) - u32::from(min.v);
        if vd > diff {
            comp = 5;
        }
        comp
    }
}

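/// Running sums for building codebook centroids during quantisation.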
#[derive(Default)]
struct YUVCodeSum {
    ysum: [u64; 4],
    usum: u64,
    vsum: u64,
    count: u64,
}

impl VQElementSum<YUVCode> for YUVCodeSum {
    fn zero() -> Self { Self::default() }
    fn add(&mut self, rval: YUVCode, count: u64) {
        for i in 0..4 {
            self.ysum[i] += u64::from(rval.y[i]) * count;
        }
        self.usum += u64::from(rval.u) * count;
        self.vsum += u64::from(rval.v) * count;
        self.count += count;
    }
    fn get_centroid(&self) -> YUVCode {
        if self.count != 0 {
            let mut ycode = YUVCode::default();
            for i in 0..4 {
                ycode.y[i] = ((self.ysum[i] + self.count / 2) / self.count) as u8;
            }
            ycode.u = ((self.usum + self.count / 2) / self.count) as u8;
            ycode.v = ((self.vsum + self.count / 2) / self.count) as u8;
            ycode
        } else {
            YUVCode::default()
        }
    }
}

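/// Small xorshift-style PRNG used to fill the initial codebooks
/// with random entries.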
struct RNG {
    seed: u32,
}

impl RNG {
    fn new() -> Self { Self { seed: 0x12345678 } }
    fn next(&mut self) -> u8 {
        let mut x = self.seed;
        x ^= x.wrapping_shl(13);
        x ^= x >> 17;
        self.seed = x;
        (self.seed >> 24) as u8
    }
    fn fill_entry(&mut self, entry: &mut YUVCode) {
        for y in entry.y.iter_mut() {
            *y = self.next();
        }
        entry.u = self.next();
        entry.v = self.next();
    }
}

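/// Pixel format description for greyscale input (a single 8-bit plane).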
const GRAY_FORMAT: NAPixelFormaton = NAPixelFormaton {
    model: ColorModel::YUV(YUVSubmodel::YUVJ),
    components: 1,
    comp_info: [Some(NAPixelChromaton{h_ss: 0, v_ss: 0, packed: false, depth: 8, shift: 0, comp_offs: 0, next_elem: 1}), None, None, None, None],
    elem_size: 1,
    be: true,
    alpha: false,
    palette: false,
};

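/// Collects per-block decision bits (V1/V4 selection and, for inter
/// strips, coded/skip flags) into the 32-bit big-endian masks used
/// by the bitstream.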
struct MaskWriter {
    masks: Vec<u32>,
    mask: u32,
    pos: u8,
}

impl MaskWriter {
    fn new() -> Self {
        Self {
            masks: Vec::new(),
            mask: 0,
            pos: 0,
        }
    }
    fn reset(&mut self) {
        self.masks.truncate(0);
        self.mask = 0;
        self.pos = 0;
    }
    fn put_v1(&mut self) {
        self.mask <<= 1;
        self.pos += 1;
        if self.pos == 32 {
            self.flush();
        }
    }
    fn put_v4(&mut self) {
        self.mask <<= 1;
        self.mask |= 1;
        self.pos += 1;
        if self.pos == 32 {
            self.flush();
        }
    }
    fn put_inter(&mut self, skip: bool) {
        self.mask <<= 1;
        self.mask |= !skip as u32;
        self.pos += 1;
        if self.pos == 32 {
            self.flush();
        }
    }
    fn flush(&mut self) {
        self.masks.push(self.mask);
        self.mask = 0;
        self.pos = 0;
    }
    fn end(&mut self) {
        if self.pos == 0 { return; }
        while self.pos < 32 {
            self.mask <<= 1;
            self.pos += 1;
        }
        self.flush();
    }
}

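/// Cinepak encoder context: the previously reconstructed frame for
/// skip decisions, previous and current V1/V4 codebooks, and the
/// per-strip entries, indices and masks.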
struct CinepakEncoder {
    stream: Option<NAStreamRef>,
    lastfrm: Option<NAVideoBufferRef<u8>>,
    pkt: Option<NAPacket>,
    frmcount: u8,
    quality: u8,
    nstrips: usize,
    v1_entries: Vec<YUVCode>,
    v4_entries: Vec<YUVCode>,
    v1_cb: [YUVCode; 256],
    v4_cb: [YUVCode; 256],
    v1_cur_cb: [YUVCode; 256],
    v4_cur_cb: [YUVCode; 256],
    v1_idx: Vec<u8>,
    v4_idx: Vec<u8>,
    grayscale: bool,
    rng: RNG,
    masks: MaskWriter,
    skip_dist: Vec<u32>,
}

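/// Averages four samples, rounding up; used for downsampling to V1 entries.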
fn avg4(a: u8, b: u8, c: u8, d: u8) -> u8 {
    ((u16::from(a) + u16::from(b) + u16::from(c) + u16::from(d) + 3) >> 2) as u8
}

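/// Goes back and patches the 24-bit size field of the chunk that started
/// at `pos` (the stored size includes the four-byte chunk header).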
fn patch_size(bw: &mut ByteWriter, pos: u64) -> EncoderResult<()> {
    let size = bw.tell() - pos;
    bw.seek(SeekFrom::Current(-((size + 3) as i64)))?;
    bw.write_u24be((size + 4) as u32)?;
    bw.seek(SeekFrom::End(0))?;
    Ok(())
}

impl CinepakEncoder {
    fn new() -> Self {
        Self {
            stream: None,
            pkt: None,
            lastfrm: None,
            frmcount: 0,
            quality: 0,
            nstrips: 2,
            v1_entries: Vec::new(),
            v4_entries: Vec::new(),
            v1_cb: [YUVCode::default(); 256],
            v4_cb: [YUVCode::default(); 256],
            v1_cur_cb: [YUVCode::default(); 256],
            v4_cur_cb: [YUVCode::default(); 256],
            grayscale: false,
            rng: RNG::new(),
            v1_idx: Vec::new(),
            v4_idx: Vec::new(),
            masks: MaskWriter::new(),
            skip_dist: Vec::new(),
        }
    }
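    /// Collects VQ entries for one strip: one downsampled V1 entry per
    /// 4x4 block plus four full-resolution V4 entries for its quadrants.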
    fn read_strip(&mut self, in_frm: &NAVideoBuffer<u8>, start: usize, end: usize) {
        let ystride = in_frm.get_stride(0);
        let mut yoff = in_frm.get_offset(0) + start * ystride;
        let ustride = in_frm.get_stride(1);
        let mut uoff = in_frm.get_offset(1) + start / 2 * ustride;
        let vstride = in_frm.get_stride(2);
        let mut voff = in_frm.get_offset(2) + start / 2 * vstride;
        let (width, _) = in_frm.get_dimensions(0);
        let data = in_frm.get_data();
        self.v1_entries.truncate(0);
        self.v4_entries.truncate(0);
        for _ in (start..end).step_by(4) {
            for x in (0..width).step_by(4) {
                let mut yblk = [0; 16];
                let mut ublk = [128; 4];
                let mut vblk = [128; 4];
                for j in 0..4 {
                    for i in 0..4 {
                        yblk[i + j * 4] = data[yoff + x + i + j * ystride];
                    }
                }
                if !self.grayscale {
                    for j in 0..2 {
                        for i in 0..2 {
                            ublk[i + j * 2] = data[uoff + x / 2 + i + j * ustride];
                            vblk[i + j * 2] = data[voff + x / 2 + i + j * vstride];
                        }
                    }
                }
                self.v1_entries.push(YUVCode {
                    y: [avg4(yblk[ 0], yblk[ 1], yblk[ 4], yblk[ 5]),
                        avg4(yblk[ 2], yblk[ 3], yblk[ 6], yblk[ 7]),
                        avg4(yblk[ 8], yblk[ 9], yblk[12], yblk[13]),
                        avg4(yblk[10], yblk[11], yblk[14], yblk[15])],
                    u: avg4(ublk[0], ublk[1], ublk[2], ublk[3]),
                    v: avg4(vblk[0], vblk[1], vblk[2], vblk[3]),
                });
                for i in 0..4 {
                    let yidx = (i & 1) * 2 + (i & 2) * 4;
                    self.v4_entries.push(YUVCode {
                        y: [yblk[yidx], yblk[yidx + 1], yblk[yidx + 4], yblk[yidx + 5]],
                        u: ublk[i],
                        v: vblk[i],
                    });
                }
            }
            yoff += ystride * 4;
            uoff += ustride * 2;
            voff += vstride * 2;
        }
    }
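    /// Returns the index and squared distance of the closest codeword.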
    fn find_nearest(codebook: &[YUVCode; 256], code: YUVCode) -> (u8, u32) {
        let mut min_dist = std::u32::MAX;
        let mut idx = 0;
        for (i, cw) in codebook.iter().enumerate() {
            let dist = cw.dist(code);
            if dist < min_dist {
                min_dist = dist;
                idx = i;
                if dist == 0 {
                    break;
                }
            }
        }
        (idx as u8, min_dist)
    }
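    /// Returns true when transmitting only the changed codewords (plus
    /// update masks) is cheaper than retransmitting the whole codebook.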
    fn can_update_cb(new_cb: &[YUVCode; 256], old_cb: &[YUVCode; 256], cb_size: usize) -> bool {
        let mut skip_count = 0;
        for (new, old) in new_cb.iter().zip(old_cb.iter()) {
            if new == old {
                skip_count += 1;
            }
        }
        let full_size = cb_size * 256;
        // the constant is a rough allowance for the per-32-entry update masks
        let upd_size = cb_size * (256 - skip_count) + 64;
        upd_size < full_size
    }
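    /// Writes a codebook chunk, either in full or as a masked update
    /// against the previously transmitted codebook.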
    fn write_cb(bw: &mut ByteWriter, mut id: u8, new_cb: &[YUVCode; 256], old_cb: &[YUVCode; 256], grayscale: bool, update: bool) -> EncoderResult<()> {
        if grayscale {
            id |= 4;
        }
        if update {
            id |= 1;
        }
        bw.write_byte(id)?;
        bw.write_u24be(0)?;
        let chunk_pos = bw.tell();
        if !update {
            for entry in new_cb.iter() {
                bw.write_buf(&entry.y)?;
                if !grayscale {
                    // chroma is stored as signed values, hence the bias flip
                    bw.write_byte(entry.u ^ 0x80)?;
                    bw.write_byte(entry.v ^ 0x80)?;
                }
            }
        } else {
            // find the index right past the last changed entry, scanning from
            // the end, so trailing unchanged codewords need no update masks
            let mut end = 256;
            for (i, (ncw, ocw)) in new_cb.iter().zip(old_cb.iter()).enumerate().rev() {
                if ncw == ocw {
                    end = i;
                } else {
                    break;
                }
            }
            for i in (0..end).step_by(32) {
                let mut mask = 0;
                for j in 0..32 {
                    mask <<= 1;
                    if new_cb[i + j] != old_cb[i + j] {
                        mask |= 1;
                    }
                }
                bw.write_u32be(mask)?;
                for j in 0..32 {
                    if new_cb[i + j] == old_cb[i + j] { continue; }
                    bw.write_buf(&new_cb[i + j].y)?;
                    if !grayscale {
                        bw.write_byte(new_cb[i + j].u ^ 0x80)?;
                        bw.write_byte(new_cb[i + j].v ^ 0x80)?;
                    }
                }
            }
        }
        patch_size(bw, chunk_pos)?;
        Ok(())
    }
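    /// Renders the strip into the reference frame the same way a decoder
    /// would, so later frames can measure skip distances against it.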
    fn render_stripe(&mut self, intra: bool, start: usize, end: usize) {
        if let Some(ref mut dst_frm) = self.lastfrm {
            let ystride = dst_frm.get_stride(0);
            let mut yoff = dst_frm.get_offset(0) + start * ystride;
            let ustride = dst_frm.get_stride(1);
            let mut uoff = dst_frm.get_offset(1) + start / 2 * ustride;
            let vstride = dst_frm.get_stride(2);
            let mut voff = dst_frm.get_offset(2) + start / 2 * vstride;
            let (width, _) = dst_frm.get_dimensions(0);
            let data = dst_frm.get_data_mut().unwrap();
            let mut miter = self.masks.masks.iter();
            let mut v1_iter = self.v1_idx.iter();
            let mut v4_iter = self.v4_idx.iter();
            let mut cur_mask = 0;
            let mut cur_bit = 0;
            for _ in (start..end).step_by(4) {
                for x in (0..width).step_by(4) {
                    if cur_bit == 0 {
                        if !intra || !self.v1_idx.is_empty() {
                            cur_mask = *miter.next().unwrap();
                        } else {
                            // an all-V4 intra strip is stored without masks
                            cur_mask = 0xFFFFFFFF;
                        }
                        cur_bit = 1 << 31;
                    }
                    if !intra {
                        // in inter strips the first bit tells whether the block is coded at all
                        if (cur_mask & cur_bit) == 0 {
                            cur_bit >>= 1;
                            continue;
                        }
                        cur_bit >>= 1;
                        if cur_bit == 0 {
                            cur_mask = *miter.next().unwrap();
                            cur_bit = 1 << 31;
                        }
                    }
                    if (cur_mask & cur_bit) == 0 {
                        // V1 block: each codebook sample covers a 2x2 area
                        let idx = *v1_iter.next().unwrap() as usize;
                        let cb = &self.v1_cur_cb[idx];

                        let mut coff = yoff + x;
                        data[coff]     = cb.y[0]; data[coff + 1] = cb.y[0];
                        data[coff + 2] = cb.y[1]; data[coff + 3] = cb.y[1];
                        coff += ystride;
                        data[coff]     = cb.y[0]; data[coff + 1] = cb.y[0];
                        data[coff + 2] = cb.y[1]; data[coff + 3] = cb.y[1];
                        coff += ystride;
                        data[coff]     = cb.y[2]; data[coff + 1] = cb.y[2];
                        data[coff + 2] = cb.y[3]; data[coff + 3] = cb.y[3];
                        coff += ystride;
                        data[coff]     = cb.y[2]; data[coff + 1] = cb.y[2];
                        data[coff + 2] = cb.y[3]; data[coff + 3] = cb.y[3];

                        if !self.grayscale {
                            let mut coff = uoff + x / 2;
                            data[coff] = cb.u; data[coff + 1] = cb.u;
                            coff += ustride;
                            data[coff] = cb.u; data[coff + 1] = cb.u;

                            let mut coff = voff + x / 2;
                            data[coff] = cb.v; data[coff + 1] = cb.v;
                            coff += vstride;
                            data[coff] = cb.v; data[coff + 1] = cb.v;
                        }
                    } else {
                        // V4 block: one codebook entry per 2x2 quadrant
                        let idx0 = *v4_iter.next().unwrap() as usize;
                        let cb0 = &self.v4_cur_cb[idx0];
                        let idx1 = *v4_iter.next().unwrap() as usize;
                        let cb1 = &self.v4_cur_cb[idx1];
                        let idx2 = *v4_iter.next().unwrap() as usize;
                        let cb2 = &self.v4_cur_cb[idx2];
                        let idx3 = *v4_iter.next().unwrap() as usize;
                        let cb3 = &self.v4_cur_cb[idx3];

                        let mut coff = yoff + x;
                        data[coff]     = cb0.y[0]; data[coff + 1] = cb0.y[1];
                        data[coff + 2] = cb1.y[0]; data[coff + 3] = cb1.y[1];
                        coff += ystride;
                        data[coff]     = cb0.y[2]; data[coff + 1] = cb0.y[3];
                        data[coff + 2] = cb1.y[2]; data[coff + 3] = cb1.y[3];
                        coff += ystride;
                        data[coff]     = cb2.y[0]; data[coff + 1] = cb2.y[1];
                        data[coff + 2] = cb3.y[0]; data[coff + 3] = cb3.y[1];
                        coff += ystride;
                        data[coff]     = cb2.y[2]; data[coff + 1] = cb2.y[3];
                        data[coff + 2] = cb3.y[2]; data[coff + 3] = cb3.y[3];

                        if !self.grayscale {
                            let mut coff = uoff + x / 2;
                            data[coff] = cb0.u; data[coff + 1] = cb1.u;
                            coff += ustride;
                            data[coff] = cb2.u; data[coff + 1] = cb3.u;

                            let mut coff = voff + x / 2;
                            data[coff] = cb0.v; data[coff + 1] = cb1.v;
                            coff += vstride;
                            data[coff] = cb2.v; data[coff + 1] = cb3.v;
                        }
                    }
                    cur_bit >>= 1;
                }
                yoff += ystride * 4;
                uoff += ustride * 2;
                voff += vstride * 2;
            }
        } else {
            unreachable!();
        }
    }
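    /// For every 4x4 block in the strip, computes the squared distance
    /// between the input and the previous reconstructed frame, i.e. the
    /// cost of skipping the block in an inter frame.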
    fn calc_skip_dist(&mut self, in_frm: &NAVideoBuffer<u8>, start: usize, end: usize) {
        self.skip_dist.truncate(0);
        if let Some(ref ref_frm) = self.lastfrm {
            let rystride = ref_frm.get_stride(0);
            let mut ryoff = ref_frm.get_offset(0) + start * rystride;
            let rustride = ref_frm.get_stride(1);
            let mut ruoff = ref_frm.get_offset(1) + start / 2 * rustride;
            let rvstride = ref_frm.get_stride(2);
            let mut rvoff = ref_frm.get_offset(2) + start / 2 * rvstride;
            let (width, _) = ref_frm.get_dimensions(0);
            let rdata = ref_frm.get_data();

            let iystride = in_frm.get_stride(0);
            let mut iyoff = in_frm.get_offset(0) + start * iystride;
            let iustride = in_frm.get_stride(1);
            let mut iuoff = in_frm.get_offset(1) + start / 2 * iustride;
            let ivstride = in_frm.get_stride(2);
            let mut ivoff = in_frm.get_offset(2) + start / 2 * ivstride;
            let idata = in_frm.get_data();

            for _ in (start..end).step_by(4) {
                for x in (0..width).step_by(4) {
                    let mut dist = 0;
                    let mut roff = ryoff + x;
                    let mut ioff = iyoff + x;
                    for _ in 0..4 {
                        for i in 0..4 {
                            let d = i32::from(rdata[roff + i]) - i32::from(idata[ioff + i]);
                            dist += d * d;
                        }
                        roff += rystride;
                        ioff += iystride;
                    }
                    if !self.grayscale {
                        let mut roff = ruoff + x / 2;
                        let mut ioff = iuoff + x / 2;
                        let ud = i32::from(rdata[roff]) - i32::from(idata[ioff]);
                        dist += ud * ud;
                        let ud = i32::from(rdata[roff + 1]) - i32::from(idata[ioff + 1]);
                        dist += ud * ud;
                        roff += rustride; ioff += iustride;
                        let ud = i32::from(rdata[roff]) - i32::from(idata[ioff]);
                        dist += ud * ud;
                        let ud = i32::from(rdata[roff + 1]) - i32::from(idata[ioff + 1]);
                        dist += ud * ud;

                        let mut roff = rvoff + x / 2;
                        let mut ioff = ivoff + x / 2;
                        let vd = i32::from(rdata[roff]) - i32::from(idata[ioff]);
                        dist += vd * vd;
                        let vd = i32::from(rdata[roff + 1]) - i32::from(idata[ioff + 1]);
                        dist += vd * vd;
                        roff += rvstride; ioff += ivstride;
                        let vd = i32::from(rdata[roff]) - i32::from(idata[ioff]);
                        dist += vd * vd;
                        let vd = i32::from(rdata[roff + 1]) - i32::from(idata[ioff + 1]);
                        dist += vd * vd;
                    }
                    self.skip_dist.push(dist as u32);
                }

                iyoff += iystride * 4;
                iuoff += iustride * 2;
                ivoff += ivstride * 2;
                ryoff += rystride * 4;
                ruoff += rustride * 2;
                rvoff += rvstride * 2;
            }
        } else {
            unreachable!();
        }
    }
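    /// Encodes a key frame: fresh codebooks per strip, every block coded
    /// with either one V1 index or four V4 indices.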
    fn encode_intra(&mut self, bw: &mut ByteWriter, in_frm: &NAVideoBuffer<u8>) -> EncoderResult<bool> {
        let (width, height) = in_frm.get_dimensions(0);
        let mut strip_h = (height / self.nstrips + 3) & !3;
        if strip_h == 0 {
            self.nstrips = 1;
            strip_h = height;
        }
        let mut start_line = 0;
        let mut end_line = strip_h;

        bw.write_byte(0)?; // intra flag
        bw.write_u24be(0)?; // frame size
        let frame_data_pos = bw.tell();
        bw.write_u16be(width as u16)?;
        bw.write_u16be(height as u16)?;
        bw.write_u16be(self.nstrips as u16)?;

        for entry in self.v1_cb.iter_mut() {
            self.rng.fill_entry(entry);
        }
        for entry in self.v4_cb.iter_mut() {
            self.rng.fill_entry(entry);
        }
        while start_line < height {
            self.read_strip(in_frm, start_line, end_line);

//            let mut elbg_v1: ELBG<YUVCode, YUVCodeSum> = ELBG::new(&self.v1_cb);
//            let mut elbg_v4: ELBG<YUVCode, YUVCodeSum> = ELBG::new(&self.v4_cb);
//            elbg_v1.quantise(&self.v1_entries, &mut self.v1_cur_cb);
//            elbg_v4.quantise(&self.v4_entries, &mut self.v4_cur_cb);
            quantise_median_cut::<YUVCode, YUVCodeSum>(&self.v1_entries, &mut self.v1_cur_cb);
            quantise_median_cut::<YUVCode, YUVCodeSum>(&self.v4_entries, &mut self.v4_cur_cb);
            if self.grayscale {
                for cw in self.v1_cur_cb.iter_mut() {
                    cw.u = 128;
                    cw.v = 128;
                }
                for cw in self.v4_cur_cb.iter_mut() {
                    cw.u = 128;
                    cw.v = 128;
                }
            }

            self.v1_idx.truncate(0);
            self.v4_idx.truncate(0);
            self.masks.reset();

            for (v1_entry, v4_entries) in self.v1_entries.iter().zip(self.v4_entries.chunks(4)) {
                let (v1_idx, v1_dist) = Self::find_nearest(&self.v1_cur_cb, *v1_entry);
                if v1_dist == 0 {
                    self.masks.put_v1();
                    self.v1_idx.push(v1_idx);
                    continue;
                }
                let (v40_idx, v40_dist) = Self::find_nearest(&self.v4_cur_cb, v4_entries[0]);
                let (v41_idx, v41_dist) = Self::find_nearest(&self.v4_cur_cb, v4_entries[1]);
                let (v42_idx, v42_dist) = Self::find_nearest(&self.v4_cur_cb, v4_entries[2]);
                let (v43_idx, v43_dist) = Self::find_nearest(&self.v4_cur_cb, v4_entries[3]);
                // rough mode decision rather than a strict distortion comparison:
                // v1_dist is measured on the downsampled block while the V4
                // distances are full resolution, so the sides use different scales
                if v40_dist + v41_dist + v42_dist + v43_dist > v1_dist {
                    self.masks.put_v4();
                    self.v4_idx.push(v40_idx);
                    self.v4_idx.push(v41_idx);
                    self.v4_idx.push(v42_idx);
                    self.v4_idx.push(v43_idx);
                } else {
                    self.masks.put_v1();
                    self.v1_idx.push(v1_idx);
                }
            }
            self.masks.end();

            let mut is_intra_strip = start_line == 0;
            let (upd_v1, upd_v4) = if !is_intra_strip {
                let cb_size = if self.grayscale { 4 } else { 6 };
                (Self::can_update_cb(&self.v1_cur_cb, &self.v1_cb, cb_size),
                 Self::can_update_cb(&self.v4_cur_cb, &self.v4_cb, cb_size))
            } else {
                (false, false)
            };
            if !is_intra_strip && !upd_v1 && !upd_v4 {
                is_intra_strip = true;
            }
            bw.write_byte(if is_intra_strip { 0x10 } else { 0x11 })?;
            bw.write_u24be(0)?; // strip size
            let strip_data_pos = bw.tell();
            bw.write_u16be(0)?; // yoff
            bw.write_u16be(0)?; // xoff
            bw.write_u16be((end_line - start_line) as u16)?;
            bw.write_u16be(width as u16)?;

            Self::write_cb(bw, 0x20, &self.v4_cur_cb, &self.v4_cb, self.grayscale, upd_v4)?;
            Self::write_cb(bw, 0x22, &self.v1_cur_cb, &self.v1_cb, self.grayscale, upd_v1)?;

            self.render_stripe(true, start_line, end_line);

            if self.v1_idx.is_empty() {
                bw.write_byte(0x32)?;
                bw.write_u24be((self.v4_idx.len() + 4) as u32)?;
                bw.write_buf(self.v4_idx.as_slice())?;
            } else {
                bw.write_byte(0x30)?;
                bw.write_u24be(0)?;
                let chunk_pos = bw.tell();
                let mut v1_pos = 0;
                let mut v4_pos = 0;
                // pad the index lists so the last, partially filled mask
                // can be emitted without running out of indices
                for _ in 0..32 {
                    self.v1_idx.push(0);
                    self.v4_idx.push(0);
                    self.v4_idx.push(0);
                    self.v4_idx.push(0);
                    self.v4_idx.push(0);
                }
                for mask in self.masks.masks.iter() {
                    bw.write_u32be(*mask)?;
                    for j in (0..32).rev() {
                        if (mask & (1 << j)) == 0 {
                            bw.write_byte(self.v1_idx[v1_pos])?;
                            v1_pos += 1;
                        } else {
                            bw.write_byte(self.v4_idx[v4_pos])?;
                            bw.write_byte(self.v4_idx[v4_pos + 1])?;
                            bw.write_byte(self.v4_idx[v4_pos + 2])?;
                            bw.write_byte(self.v4_idx[v4_pos + 3])?;
                            v4_pos += 4;
                        }
                    }
                }
                patch_size(bw, chunk_pos)?;
            }

            patch_size(bw, strip_data_pos)?;

            self.v1_cb.copy_from_slice(&self.v1_cur_cb);
            self.v4_cb.copy_from_slice(&self.v4_cur_cb);
            start_line = end_line;
            end_line = (end_line + strip_h).min(height);
        }
        patch_size(bw, frame_data_pos)?;
        Ok(true)
    }
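    /// Encodes an inter frame: blocks close enough to the previous
    /// reconstructed frame are skipped, the rest are vector-quantised
    /// just like in intra frames.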
    fn encode_inter(&mut self, bw: &mut ByteWriter, in_frm: &NAVideoBuffer<u8>) -> EncoderResult<bool> {
        let (width, height) = in_frm.get_dimensions(0);
        let mut strip_h = (height / self.nstrips + 3) & !3;
        if strip_h == 0 {
            self.nstrips = 1;
            strip_h = height;
        }
        let mut start_line = 0;
        let mut end_line = strip_h;

        bw.write_byte(1)?; // frame flags: inter frame
        bw.write_u24be(0)?; // frame size
        let frame_data_pos = bw.tell();
        bw.write_u16be(width as u16)?;
        bw.write_u16be(height as u16)?;
        bw.write_u16be(self.nstrips as u16)?;

        while start_line < height {
            self.read_strip(in_frm, start_line, end_line);
            self.calc_skip_dist(in_frm, start_line, end_line);

//            let mut elbg_v1: ELBG<YUVCode, YUVCodeSum> = ELBG::new(&self.v1_cb);
//            let mut elbg_v4: ELBG<YUVCode, YUVCodeSum> = ELBG::new(&self.v4_cb);
//            elbg_v1.quantise(&self.v1_entries, &mut self.v1_cur_cb);
//            elbg_v4.quantise(&self.v4_entries, &mut self.v4_cur_cb);
            quantise_median_cut::<YUVCode, YUVCodeSum>(&self.v1_entries, &mut self.v1_cur_cb);
            quantise_median_cut::<YUVCode, YUVCodeSum>(&self.v4_entries, &mut self.v4_cur_cb);
            if self.grayscale {
                for cw in self.v1_cur_cb.iter_mut() {
                    cw.u = 128;
                    cw.v = 128;
                }
                for cw in self.v4_cur_cb.iter_mut() {
                    cw.u = 128;
                    cw.v = 128;
                }
            }

            self.v1_idx.truncate(0);
            self.v4_idx.truncate(0);
            self.masks.reset();

            let mut skip_iter = self.skip_dist.iter();
            for (v1_entry, v4_entries) in self.v1_entries.iter().zip(self.v4_entries.chunks(4)) {
                let skip_dist = *skip_iter.next().unwrap();
                if skip_dist == 0 {
                    // the block is identical to the previous frame
                    self.masks.put_inter(true);
                    continue;
                }
                let (v1_idx, v1_dist) = Self::find_nearest(&self.v1_cur_cb, *v1_entry);
                // skip the block when copying it from the previous frame is
                // no worse than the best V1 approximation
                if skip_dist < v1_dist {
                    self.masks.put_inter(true);
                    continue;
                } else {
                    self.masks.put_inter(false);
                }
                if v1_dist == 0 {
                    self.masks.put_v1();
                    self.v1_idx.push(v1_idx);
                    continue;
                }
                let (v40_idx, v40_dist) = Self::find_nearest(&self.v4_cur_cb, v4_entries[0]);
                let (v41_idx, v41_dist) = Self::find_nearest(&self.v4_cur_cb, v4_entries[1]);
                let (v42_idx, v42_dist) = Self::find_nearest(&self.v4_cur_cb, v4_entries[2]);
                let (v43_idx, v43_dist) = Self::find_nearest(&self.v4_cur_cb, v4_entries[3]);
                // same rough V1/V4 trade-off as in the intra path
                if v40_dist + v41_dist + v42_dist + v43_dist > v1_dist {
                    self.masks.put_v4();
                    self.v4_idx.push(v40_idx);
                    self.v4_idx.push(v41_idx);
                    self.v4_idx.push(v42_idx);
                    self.v4_idx.push(v43_idx);
                } else {
                    self.masks.put_v1();
                    self.v1_idx.push(v1_idx);
                }
            }
            self.masks.end();

            let (upd_v1, upd_v4) = {
                let cb_size = if self.grayscale { 4 } else { 6 };
                (Self::can_update_cb(&self.v1_cur_cb, &self.v1_cb, cb_size),
                 Self::can_update_cb(&self.v4_cur_cb, &self.v4_cb, cb_size))
            };
            bw.write_byte(0x11)?;
            bw.write_u24be(0)?; // strip size
            let strip_data_pos = bw.tell();
            bw.write_u16be(0)?; // yoff
            bw.write_u16be(0)?; // xoff
            bw.write_u16be((end_line - start_line) as u16)?;
            bw.write_u16be(width as u16)?;

            Self::write_cb(bw, 0x20, &self.v4_cur_cb, &self.v4_cb, self.grayscale, upd_v4)?;
            Self::write_cb(bw, 0x22, &self.v1_cur_cb, &self.v1_cb, self.grayscale, upd_v1)?;

            self.render_stripe(false, start_line, end_line);

            bw.write_byte(0x31)?;
            bw.write_u24be(0)?;
            let chunk_pos = bw.tell();
            let mut v1_pos = 0;
            let mut v4_pos = 0;
            for _ in 0..32 {
                self.v1_idx.push(0);
                self.v4_idx.push(0);
                self.v4_idx.push(0);
                self.v4_idx.push(0);
                self.v4_idx.push(0);
            }
            let mut skip = true;
            for mask in self.masks.masks.iter() {
                bw.write_u32be(*mask)?;
                // an all-zero mask carries no indices, but only when no
                // coded-block flag is pending from the previous mask
                if *mask == 0 && skip { continue; }
                let mut bit = 1 << 31;
                while bit > 0 {
                    if skip {
                        skip = (mask & bit) == 0;
                        bit >>= 1;
                    } else {
                        if (mask & bit) == 0 {
                            bw.write_byte(self.v1_idx[v1_pos])?;
                            v1_pos += 1;
                        } else {
                            bw.write_byte(self.v4_idx[v4_pos])?;
                            bw.write_byte(self.v4_idx[v4_pos + 1])?;
                            bw.write_byte(self.v4_idx[v4_pos + 2])?;
                            bw.write_byte(self.v4_idx[v4_pos + 3])?;
                            v4_pos += 4;
                        }
                        bit >>= 1;
                        skip = true;
                    }
                }
            }
            patch_size(bw, chunk_pos)?;

            patch_size(bw, strip_data_pos)?;

            self.v1_cb.copy_from_slice(&self.v1_cur_cb);
            self.v4_cb.copy_from_slice(&self.v4_cur_cb);
            start_line = end_line;
            end_line = (end_line + strip_h).min(height);
        }
        patch_size(bw, frame_data_pos)?;
        Ok(true)
    }
}

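// Standard NAEncoder interface: format negotiation, stream setup and
// per-frame encoding with a fixed key frame interval.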
impl NAEncoder for CinepakEncoder {
    fn negotiate_format(&self, encinfo: &EncodeParameters) -> EncoderResult<EncodeParameters> {
        match encinfo.format {
            NACodecTypeInfo::None => {
                let mut ofmt = EncodeParameters::default();
                ofmt.format = NACodecTypeInfo::Video(NAVideoInfo::new(0, 0, true, YUV420_FORMAT));
                Ok(ofmt)
            },
            NACodecTypeInfo::Audio(_) => Err(EncoderError::FormatError),
            NACodecTypeInfo::Video(vinfo) => {
                let pix_fmt = if vinfo.format == GRAY_FORMAT { GRAY_FORMAT } else { YUV420_FORMAT };
                let outinfo = NAVideoInfo::new((vinfo.width + 3) & !3, (vinfo.height + 3) & !3, true, pix_fmt);
                let mut ofmt = EncodeParameters::default();
                ofmt.format = NACodecTypeInfo::Video(outinfo);
                Ok(ofmt)
            }
        }
    }
    fn init(&mut self, stream_id: u32, encinfo: EncodeParameters) -> EncoderResult<NAStreamRef> {
        match encinfo.format {
            NACodecTypeInfo::None => Err(EncoderError::FormatError),
            NACodecTypeInfo::Audio(_) => Err(EncoderError::FormatError),
            NACodecTypeInfo::Video(vinfo) => {
                if vinfo.format != YUV420_FORMAT && vinfo.format != GRAY_FORMAT {
                    return Err(EncoderError::FormatError);
                }
                if ((vinfo.width | vinfo.height) & 3) != 0 {
                    return Err(EncoderError::FormatError);
                }
                if (vinfo.width | vinfo.height) >= (1 << 16) {
                    return Err(EncoderError::FormatError);
                }

                let out_info = NAVideoInfo::new(vinfo.width, vinfo.height, false, vinfo.format);
                let info = NACodecInfo::new("cinepak", NACodecTypeInfo::Video(out_info.clone()), None);
                let stream = NAStream::new(StreamType::Video, stream_id, info, encinfo.tb_num, encinfo.tb_den).into_ref();

                self.stream = Some(stream.clone());
                self.quality = encinfo.quality;
                self.grayscale = vinfo.format != YUV420_FORMAT;
                let num_blocks = vinfo.width / 2 * vinfo.height / 2;
                self.v1_entries = Vec::with_capacity(num_blocks);
                self.v4_entries = Vec::with_capacity(num_blocks * 4);
                self.v1_idx = Vec::with_capacity(num_blocks);
                self.v4_idx = Vec::with_capacity(num_blocks * 4);
                self.skip_dist = Vec::with_capacity(vinfo.width / 4 * vinfo.height / 4);

                let buf = alloc_video_buffer(out_info, 2)?;
                self.lastfrm = Some(buf.get_vbuf().unwrap());

                Ok(stream)
            },
        }
    }
    fn encode(&mut self, frm: &NAFrame) -> EncoderResult<()> {
        let buf = frm.get_buffer();
        if let Some(ref vbuf) = buf.get_vbuf() {
            let mut dbuf = Vec::with_capacity(4);
            let mut gw = GrowableMemoryWriter::new_write(&mut dbuf);
            let mut bw = ByteWriter::new(&mut gw);
            let is_intra = if self.frmcount == 0 {
                self.encode_intra(&mut bw, vbuf)?
            } else {
                self.encode_inter(&mut bw, vbuf)?
            };
            self.pkt = Some(NAPacket::new(self.stream.clone().unwrap(), frm.ts, is_intra, dbuf));
            self.frmcount += 1;
            // force a key frame every 25 frames
            if self.frmcount == 25 {
                self.frmcount = 0;
            }
            Ok(())
        } else {
            Err(EncoderError::InvalidParameters)
        }
    }
    fn get_packet(&mut self) -> EncoderResult<Option<NAPacket>> {
        let mut npkt = None;
        std::mem::swap(&mut self.pkt, &mut npkt);
        Ok(npkt)
    }
    fn flush(&mut self) -> EncoderResult<()> {
        self.frmcount = 0;
        Ok(())
    }
}

impl NAOptionHandler for CinepakEncoder {
    fn get_supported_options(&self) -> &[NAOptionDefinition] { &[] }
    fn set_options(&mut self, _options: &[NAOption]) { }
    fn query_option_value(&self, _name: &str) -> Option<NAValue> { None }
}

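/// Returns a boxed instance of the Cinepak encoder.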
pub fn get_encoder() -> Box<dyn NAEncoder + Send> {
    Box::new(CinepakEncoder::new())
}

#[cfg(test)]
mod test {
    use nihav_core::codecs::*;
    use nihav_core::demuxers::*;
    use nihav_core::muxers::*;
    use crate::*;
    use nihav_codec_support::test::enc_video::*;

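    // transcodes a couple of frames from a sample AVI into cinepak.avi
    // using the registered demuxers, decoders, muxers and encoders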
    #[test]
    fn test_cinepak_encoder() {
        let mut dmx_reg = RegisteredDemuxers::new();
        generic_register_all_demuxers(&mut dmx_reg);
        let mut dec_reg = RegisteredDecoders::new();
        generic_register_all_codecs(&mut dec_reg);
        let mut mux_reg = RegisteredMuxers::new();
        generic_register_all_muxers(&mut mux_reg);
        let mut enc_reg = RegisteredEncoders::new();
        generic_register_all_encoders(&mut enc_reg);

        let dec_config = DecoderTestParams {
            demuxer: "avi",
            in_name: "assets/Misc/TalkingHead_352x288.avi",
            stream_type: StreamType::Video,
            limit: Some(2),
            dmx_reg, dec_reg,
        };
        let enc_config = EncoderTestParams {
            muxer: "avi",
            enc_name: "cinepak",
            out_name: "cinepak.avi",
            mux_reg, enc_reg,
        };
        let dst_vinfo = NAVideoInfo {
            width: 0,
            height: 0,
            format: YUV420_FORMAT,
            flipped: true,
        };
        let enc_params = EncodeParameters {
            format: NACodecTypeInfo::Video(dst_vinfo),
            quality: 0,
            bitrate: 0,
            tb_num: 0,
            tb_den: 0,
            flags: 0,
        };
        test_encoding_to_file(&dec_config, &enc_config, enc_params);
    }
}