From: Kostya Shishkov
Date: Sat, 30 May 2020 14:17:58 +0000 (+0200)
Subject: MS Video 1 16-bit encoder and MS ADPCM encoder
X-Git-Url: https://git.nihav.org/?a=commitdiff_plain;h=dab59886687a0c360a38743b9dc210e8ba269729;p=nihav.git

MS Video 1 16-bit encoder and MS ADPCM encoder
---

diff --git a/nihav-ms/Cargo.toml b/nihav-ms/Cargo.toml
index 6c925ad..a80e394 100644
--- a/nihav-ms/Cargo.toml
+++ b/nihav-ms/Cargo.toml
@@ -10,13 +10,13 @@ features = []
 
 [dependencies.nihav_codec_support]
 path = "../nihav-codec-support"
-features = []
+features = ["vq"]
 
 [dev-dependencies]
 nihav_commonfmt = { path = "../nihav-commonfmt" }
 
 [features]
-default = ["all_decoders"]
+default = ["all_decoders", "all_encoders"]
 all_decoders = ["all_video_decoders", "all_audio_decoders"]
 decoders = []
 
@@ -26,3 +26,8 @@ decoder_msvideo1 = ["decoders"]
 all_audio_decoders = ["decoder_ima_adpcm_ms", "decoder_ms_adpcm"]
 decoder_ima_adpcm_ms = ["decoders"]
 decoder_ms_adpcm = ["decoders"]
+
+all_encoders = ["encoder_msvideo1", "encoder_ms_adpcm"]
+encoder_msvideo1 = ["encoders"]
+encoder_ms_adpcm = ["encoders"]
+encoders = []
diff --git a/nihav-ms/src/codecs/mod.rs b/nihav-ms/src/codecs/mod.rs
index 0e1a237..f5939fd 100644
--- a/nihav-ms/src/codecs/mod.rs
+++ b/nihav-ms/src/codecs/mod.rs
@@ -10,7 +10,7 @@ pub mod msvideo1;
 #[cfg(feature="decoder_ima_adpcm_ms")]
 pub mod imaadpcm;
 
-#[cfg(feature="decoder_ms_adpcm")]
+#[cfg(any(feature="decoder_ms_adpcm", feature="encoder_ms_adpcm"))]
 pub mod msadpcm;
 
 const MS_CODECS: &[DecoderInfo] = &[
@@ -28,3 +28,20 @@ pub fn ms_register_all_codecs(rd: &mut RegisteredDecoders) {
         rd.add_decoder(decoder.clone());
     }
 }
+
+#[cfg(feature="encoder_msvideo1")]
+pub mod msvideo1enc;
+
+const MS_ENCODERS: &[EncoderInfo] = &[
+#[cfg(feature="encoder_msvideo1")]
+    EncoderInfo { name: "msvideo1", get_encoder: msvideo1enc::get_encoder },
+#[cfg(feature="encoder_ms_adpcm")]
+    EncoderInfo { name: "ms-adpcm", get_encoder: msadpcm::get_encoder },
+];
+
+/// Registers all available encoders provided by this crate.
+pub fn ms_register_all_encoders(re: &mut RegisteredEncoders) {
+    for encoder in MS_ENCODERS.iter() {
+        re.add_encoder(encoder.clone());
+    }
+}
diff --git a/nihav-ms/src/codecs/msadpcm.rs b/nihav-ms/src/codecs/msadpcm.rs
index 687e0bd..b41333f 100644
--- a/nihav-ms/src/codecs/msadpcm.rs
+++ b/nihav-ms/src/codecs/msadpcm.rs
@@ -23,12 +23,18 @@ struct Predictor {
 impl Predictor {
     fn expand_nibble(&mut self, nibble: u8) -> i16 {
         let mul = if (nibble & 8) == 0 { i32::from(nibble) } else { i32::from(nibble) - 16 };
-        let pred = ((self.sample1.wrapping_mul(self.coef1) + self.sample2.wrapping_mul(self.coef2)) >> 8) + self.delta.wrapping_mul(mul);
-        self.sample2 = self.sample1;
-        self.sample1 = pred.max(-0x8000).min(0x7FFF);
+        let pred = self.calc_pred() + self.delta.wrapping_mul(mul);
+        self.update(pred.max(-0x8000).min(0x7FFF));
         self.delta = (ADAPT_TABLE[nibble as usize].wrapping_mul(self.delta) >> 8).max(16);
         self.sample1 as i16
     }
+    fn calc_pred(&self) -> i32 {
+        self.sample1.wrapping_mul(self.coef1).wrapping_add(self.sample2.wrapping_mul(self.coef2)) >> 8
+    }
+    fn update(&mut self, new_samp: i32) {
+        self.sample2 = self.sample1;
+        self.sample1 = new_samp;
+    }
 }
 
 struct MSADPCMDecoder {
@@ -114,15 +120,16 @@ impl NADecoder for MSADPCMDecoder {
             }
             for ch in 0..channels {
                 let samp = br.read_u16le()? as i16;
-                pred[ch].sample1 = i32::from(samp);
-                dst[off[ch]] = samp;
-                off[ch] += 1;
+                pred[ch].sample2 = i32::from(samp);
             }
             for ch in 0..channels {
                 let samp = br.read_u16le()? as i16;
-                pred[ch].sample2 = i32::from(samp);
-                dst[off[ch]] = samp;
-                off[ch] += 1;
+                pred[ch].sample1 = i32::from(samp);
+            }
+            for ch in 0..channels {
+                dst[off[ch]] = pred[ch].sample2 as i16;
+                dst[off[ch] + 1] = pred[ch].sample1 as i16;
+                off[ch] += 2;
             }
             if channels == 1 {
                 while br.left() > 0 {
@@ -158,15 +165,242 @@ pub fn get_decoder() -> Box<dyn NADecoder + Send> {
     Box::new(MSADPCMDecoder::new())
 }
 
+#[derive(Default)]
+struct MSADPCMEncoder {
+    stream: Option<NAStreamRef>,
+    samples: Vec<i16>,
+    block_len: usize,
+    channels: usize,
+    flush: bool,
+    srate: u32,
+}
+
+const DEFAULT_BLOCK_LEN: usize = 256;
+
+impl MSADPCMEncoder {
+    fn new() -> Self { Self::default() }
+    fn encode_packet(&mut self) -> EncoderResult<NAPacket> {
+        if self.samples.len() == 0 {
+            return Err(EncoderError::TryAgain);
+        }
+        let len = (self.samples.len() / self.channels).min(self.block_len);
+        if len < self.block_len && !self.flush {
+            return Err(EncoderError::TryAgain);
+        }
+        if len < 2 {
+            self.flush = false;
+            return Err(EncoderError::TryAgain);
+        }
+
+        let mut dbuf = vec![0u8; Self::calc_block_size(len, self.channels)];
+        let mut mw = MemoryWriter::new_write(dbuf.as_mut_slice());
+        let mut bw = ByteWriter::new(&mut mw);
+
+        let mut best_idx = [0usize; 2];
+        for ch in 0..self.channels {
+            let mut best_dist = std::i64::MAX;
+            for i in 0..ADAPT_COEFFS.len() {
+                let dist = self.calc_dist(ch, i, len);
+                if dist < best_dist {
+                    best_dist = dist;
+                    best_idx[ch] = i;
+                }
+            }
+            bw.write_byte(best_idx[ch] as u8)?;
+        }
+        let mut dec = [Predictor::default(), Predictor::default()];
+        for ch in 0..self.channels {
+            dec[ch].sample1 = i32::from(self.samples[ch + self.channels]);
+            dec[ch].sample2 = i32::from(self.samples[ch]);
+            dec[ch].coef1 = ADAPT_COEFFS[best_idx[ch]][0];
+            dec[ch].coef2 = ADAPT_COEFFS[best_idx[ch]][1];
+            if len > 2 {
+                let pred = dec[ch].calc_pred();
+                dec[ch].delta = ((i32::from(self.samples[ch + self.channels * 2]) - pred).abs() / 4).max(16);
+            } else {
+                dec[ch].delta = 16;
+            }
+        }
+        for ch in 0..self.channels {
+            bw.write_u16le(dec[ch].delta as u16)?;
+        }
+        for ch in 0..self.channels {
+            bw.write_u16le(dec[ch].sample1 as u16)?;
+        }
+        for ch in 0..self.channels {
+            bw.write_u16le(dec[ch].sample2 as u16)?;
+        }
+        if self.channels == 1 {
+            for samps in self.samples.chunks(2).skip(1).take(len/2 - 1) {
+                let diff = i32::from(samps[0]) - dec[0].calc_pred();
+                let nib0 = Self::calculate_mul(dec[0].delta, diff);
+                dec[0].expand_nibble(nib0);
+                let diff = i32::from(samps[1]) - dec[0].calc_pred();
+                let nib1 = Self::calculate_mul(dec[0].delta, diff);
+                dec[0].expand_nibble(nib1);
+                bw.write_byte(nib0 * 16 + nib1)?;
+            }
+        } else {
+            for samps in self.samples.chunks(2).skip(2).take(len - 2) {
+                let diff = i32::from(samps[0]) - dec[0].calc_pred();
+                let nib0 = Self::calculate_mul(dec[0].delta, diff);
+                dec[0].expand_nibble(nib0);
+                let diff = i32::from(samps[1]) - dec[1].calc_pred();
+                let nib1 = Self::calculate_mul(dec[1].delta, diff);
+                dec[1].expand_nibble(nib1);
+                bw.write_byte(nib0 * 16 + nib1)?;
+            }
+        }
+        self.samples.drain(..len * self.channels);
+        drop(bw);
+        let ts = NATimeInfo::new(None, None, Some(1), 1, self.srate);
+        Ok(NAPacket::new(self.stream.clone().unwrap(), ts, true, dbuf))
+    }
+    fn calc_dist(&self, ch: usize, idx: usize, len: usize) -> i64 {
+        let mut dist = 0;
+        let mut dec = Predictor {
+            sample2: i32::from(self.samples[ch]),
+            sample1: i32::from(self.samples[ch + self.channels]),
+            coef1: ADAPT_COEFFS[idx][0],
+            coef2: ADAPT_COEFFS[idx][1],
+            delta: 16,
+        };
+        if self.channels == 1 {
+            for samp in self.samples.iter().skip(2).take(len - 2) {
+                let pred = dec.calc_pred();
+                dec.update(pred);
+                let diff = i64::from(*samp) - i64::from(pred);
+                dist += diff * diff;
+            }
+        } else {
+            for samp in self.samples.chunks(2).skip(2).take(len - 2) {
+                let pred = dec.calc_pred();
+                dec.update(pred);
+                let diff = i64::from(samp[ch]) - i64::from(pred);
+                dist += diff * diff;
+            }
+        }
+        dist
+    }
+    fn calculate_mul(delta: i32, diff: i32) -> u8 {
+        ((diff / delta).max(-8).min(7) & 0xF) as u8
+    }
+    fn calc_block_size(nsamps: usize, channels: usize) -> usize {
+        (nsamps - 2) * channels / 2 + 7 * channels
+    }
+}
+
+impl NAEncoder for MSADPCMEncoder {
+    fn negotiate_format(&self, encinfo: &EncodeParameters) -> EncoderResult<EncodeParameters> {
+        match encinfo.format {
+            NACodecTypeInfo::None => {
+                let mut ofmt = EncodeParameters::default();
+                ofmt.format = NACodecTypeInfo::Audio(NAAudioInfo::new(0, 1, SND_S16_FORMAT, DEFAULT_BLOCK_LEN));
+                return Ok(ofmt);
+            },
+            NACodecTypeInfo::Video(_) => return Err(EncoderError::FormatError),
+            NACodecTypeInfo::Audio(ainfo) => {
+                let mut outinfo = ainfo;
+                outinfo.channels = outinfo.channels.min(2);
+                if outinfo.format != SND_S16P_FORMAT && outinfo.format != SND_S16_FORMAT {
+                    outinfo.format = SND_S16_FORMAT;
+                }
+                if outinfo.block_len == 0 {
+                    outinfo.block_len = DEFAULT_BLOCK_LEN;
+                }
+                if outinfo.block_len < 2 {
+                    outinfo.block_len = 2;
+                }
+                if (outinfo.channels == 1) && ((outinfo.block_len & 1) == 1) {
+                    outinfo.block_len += 1;
+                }
+                let mut ofmt = EncodeParameters::default();
+                ofmt.format = NACodecTypeInfo::Audio(outinfo);
+                return Ok(ofmt);
+            }
+        };
+    }
+    fn init(&mut self, stream_id: u32, encinfo: EncodeParameters) -> EncoderResult<NAStreamRef> {
+        match encinfo.format {
+            NACodecTypeInfo::None => Err(EncoderError::FormatError),
+            NACodecTypeInfo::Video(_) => Err(EncoderError::FormatError),
+            NACodecTypeInfo::Audio(ainfo) => {
+                if ainfo.format != SND_S16P_FORMAT && ainfo.format != SND_S16_FORMAT {
+                    return Err(EncoderError::FormatError);
+                }
+                if ainfo.channels != 1 && ainfo.channels != 2 {
+                    return Err(EncoderError::FormatError);
+                }
+                if ainfo.block_len < 2 || ((ainfo.block_len * (ainfo.channels as usize)) & 1) != 0 {
+                    return Err(EncoderError::FormatError);
+                }
+                self.channels = ainfo.channels as usize;
+                self.block_len = ainfo.block_len;
+
+                let soniton = NASoniton::new(4, 0);
+                let out_ainfo = NAAudioInfo::new(ainfo.sample_rate, ainfo.channels, soniton, Self::calc_block_size(self.block_len, self.channels));
+                let info = NACodecInfo::new("ms-adpcm", NACodecTypeInfo::Audio(out_ainfo), None);
+                let stream = NAStream::new(StreamType::Audio, stream_id, info.clone(), self.block_len as u32, ainfo.sample_rate).into_ref();
+
+                self.stream = Some(stream.clone());
+                self.samples = Vec::with_capacity(self.block_len * self.channels);
+                self.srate = ainfo.sample_rate;
+                self.flush = false;
+
+                Ok(stream)
+            },
+        }
+    }
+    fn encode(&mut self, frm: &NAFrame) -> EncoderResult<()> {
+        let buf = frm.get_buffer();
+        if let Some(ref abuf) = buf.get_abuf_i16() {
+            let src = abuf.get_data();
+            let len = abuf.get_length();
+            let ch = abuf.get_chmap().num_channels();
+            if abuf.get_step() > 1 || ch == 1 {
+                self.samples.extend(src.iter().take(len * ch));
+            } else {
+                let (src0, src1) = src.split_at(abuf.get_stride());
+                self.samples.reserve(len * 2);
+                for (s0, s1) in src0.iter().take(len).zip(src1.iter()) {
+                    self.samples.push(*s0);
+                    self.samples.push(*s1);
+                }
+            }
+            Ok(())
+        } else {
+            Err(EncoderError::InvalidParameters)
+        }
+    }
+    fn get_packet(&mut self) -> EncoderResult<Option<NAPacket>> {
+        if let Ok(pkt) = self.encode_packet() {
+            Ok(Some(pkt))
+        } else {
+            Ok(None)
+        }
+    }
+    fn flush(&mut self) -> EncoderResult<()> {
+        self.flush = true;
+        Ok(())
+    }
+}
+
+pub fn get_encoder() -> Box<dyn NAEncoder + Send> {
+    Box::new(MSADPCMEncoder::new())
+}
+
 #[cfg(test)]
 mod test {
-    use nihav_core::codecs::RegisteredDecoders;
-    use nihav_core::demuxers::RegisteredDemuxers;
+    use nihav_core::codecs::*;
+    use nihav_core::demuxers::*;
+    use nihav_core::muxers::*;
     use nihav_codec_support::test::dec_video::*;
-    use crate::ms_register_all_codecs;
-    use nihav_commonfmt::generic_register_all_demuxers;
+    use nihav_codec_support::test::enc_video::*;
+    use crate::*;
+    use nihav_commonfmt::*;
 
+    #[cfg(feature="decoder_ms_adpcm")]
     #[test]
-    fn test_ms_adpcm() {
+    fn test_ms_adpcm_decoder() {
         let mut dmx_reg = RegisteredDemuxers::new();
         generic_register_all_demuxers(&mut dmx_reg);
         let mut dec_reg = RegisteredDecoders::new();
@@ -175,4 +409,46 @@ mod test {
         test_decoding("avi", "ms-adpcm", "assets/MS/dance.avi", None, &dmx_reg, &dec_reg,
                       ExpectedTestResult::MD5([0x9d6619e1, 0x60d83560, 0xfe5c1fb7, 0xad5d130d]));
     }
+    #[cfg(feature="encoder_ms_adpcm")]
+    #[test]
+    fn test_ms_adpcm_encoder() {
+        let mut dmx_reg = RegisteredDemuxers::new();
+        generic_register_all_demuxers(&mut dmx_reg);
+        let mut dec_reg = RegisteredDecoders::new();
+        generic_register_all_codecs(&mut dec_reg);
+        ms_register_all_codecs(&mut dec_reg);
+        let mut mux_reg = RegisteredMuxers::new();
+        generic_register_all_muxers(&mut mux_reg);
+        let mut enc_reg = RegisteredEncoders::new();
+        ms_register_all_encoders(&mut enc_reg);
+
+        let dec_config = DecoderTestParams {
+            demuxer: "avi",
+            in_name: "assets/Indeo/laser05.avi",
+            stream_type: StreamType::Audio,
+            limit: None,
+            dmx_reg, dec_reg,
+        };
+        let enc_config = EncoderTestParams {
+            muxer: "wav",
+            enc_name: "ms-adpcm",
+            out_name: "msadpcm.wav",
+            mux_reg, enc_reg,
+        };
+        let dst_ainfo = NAAudioInfo {
+            sample_rate: 0,
+            channels: 0,
+            format: SND_S16_FORMAT,
+            block_len: 128,
+        };
+        let enc_params = EncodeParameters {
+            format: NACodecTypeInfo::Audio(dst_ainfo),
+            quality: 0,
+            bitrate: 0,
+            tb_num: 0,
+            tb_den: 0,
+            flags: 0,
+        };
+        test_encoding_to_file(&dec_config, &enc_config, enc_params);
+    }
 }
diff --git a/nihav-ms/src/codecs/msvideo1enc.rs b/nihav-ms/src/codecs/msvideo1enc.rs
new file mode 100644
index 0000000..f9b06cf
--- /dev/null
+++ b/nihav-ms/src/codecs/msvideo1enc.rs
@@ -0,0 +1,537 @@
+use nihav_core::codecs::*;
+use nihav_core::io::byteio::*;
+use nihav_codec_support::vq::*;
+
+#[derive(Default,Clone,Copy,PartialEq)]
+struct Pixel16(u16);
+
+impl Pixel16 {
+    fn unpack(&self) -> (u8, u8, u8) {
+        (((self.0 >> 10) & 0x1F) as u8, ((self.0 >> 5) & 0x1F) as u8, (self.0 & 0x1F) as u8)
+    }
+    fn pack(r: u8, g: u8, b: u8) -> Self {
+        Pixel16{ 0: (u16::from(r) << 10) | (u16::from(g) << 5) | u16::from(b) }
+    }
+}
+impl VQElement for Pixel16 {
+    fn dist(&self, rval: Self) -> u32 {
+        let (r0, g0, b0) = self.unpack();
+        let (r1, g1, b1) = rval.unpack();
+        let rd = i32::from(r0) - i32::from(r1);
+        let gd = i32::from(g0) - i32::from(g1);
+        let bd = i32::from(b0) - i32::from(b1);
+        (rd * rd + gd * gd + bd * bd) as u32
+    }
+    fn min_cw() -> Self { Pixel16(0x0000) }
+    fn max_cw() -> Self { Pixel16(0x7FFF) }
+    fn min(&self, rval: Self) -> Self {
+        let (r0, g0, b0) = self.unpack();
+        let (r1, g1, b1) = rval.unpack();
+        Self::pack(r0.min(r1), g0.min(g1), b0.min(b1))
+    }
+    fn max(&self, rval: Self) -> Self {
+        let (r0, g0, b0) = self.unpack();
+        let (r1, g1, b1) = rval.unpack();
+        Self::pack(r0.max(r1), g0.max(g1), b0.max(b1))
+    }
+    fn num_components() -> usize { 3 }
+    fn sort_by_component(arr: &mut [Self], component: usize) {
+        let mut counts = [0; 32];
+        for pix in arr.iter() {
+            let (r, g, b) = pix.unpack();
+            let idx = match component {
+                0 => r,
+                1 => g,
+                _ => b,
+            } as usize;
+            counts[idx] += 1;
+        }
+        let mut offs = [0; 32];
+        for i in 0..31 {
+            offs[i + 1] = offs[i] + counts[i];
+        }
+        let mut dst = vec![Pixel16(0); arr.len()];
+        for pix in arr.iter() {
+            let (r, g, b) = pix.unpack();
+            let idx = match component {
+                0 => r,
+                1 => g,
+                _ => b,
+            } as usize;
+            dst[offs[idx]] = *pix;
+            offs[idx] += 1;
+        }
+        arr.copy_from_slice(dst.as_slice());
+    }
+    fn max_dist_component(min: &Self, max: &Self) -> usize {
+        let (r0, g0, b0) = max.unpack();
+        let (r1, g1, b1) = min.unpack();
+        let rd = u32::from(r0) - u32::from(r1);
+        let gd = u32::from(g0) - u32::from(g1);
+        let bd = u32::from(b0) - u32::from(b1);
+        if rd > gd && rd > bd {
+            0
+        } else if bd > rd && bd > gd {
+            2
+        } else {
+            1
+        }
+    }
+}
+
+struct Pixel16Sum {
+    rsum: u64,
+    gsum: u64,
+    bsum: u64,
+    count: u64,
+}
+
+impl VQElementSum<Pixel16> for Pixel16Sum {
+    fn zero() -> Self { Pixel16Sum { rsum: 0, gsum: 0, bsum: 0, count: 0 } }
+    fn add(&mut self, rval: Pixel16, count: u64) {
+        let (r, g, b) = rval.unpack();
+        self.rsum += u64::from(r) * count;
+        self.gsum += u64::from(g) * count;
+        self.bsum += u64::from(b) * count;
+        self.count += count;
+    }
+    fn get_centroid(&self) -> Pixel16 {
+        if self.count != 0 {
+            let r = ((self.rsum + self.count / 2) / self.count) as u8;
+            let g = ((self.gsum + self.count / 2) / self.count) as u8;
+            let b = ((self.bsum + self.count / 2) / self.count) as u8;
+            Pixel16::pack(r, g, b)
+        } else {
+            Pixel16(0x0000)
+        }
+    }
+}
+
+#[derive(Default)]
+struct BlockState {
+    fill_dist: u32,
+    fill_val: Pixel16,
+    clr2_dist: u32,
+    clr2_flags: u16,
+    clr2: [Pixel16; 2],
+    clr8_dist: u32,
+    clr8_flags: u16,
+    clr8: [[Pixel16; 2]; 4],
+}
+
+impl BlockState {
+    fn calc_stats(&mut self, buf: &[Pixel16; 16]) {
+        let num_cw = quantise_median_cut::<Pixel16, Pixel16Sum>(buf, &mut self.clr2);
+        if num_cw == 1 {
+            self.fill_val = Pixel16 { 0: buf[0].0 & !0x400 };
+        } else {
+            let mut avg = Pixel16Sum::zero();
+            for pix in buf.iter() {
+                avg.add(*pix, 1);
+            }
+            self.fill_val = Pixel16 { 0: avg.get_centroid().0 & !0x400 };
+        }
+        self.fill_dist = 0;
+        for pix in buf.iter() {
+            self.fill_dist += pix.dist(self.fill_val);
+        }
+        if self.fill_dist == 0 {
+            self.clr2_dist = std::u32::MAX;
+            self.clr8_dist = std::u32::MAX;
+            return;
+        }
+
+        self.clr2_flags = 0u16;
+        if num_cw == 2 {
+            let mut mask = 1;
+            self.clr2_dist = 0;
+            for pix in buf.iter() {
+                let dist0 = pix.dist(self.clr2[0]);
+                let dist1 = pix.dist(self.clr2[1]);
+                if dist0 < dist1 {
+                    self.clr2_flags |= mask;
+                    self.clr2_dist += dist0;
+                } else {
+                    self.clr2_dist += dist1;
+                }
+                mask <<= 1;
+            }
+            if (self.clr2_flags & 0x8000) != 0 {
+                self.clr2_flags = !self.clr2_flags;
+                self.clr2.swap(0, 1);
+            }
+        } else {
+            self.clr2_dist = self.fill_dist;
+            self.clr2 = [self.fill_val; 2];
+        }
+        if self.clr2_dist == 0 {
+            self.clr8_dist = std::u32::MAX;
+            return;
+        }
+
+        self.clr8 = [[Pixel16 { 0: 0 }; 2]; 4];
+        self.clr8_flags = 0;
+        self.clr8_dist = 0;
+        let mut mask = 1;
+        for i in 0..4 {
+            let off = (i & 1) * 2 + (i & 2) * 4;
+            let src2 = [buf[off], buf[off + 1], buf[off + 4], buf[off + 5]];
+            let nc = quantise_median_cut::<Pixel16, Pixel16Sum>(&src2, &mut self.clr8[i]);
+            if nc < 2 {
+                self.clr8[i][1] = self.clr8[i][0];
+            }
+            for j in 0..4 {
+                let dist0 = src2[j].dist(self.clr8[i][0]);
+                let dist1 = src2[j].dist(self.clr8[i][1]);
+                if dist0 < dist1 {
+                    self.clr8_flags |= mask;
+                    self.clr8_dist += dist0;
+                } else {
+                    self.clr8_dist += dist1;
+                }
+                mask <<= 1;
+            }
+        }
+        if (self.clr8_flags & 0x8000) != 0 {
+            self.clr8_flags ^= 0xF000;
+            self.clr8[3].swap(0, 1);
+        }
+    }
+    fn put_fill(&self, dst: &mut [u16], dstride: usize) {
+        for line in dst.chunks_mut(dstride) {
+            for i in 0..4 {
+                line[i] = self.fill_val.0;
+            }
+        }
+    }
+    fn put_clr2(&self, dst: &mut [u16], dstride: usize) {
+        for j in 0..4 {
+            for i in 0..4 {
+                if (self.clr2_flags & (1 << (i + j * 4))) == 0 {
+                    dst[i + j * dstride] = self.clr2[0].0;
+                } else {
+                    dst[i + j * dstride] = self.clr2[1].0;
+                }
+            }
+        }
+    }
+    fn put_clr8(&self, dst: &mut [u16], dstride: usize) {
+        for i in 0..4 {
+            let off = (i & 1) * 2 + (i & 2) * dstride;
+            let cur_flg = (self.clr8_flags >> (i * 4)) & 0xF;
+            dst[off] = self.clr8[i][(!cur_flg & 1) as usize].0;
+            dst[off + 1] = self.clr8[i][((!cur_flg >> 1) & 1) as usize].0;
+            dst[off + dstride] = self.clr8[i][((!cur_flg >> 2) & 1) as usize].0;
+            dst[off + 1 + dstride] = self.clr8[i][((!cur_flg >> 3) & 1) as usize].0;
+        }
+    }
+    fn write_fill(&self, bw: &mut ByteWriter) -> EncoderResult<()> {
+        bw.write_u16le(self.fill_val.0 | 0x8000)?;
+        Ok(())
+    }
+    fn write_clr2(&self, bw: &mut ByteWriter) -> EncoderResult<()> {
+        bw.write_u16le(self.clr2_flags)?;
+        bw.write_u16le(self.clr2[0].0)?;
+        bw.write_u16le(self.clr2[1].0)?;
+        Ok(())
+    }
+    fn write_clr8(&self, bw: &mut ByteWriter) -> EncoderResult<()> {
+        bw.write_u16le(self.clr8_flags)?;
+        bw.write_u16le(self.clr8[0][0].0 | 0x8000)?;
+        bw.write_u16le(self.clr8[0][1].0)?;
+        bw.write_u16le(self.clr8[1][0].0)?;
+        bw.write_u16le(self.clr8[1][1].0)?;
+        bw.write_u16le(self.clr8[2][0].0)?;
+        bw.write_u16le(self.clr8[2][1].0)?;
+        bw.write_u16le(self.clr8[3][0].0)?;
+        bw.write_u16le(self.clr8[3][1].0)?;
+        Ok(())
+    }
+}
+
+struct MSVideo1Encoder {
+    stream: Option<NAStreamRef>,
+    pkt: Option<NAPacket>,
+    pool: NAVideoBufferPool<u16>,
+    lastfrm: Option<NAVideoBufferRef<u16>>,
+    quality: u8,
+    frmcount: u8,
+}
+
+impl MSVideo1Encoder {
+    fn new() -> Self {
+        Self {
+            stream: None,
+            pkt: None,
+            pool: NAVideoBufferPool::new(2),
+            lastfrm: None,
+            quality: 0,
+            frmcount: 0,
+        }
+    }
+    fn get_block(src: &[u16], sstride: usize, buf: &mut [Pixel16; 16]) {
+        for (line, dst) in src.chunks(sstride).zip(buf.chunks_mut(4)) {
+            for i in 0..4 {
+                dst[i] = Pixel16 { 0: line[i] };
+            }
+        }
+    }
+    fn write_skips(bw: &mut ByteWriter, skips: usize) -> EncoderResult<()> {
+        bw.write_u16le((skips as u16) | 0x8400)?;
+        Ok(())
+    }
+    fn encode_inter(bw: &mut ByteWriter, cur_frm: &mut NAVideoBuffer<u16>, in_frm: &NAVideoBuffer<u16>, prev_frm: &NAVideoBuffer<u16>, _quality: u8) -> EncoderResult<bool> {
+        let mut is_intra = true;
+        let src = in_frm.get_data();
+        let sstride = in_frm.get_stride(0);
+        let soff = in_frm.get_offset(0);
+        let (w, h) = in_frm.get_dimensions(0);
+        let rsrc = prev_frm.get_data();
+        let rstride = prev_frm.get_stride(0);
+        let roff = prev_frm.get_offset(0);
+        let dstride = cur_frm.get_stride(0);
+        let doff = cur_frm.get_offset(0);
+        let dst = cur_frm.get_data_mut().unwrap();
+        let mut skip_run = 0;
+        for ((sstrip, rstrip), dstrip) in (&src[soff..]).chunks(sstride * 4).take(h / 4).zip((&rsrc[roff..]).chunks(rstride * 4)).zip((&mut dst[doff..]).chunks_mut(dstride * 4)) {
+            for x in (0..w).step_by(4) {
+                let mut buf = [Pixel16::min_cw(); 16];
+                let mut refbuf = [Pixel16::min_cw(); 16];
+                Self::get_block(&sstrip[x..], sstride, &mut buf);
+                Self::get_block(&rstrip[x..], rstride, &mut refbuf);
+
+                let mut skip_dist = 0;
+                for (pix, rpix) in buf.iter().zip(refbuf.iter()) {
+                    skip_dist += pix.dist(*rpix);
+                }
+                if skip_dist == 0 {
+                    skip_run += 1;
+                    is_intra = false;
+                    if skip_run == 1023 {
+                        Self::write_skips(bw, skip_run)?;
+                        skip_run = 0;
+                    }
+                    continue;
+                }
+
+                let mut bstate = BlockState::default();
+                bstate.calc_stats(&buf);
+
+                let dst = &mut dstrip[x..];
+                if skip_dist <= bstate.fill_dist {
+                    skip_run += 1;
+                    is_intra = false;
+                    if skip_run == 1023 {
+                        Self::write_skips(bw, skip_run)?;
+                        skip_run = 0;
+                    }
+                } else if bstate.fill_dist <= bstate.clr2_dist {
+                    bstate.put_fill(dst, dstride);
+                    if skip_run != 0 {
+                        Self::write_skips(bw, skip_run)?;
+                        skip_run = 0;
+                    }
+                    bstate.write_fill(bw)?;
+                } else if bstate.clr8_dist < bstate.clr2_dist {
+                    bstate.put_clr8(dst, dstride);
+                    if skip_run != 0 {
+                        Self::write_skips(bw, skip_run)?;
+                        skip_run = 0;
+                    }
+                    bstate.write_clr8(bw)?;
+                } else {
+                    bstate.put_clr2(dst, dstride);
+                    if skip_run != 0 {
+                        Self::write_skips(bw, skip_run)?;
+                        skip_run = 0;
+                    }
+                    bstate.write_clr2(bw)?;
+                }
+            }
+        }
+        if skip_run != 0 {
+            Self::write_skips(bw, skip_run)?;
+        }
+        if is_intra {
+            bw.write_u16le(0)?;
+        } //xxx: something for inter?
+        Ok(is_intra)
+    }
+    fn encode_intra(bw: &mut ByteWriter, cur_frm: &mut NAVideoBuffer<u16>, in_frm: &NAVideoBuffer<u16>, _quality: u8) -> EncoderResult<bool> {
+        let src = in_frm.get_data();
+        let sstride = in_frm.get_stride(0);
+        let soff = in_frm.get_offset(0);
+        let (w, h) = in_frm.get_dimensions(0);
+        let dstride = cur_frm.get_stride(0);
+        let doff = cur_frm.get_offset(0);
+        let dst = cur_frm.get_data_mut().unwrap();
+        for (sstrip, dstrip) in (&src[soff..]).chunks(sstride * 4).take(h / 4).zip((&mut dst[doff..]).chunks_mut(dstride * 4)) {
+            for x in (0..w).step_by(4) {
+                let mut buf = [Pixel16::min_cw(); 16];
+                Self::get_block(&sstrip[x..], sstride, &mut buf);
+                let mut bstate = BlockState::default();
+                bstate.calc_stats(&buf);
+
+                let dst = &mut dstrip[x..];
+                if bstate.fill_dist <= bstate.clr2_dist {
+                    bstate.put_fill(dst, dstride);
+                    bstate.write_fill(bw)?;
+                } else if bstate.clr8_dist < bstate.clr2_dist {
+                    bstate.put_clr8(dst, dstride);
+                    bstate.write_clr8(bw)?;
+                } else {
+                    bstate.put_clr2(dst, dstride);
+                    bstate.write_clr2(bw)?;
+                }
+            }
+        }
+        bw.write_u16le(0)?;
+        Ok(true)
+    }
+}
+
+const RGB555_FORMAT: NAPixelFormaton = NAPixelFormaton {
+    model: ColorModel::RGB(RGBSubmodel::RGB), components: 3,
+    comp_info: [
+        Some(NAPixelChromaton{ h_ss: 0, v_ss: 0, packed: true, depth: 5, shift: 10, comp_offs: 0, next_elem: 2 }),
+        Some(NAPixelChromaton{ h_ss: 0, v_ss: 0, packed: true, depth: 5, shift: 5, comp_offs: 0, next_elem: 2 }),
+        Some(NAPixelChromaton{ h_ss: 0, v_ss: 0, packed: true, depth: 5, shift: 0, comp_offs: 0, next_elem: 2 }),
+        None, None],
+    elem_size: 2, be: false, alpha: false, palette: false };
+
+impl NAEncoder for MSVideo1Encoder {
+    fn negotiate_format(&self, encinfo: &EncodeParameters) -> EncoderResult<EncodeParameters> {
+        match encinfo.format {
+            NACodecTypeInfo::None => {
+                let mut ofmt = EncodeParameters::default();
+                ofmt.format = NACodecTypeInfo::Video(NAVideoInfo::new(0, 0, true, RGB555_FORMAT));
+                Ok(ofmt)
+            },
+            NACodecTypeInfo::Audio(_) => return Err(EncoderError::FormatError),
+            NACodecTypeInfo::Video(vinfo) => {
+                let outinfo = NAVideoInfo::new((vinfo.width + 3) & !3, (vinfo.height + 3) & !3, true, RGB555_FORMAT);
+                let mut ofmt = EncodeParameters::default();
+                ofmt.format = NACodecTypeInfo::Video(outinfo);
+                Ok(ofmt)
+            }
+        }
+    }
+    fn init(&mut self, stream_id: u32, encinfo: EncodeParameters) -> EncoderResult<NAStreamRef> {
+        match encinfo.format {
+            NACodecTypeInfo::None => Err(EncoderError::FormatError),
+            NACodecTypeInfo::Audio(_) => Err(EncoderError::FormatError),
+            NACodecTypeInfo::Video(vinfo) => {
+                if vinfo.format != RGB555_FORMAT {
+                    return Err(EncoderError::FormatError);
+                }
+                if ((vinfo.width | vinfo.height) & 3) != 0 {
+                    return Err(EncoderError::FormatError);
+                }
+
+                let out_info = NAVideoInfo::new(vinfo.width, vinfo.height, true, RGB555_FORMAT);
+                let info = NACodecInfo::new("msvideo1", NACodecTypeInfo::Video(out_info.clone()), None);
+                let stream = NAStream::new(StreamType::Video, stream_id, info, encinfo.tb_num, encinfo.tb_den).into_ref();
+                if let Err(_) = self.pool.prealloc_video(out_info, 2) {
+                    return Err(EncoderError::AllocError);
+                }
+
+                self.stream = Some(stream.clone());
+                self.quality = encinfo.quality;
+
+                Ok(stream)
+            },
+        }
+    }
+    fn encode(&mut self, frm: &NAFrame) -> EncoderResult<()> {
+        let buf = frm.get_buffer();
+        if let Some(ref vbuf) = buf.get_vbuf16() {
+            let mut cur_frm = self.pool.get_free().unwrap();
+            let mut dbuf = Vec::with_capacity(4);
+            let mut gw = GrowableMemoryWriter::new_write(&mut dbuf);
+            let mut bw = ByteWriter::new(&mut gw);
+            if self.frmcount == 0 {
+                self.lastfrm = None;
+            }
+            let is_intra = if let Some(ref prev_buf) = self.lastfrm {
+                Self::encode_inter(&mut bw, &mut cur_frm, vbuf, prev_buf, self.quality)?
+            } else {
+                Self::encode_intra(&mut bw, &mut cur_frm, vbuf, self.quality)?
+            };
+            self.lastfrm = Some(cur_frm);
+            self.pkt = Some(NAPacket::new(self.stream.clone().unwrap(), frm.ts, is_intra, dbuf));
+            self.frmcount += 1;
+            if self.frmcount == 25 {
+                self.frmcount = 0;
+            }
+            Ok(())
+        } else {
+            Err(EncoderError::InvalidParameters)
+        }
+    }
+    fn get_packet(&mut self) -> EncoderResult<Option<NAPacket>> {
+        let mut npkt = None;
+        std::mem::swap(&mut self.pkt, &mut npkt);
+        Ok(npkt)
+    }
+    fn flush(&mut self) -> EncoderResult<()> {
+        self.frmcount = 0;
+        Ok(())
+    }
+}
+
+pub fn get_encoder() -> Box<dyn NAEncoder + Send> {
+    Box::new(MSVideo1Encoder::new())
+}
+
+#[cfg(test)]
+mod test {
+    use nihav_core::codecs::*;
+    use nihav_core::demuxers::*;
+    use nihav_core::muxers::*;
+    use crate::*;
+    use nihav_commonfmt::*;
+    use nihav_codec_support::test::enc_video::*;
+    use super::RGB555_FORMAT;
+
+    #[test]
+    fn test_ms_video1_encoder() {
+        let mut dmx_reg = RegisteredDemuxers::new();
+        generic_register_all_demuxers(&mut dmx_reg);
+        let mut dec_reg = RegisteredDecoders::new();
+        generic_register_all_codecs(&mut dec_reg);
+        ms_register_all_codecs(&mut dec_reg);
+        let mut mux_reg = RegisteredMuxers::new();
+        generic_register_all_muxers(&mut mux_reg);
+        let mut enc_reg = RegisteredEncoders::new();
+        ms_register_all_encoders(&mut enc_reg);
+
+        let dec_config = DecoderTestParams {
+            demuxer: "avi",
+            in_name: "assets/Misc/TalkingHead_352x288.avi",
+            stream_type: StreamType::Video,
+            limit: Some(32),
+            dmx_reg, dec_reg,
+        };
+        let enc_config = EncoderTestParams {
+            muxer: "avi",
+            enc_name: "msvideo1",
+            out_name: "msvideo1.avi",
+            mux_reg, enc_reg,
+        };
+        let dst_vinfo = NAVideoInfo {
+            width: 0,
+            height: 0,
+            format: RGB555_FORMAT,
+            flipped: true,
+        };
+        let enc_params = EncodeParameters {
+            format: NACodecTypeInfo::Video(dst_vinfo),
+            quality: 0,
+            bitrate: 0,
+            tb_num: 0,
+            tb_den: 0,
+            flags: 0,
+        };
+        test_encoding_to_file(&dec_config, &enc_config, enc_params);
+    }
+}
diff --git a/nihav-ms/src/lib.rs b/nihav-ms/src/lib.rs
index d543744..99597a6 100644
--- a/nihav-ms/src/lib.rs
+++ b/nihav-ms/src/lib.rs
@@ -3,3 +3,4 @@ extern crate nihav_codec_support;
 
 mod codecs;
 pub use crate::codecs::ms_register_all_codecs;
+pub use crate::codecs::ms_register_all_encoders;