}
}
+/// Drains every packet currently queued in `transcoder.queue` and hands each
+/// one to the muxer, accumulating per-media-type payload byte counts.
+///
+/// `vdata_size` / `adata_size` are running totals of video/audio packet buffer
+/// bytes; packets of any other stream type are still muxed but not counted.
+///
+/// Returns `false` when the configured end time is reached (NOTE(review): the
+/// boundary packet is dropped, not muxed — confirm that is intended when this
+/// is called for the final flush) or when `mux_frame` fails; returns `true`
+/// once the queue is exhausted.
+fn retrieve_packets(transcoder: &mut Transcoder, mux: &mut Muxer, vdata_size: &mut usize, adata_size: &mut usize) -> bool {
+ while let Some(pkt) = transcoder.queue.get_packet() {
+ // stop once a packet's timestamp is at or past the requested end point
+ if transcoder.end != NATimePoint::None && !pkt.ts.less_than(transcoder.end) {
+ return false;
+ }
+ let pkt_size = pkt.get_buffer().len();
+ match pkt.get_stream().get_media_type() {
+ StreamType::Video => { *vdata_size += pkt_size; },
+ StreamType::Audio => { *adata_size += pkt_size; },
+ _ => {},
+ };
+ // a mux failure is reported but not fatal to the caller beyond the bool
+ if mux.mux_frame(pkt).is_err() {
+ println!("error muxing packet");
+ return false;
+ }
+ }
+ true
+}
+
#[allow(clippy::single_match)]
fn main() {
let args: Vec<_> = env::args().collect();
if mux_quirks.is_fixed_duration() {
transcoder.calc_len = true;
}
+ transcoder.fixed_rate = mux_quirks.is_fixed_rate();
transcoder.queue.set_sync(force_sync || !mux_quirks.is_unsync());
if transcoder.calc_len {
let frm = ret.unwrap();
dec_ctx.reorderer.add_frame(frm);
while let Some(frm) = dec_ctx.reorderer.get_frame() {
- if !encoder.encode_frame(dst_id, frm, &transcoder.scale_opts) {
+ if !encoder.encode_frame(dst_id, frm, &transcoder.scale_opts, &mut transcoder.queue) {
break;
}
- while let Ok(Some(pkt)) = encoder.get_packet() {
- if transcoder.end != NATimePoint::None && !pkt.ts.less_than(transcoder.end) { break 'main_loop; }
- let pkt_size = pkt.get_buffer().len();
- match pkt.get_stream().get_media_type() {
- StreamType::Video => { vdata_size += pkt_size; },
- StreamType::Audio => { adata_size += pkt_size; },
- _ => {},
- };
- transcoder.queue.queue_packet(pkt);
- }
}
} else {
println!("no decoder for stream {}", src_id);
},
};
- while let Some(pkt) = transcoder.queue.get_packet() {
- if mux.mux_frame(pkt).is_err() {
- println!("error muxing packet");
- break;
- }
+ if !retrieve_packets(&mut transcoder, &mut mux, &mut vdata_size, &mut adata_size) {
+ break;
}
}
- 'reord_flush_loop: for stream in ism.iter() {
+ /*'reord_flush_loop:*/ for stream in ism.iter() {
let src_id = stream.get_num();
if let OutputMode::Encode(dst_id, ref mut encoder) = transcoder.encoders[src_id] {
if let Some(ref mut dec_ctx) = transcoder.decoders[src_id] {
while let Some(frm) = dec_ctx.reorderer.get_last_frames() {
- if !encoder.encode_frame(dst_id, frm, &transcoder.scale_opts) {
+ if !encoder.encode_frame(dst_id, frm, &transcoder.scale_opts, &mut transcoder.queue) {
break;
}
- while let Ok(Some(pkt)) = encoder.get_packet() {
- if transcoder.end != NATimePoint::None && !pkt.ts.less_than(transcoder.end) { break 'reord_flush_loop; }
- transcoder.queue.queue_packet(pkt);
- }
}
}
}
};
}
- while let Some(pkt) = transcoder.queue.get_packet() {
- if mux.mux_frame(pkt).is_err() {
- println!("error muxing packet");
- break;
- }
- }
+ retrieve_packets(&mut transcoder, &mut mux, &mut vdata_size, &mut adata_size);
if transcoder.verbose > 0 {
println!();
}
pub trait EncoderInterface {
- fn encode_frame(&mut self, dst_id: u32, frm: NAFrameRef, scale_opts: &[(String, String)]) -> bool;
+ /// Encodes one frame for output stream `dst_id` and pushes any packets the
+ /// encoder produced directly into `queue`; returns `false` on encoder error.
+ /// NOTE(review): with packets now delivered via `queue`, `get_packet` below
+ /// may have lost its last external caller — verify and consider removing it.
+ fn encode_frame(&mut self, dst_id: u32, frm: NAFrameRef, scale_opts: &[(String, String)], queue: &mut OutputQueue) -> bool;
fn flush(&mut self, queue: &mut OutputQueue) -> EncoderResult<()>;
fn get_packet(&mut self) -> EncoderResult<Option<NAPacket>>;
}
}
impl EncoderInterface for AudioEncodeContext {
- fn encode_frame(&mut self, dst_id: u32, frm: NAFrameRef, _scale_opts: &[(String, String)]) -> bool {
+ fn encode_frame(&mut self, dst_id: u32, frm: NAFrameRef, _scale_opts: &[(String, String)], queue: &mut OutputQueue) -> bool {
let buf = frm.get_buffer();
let cbuf = if let NABufferType::None = buf {
buf
if self.encoder.encode(&ofrm).is_err() {
return false;
}
+ while let Ok(Some(pkt)) = self.encoder.get_packet() {
+ queue.queue_packet(pkt);
+ }
}
return true;
};
let cfrm = NAFrame::new(frm.get_time_information(), frm.frame_type, frm.key, frm.get_info(), cbuf);
self.encoder.encode(&cfrm).unwrap();
+ while let Ok(Some(pkt)) = self.encoder.get_packet() {
+ queue.queue_packet(pkt);
+ }
true
}
fn flush(&mut self, queue: &mut OutputQueue) -> EncoderResult<()> {
}
impl EncoderInterface for VideoEncodeContext {
- fn encode_frame(&mut self, dst_id: u32, frm: NAFrameRef, scale_opts: &[(String, String)]) -> bool {
+ fn encode_frame(&mut self, dst_id: u32, frm: NAFrameRef, scale_opts: &[(String, String)], queue: &mut OutputQueue) -> bool {
let buf = frm.get_buffer();
let cbuf = if let NABufferType::None = buf {
if (self.encoder.get_capabilities() & ENC_CAPS_SKIPFRAME) == 0 {
};
let cfrm = NAFrame::new(frm.get_time_information(), frm.frame_type, frm.key, frm.get_info(), cbuf);
self.encoder.encode(&cfrm).unwrap();
+ while let Ok(Some(pkt)) = self.encoder.get_packet() {
+ queue.queue_packet(pkt);
+ }
true
}
fn flush(&mut self, queue: &mut OutputQueue) -> EncoderResult<()> {
pub global_tb: (u32, u32),
pub queue: OutputQueue,
+ pub fixed_rate: bool,
}
impl Transcoder {
parse_and_apply_options!(encoder, &oopts.enc_opts, name);
+ let enc_stream = ret.unwrap();
+ let real_fmt = enc_stream.get_info().get_properties();
//todo check for params mismatch
- let enc_ctx: Box<dyn EncoderInterface> = match (&iformat, &ret_eparams.format) {
+ let enc_ctx: Box<dyn EncoderInterface> = match (&iformat, &real_fmt) {
(NACodecTypeInfo::Video(svinfo), NACodecTypeInfo::Video(dvinfo)) => {
if svinfo == dvinfo && !forced_out {
Box::new(VideoEncodeContext { encoder, scaler: None, scaler_buf: NABufferType::None })
},
_ => unreachable!(),
};
- out_sm.add_stream_ref(ret.unwrap());
+ out_sm.add_stream_ref(enc_stream);
self.encoders.push(OutputMode::Encode(out_id, enc_ctx));
} else {
println!("encoder {} is not supported by output (expected {})", istr.id, istr.get_info().get_name());