X-Git-Url: https://git.nihav.org/?p=nihav-encoder.git;a=blobdiff_plain;f=src%2Fmain.rs;fp=src%2Fmain.rs;h=ff19ae151f4eebed8f9bb64f20cf34f2a57aed5c;hp=a2889a86b6ab7df77251d3f9bf2f8aa5ba9d54c7;hb=b0481c9e4084c2946507754bc194a64306a278f8;hpb=30611a63a7e339e63f05acb263a41307255040f1

diff --git a/src/main.rs b/src/main.rs
index a2889a8..ff19ae1 100644
--- a/src/main.rs
+++ b/src/main.rs
@@ -13,7 +13,6 @@ use nihav_core::demuxers::*;
 use nihav_core::muxers::*;
 use nihav_core::reorder::*;
 use nihav_core::scale::*;
-use nihav_core::soundcvt::*;
 use nihav_registry::detect;
 use nihav_registry::register;
 use std::env;
@@ -22,6 +21,8 @@ use std::time::{Duration, Instant};
 mod demux;
 use crate::demux::*;
 mod null;
+mod acvt;
+use crate::acvt::*;
 
 fn format_time(ms: u64) -> String {
     let s = ms / 1000;
@@ -102,7 +103,7 @@ struct OutputStreamOptions {
 
 enum OutputConvert {
     Video(NAScale, NABufferType),
-    Audio(NAAudioInfo, NAChannelMap),
+    Audio(AudioConverter),
     None,
 }
 
@@ -549,8 +550,9 @@ println!("can't generate default channel map for {} channels", dainfo.channels);
                             return false;
                         },
                     };
+                let acvt = AudioConverter::new(sainfo, dainfo, dchmap);
 //todo channelmap
-                OutputConvert::Audio(*dainfo, dchmap)
+                OutputConvert::Audio(acvt)
             }
         },
         _ => OutputConvert::None,
@@ -627,7 +629,8 @@ println!("can't generate default channel map for {} channels", dainfo.channels);
                     },
                 };
 //todo channelmap
-                OutputConvert::Audio(*dainfo, dchmap)
+                let acvt = AudioConverter::new(sainfo, dainfo, dchmap);
+                OutputConvert::Audio(acvt)
             }
         },
         _ => OutputConvert::None,
@@ -766,13 +769,12 @@ fn encode_frame(dst_id: u32, encoder: &mut Box<dyn NAEncoder>, cvt: &mut OutputC
                 }
                 dbuf.clone()
             },
-            OutputConvert::Audio(ref dinfo, ref dchmap) => {
-                let ret = convert_audio_frame(&buf, dinfo, dchmap);
-                if ret.is_err() {
+            OutputConvert::Audio(ref mut acvt) => {
+                if !acvt.queue_frame(buf, frm.get_time_information()) {
                     println!("error converting audio for stream {}", dst_id);
                     return false;
                 }
-                ret.unwrap()
+                return true;
            },
        }
    };
@@ -1202,6 +1204,7 @@ println!("stream {} - {} {}", i, s, info.get_name());
                         break;
                     }
                     let frm = ret.unwrap();
+                    let tinfo = frm.get_info();
                     reorderer.add_frame(frm);
                     while let Some(frm) = reorderer.get_frame() {
                         if !encode_frame(dst_id, encoder, cvt, frm, &transcoder.scale_opts) {
@@ -1218,6 +1221,19 @@ println!("stream {} - {} {}", i, s, info.get_name());
                             mux.mux_frame(pkt).unwrap();
                         }
                     }
+                    if let OutputConvert::Audio(ref mut acvt) = cvt {
+                        while let Some(ofrm) = acvt.get_frame(tinfo.clone()) {
+                            if encoder.encode(&ofrm).is_err() {
+                                break;
+                            }
+                            while let Ok(Some(pkt)) = encoder.get_packet() {
+                                if transcoder.end != NATimePoint::None && !pkt.ts.less_than(transcoder.end) { break 'main_loop; }
+                                let pkt_size = pkt.get_buffer().len();
+                                adata_size += pkt_size;
+                                mux.mux_frame(pkt).unwrap();
+                            }
+                        }
+                    }
                 } else {
                     println!("no decoder for stream {}", src_id);
                     break;
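
The new src/acvt.rs module referenced above is not part of this diff, so the shape of AudioConverter can only be inferred from its call sites in main.rs (new(), queue_frame(), get_frame()). Below is a minimal sketch of that assumed interface, built on the same nihav_core::soundcvt::convert_audio_frame() call the removed code used; it is an illustration only, not the actual module, and it omits whatever rechunking to the encoder's block length the real converter performs.

// Assumed interface only: reconstructed from the call sites in the diff,
// not taken from the real src/acvt.rs.
use std::collections::{HashMap, VecDeque};
use nihav_core::formats::*;
use nihav_core::frame::*;
use nihav_core::soundcvt::*;

pub struct AudioConverter {
    dst_info:  NAAudioInfo,
    dst_chmap: NAChannelMap,
    queue:     VecDeque<(NABufferType, NATimeInfo)>,
}

impl AudioConverter {
    pub fn new(_src_info: &NAAudioInfo, dst_info: &NAAudioInfo, dst_chmap: NAChannelMap) -> Self {
        Self { dst_info: *dst_info, dst_chmap, queue: VecDeque::new() }
    }
    // Convert the decoded buffer to the output format right away and park it
    // until the main loop drains it with get_frame().
    pub fn queue_frame(&mut self, buf: NABufferType, ts: NATimeInfo) -> bool {
        match convert_audio_frame(&buf, &self.dst_info, &self.dst_chmap) {
            Ok(cbuf) => { self.queue.push_back((cbuf, ts)); true },
            Err(_)   => false,
        }
    }
    // Wrap the next queued buffer into a frame carrying the stream's codec
    // info so it can be fed straight to the encoder.
    pub fn get_frame(&mut self, info: NACodecInfoRef) -> Option<NAFrame> {
        self.queue.pop_front().map(|(buf, ts)| {
            NAFrame::new(ts, FrameType::I, true, info, HashMap::new(), buf)
        })
    }
}

This matches the control-flow change in encode_frame(): for audio it now returns early after queue_frame(), and the converted frames are pulled and encoded in the drain loop added to the main loop, instead of being encoded inline like the scaled video buffers.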