Parsing a GB28181 RTP PS stream with JavaCV and pushing it to an RTMP server

This article is reposted from the blog of 过去的过去了, an administrator of JavaCV community group 3; many thanks for the generous contribution in support of the JavaCV community.

The full GB28181 national-standard series can be found on 过去的过去了's blog. Thanks again.

The parsing flow follows https://blog.csdn.net/chen495810242/article/details/39207305

The code is adapted from https://github.com/yangjiechina/JGB28181 on GitHub

The stream-parsing code is still not stable in long-running tests against Hikvision cameras, so treat this mainly as learning material; pointers from anyone who has figured it out are welcome =.=
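
For orientation: each RTP payload here is a fragment of an MPEG-2 Program Stream. A keyframe packet from the camera typically starts with the PS pack header (start code 00 00 01 BA), followed by a system header (00 00 01 BB) and a program stream map (00 00 01 BC), then PES packets carrying video (00 00 01 E0) or audio (00 00 01 C0). The handlers below test these start codes inline with magic numbers; as a reading aid, here is a minimal sketch of the same checks (the class and helper names are illustrative, not from the original code):

public final class StartCodes {
	public static final int PS_PACK_HEADER = 0xBA;   // 00 00 01 BA: new PS pack (new frame)
	public static final int PS_SYSTEM_HEADER = 0xBB; // 00 00 01 BB: sent on keyframes
	public static final int PS_MAP = 0xBC;           // 00 00 01 BC: program stream map
	public static final int PES_VIDEO = 0xE0;        // 00 00 01 E0: video PES packet
	public static final int PES_AUDIO = 0xC0;        // 00 00 01 C0: audio PES packet

	/** True if data[offset..offset+3] is the start code 00 00 01 followed by code. */
	public static boolean isStartCode(byte[] data, int offset, int code) {
		return data.length >= offset + 4
				&& data[offset] == 0 && data[offset + 1] == 0
				&& data[offset + 2] == 1 && (data[offset + 3] & 0xFF) == code;
	}
}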

Related classes

BitUtils

public class BitUtils {

	/** Big-endian conversion of two bytes to an unsigned int (e.g. the RTP seq field). */
	public static int byte2ToInt(byte b1, byte b2) {
		return byteToInt(b1, b2);
	}

	/** Big-endian conversion of four bytes to an int. */
	public static int byte4ToInt(byte b1, byte b2, byte b3, byte b4) {
		return byteToInt(b1, b2, b3, b4);
	}

	/** Treats the given bytes as one big-endian unsigned integer. */
	public static int byteToInt(byte... bs) {
		int len = bs.length;
		int temp = 0;
		for (byte b : bs) {
			len--;
			temp += (b & 0xff) << (len * 8);
		}
		return temp;
	}

	public static void main(String[] args) {
		String str = "0b873697";
		byte[] bytes = HexStringUtils.chars2Bytes(str.toCharArray());
		System.out.println(byteToInt(bytes[0], bytes[1], bytes[2], bytes[3]));
		System.out.println(byte4ToInt(bytes[0], bytes[1], bytes[2], bytes[3]));
	}
}

HexStringUtils

public class HexStringUtils {

	private static final char[] DIGITS_HEX = { '0', '1', '2', '3', '4', '5', '6', '7', '8', '9', 'A', 'B', 'C', 'D', 'E', 'F' };

	protected static char[] encodeHex(byte[] data) {
		int l = data.length;
		char[] out = new char[l << 1];
		for (int i = 0, j = 0; i < l; i++) {
			out[j++] = DIGITS_HEX[(0xF0 & data[i]) >>> 4];
			out[j++] = DIGITS_HEX[0x0F & data[i]];
		}
		return out;
	}

	/** Hex representation of a single byte, e.g. (byte) 0xE0 -> "E0". */
	public static String byte2String(byte b) {
		return new String(encodeHex(new byte[] { b }));
	}

	protected static byte[] decodeHex(char[] data) {
		int len = data.length;
		if ((len & 0x01) != 0) {
			throw new RuntimeException("Hex string must contain an even number of characters");
		}
		byte[] out = new byte[len >> 1];
		for (int i = 0, j = 0; j < len; i++) {
			int f = toDigit(data[j], j) << 4;
			j++;
			f |= toDigit(data[j], j);
			j++;
			out[i] = (byte) (f & 0xFF);
		}
		return out;
	}

	protected static int toDigit(char ch, int index) {
		int digit = Character.digit(ch, 16);
		if (digit == -1) {
			throw new RuntimeException("Illegal hexadecimal character " + ch + " at index " + index);
		}
		return digit;
	}

	public static String toHexString(byte[] bs) {
		return new String(encodeHex(bs));
	}

	public static byte[] hexString2Bytes(String hex) {
		return decodeHex(hex.toCharArray());
	}

	public static byte[] chars2Bytes(char[] bs) {
		return decodeHex(bs);
	}
}

Parser


import com.fengyulei.fylsipserver.media.push.RtmpPusher;

import java.util.Map;

public interface Parser {

	/**
	 * TCP length prefix: 2 bytes.
	 */
	int TCP_PACKET_LENGTH = 2;
	/**
	 * RTP headers may carry an extension, but none has been seen in practice;
	 * the header is almost always 12 bytes.
	 */
	int RTP_HEADER_LENGTH = 12;

	/**
	 * UDP mode: payload start index, right after the RTP header.
	 */
	int UDP_START_INDEX = RTP_HEADER_LENGTH;
	/**
	 * TCP mode: 2 bytes more than UDP mode (the length prefix).
	 */
	int TCP_START_INDEX = TCP_PACKET_LENGTH + RTP_HEADER_LENGTH;

	/**
	 * UDP mode: index of the PS pack header's stuffing-length byte.
	 * rtp(12) + 00 00 01 ba(4) + 10 fixed bytes; the low 3 bits of the last
	 * of those bytes give the stuffing length.
	 */
	int UDP_PS_HEADER_STUFFING_LENGTH_INDEX = 25;

	/**
	 * TCP mode: every index shifts by 2 bytes.
	 */
	int TCP_PS_HEADER_STUFFING_LENGTH_INDEX = UDP_PS_HEADER_STUFFING_LENGTH_INDEX + 2;
	/**
	 * CRC-32 check: fixed 4-byte length.
	 */
	int CRC_32_LENGTH = 4;

	void parseUdp(Map<Integer, Packet> packetMap, int firstSeq, int endSeq, RtmpPusher rtmpPusher) throws Exception;

	void parseTcp(Map<Integer, Packet> packetMap, int firstSeq, int endSeq, RtmpPusher rtmpPusher) throws Exception;
}
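
To make the index constants concrete, here is a hedged sketch (assuming the UDP layout described above, i.e. a 12-byte RTP header followed directly by a PS pack header; the method name is illustrative, not part of the original code):

// Sketch: locate the first start code after the PS pack header in a UDP packet.
// Index 25 = 12 (RTP header) + 4 (00 00 01 ba) + 9, i.e. the last of the 10
// fixed pack-header bytes; its low 3 bits give pack_stuffing_length.
static int nextStartCodeIndex(byte[] udpPacket) {
	int stuffing = udpPacket[Parser.UDP_PS_HEADER_STUFFING_LENGTH_INDEX] & 0x07;
	// on I-frames this lands on 00 00 01 bb (system header), otherwise on a PES start code
	return Parser.UDP_PS_HEADER_STUFFING_LENGTH_INDEX + stuffing + 1;
}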

Packet

public class Packet {
	// I-frame
	public static final int I = 0;
	// P-frame
	public static final int P = 1;
	// audio
	public static final int AUDIO = 2;
	// continuation fragment of the current frame
	public static final int SUB_PACKET = 3;

	private int timeStamp;   

	private int seq;

	private byte[] data;

	private int packetType;

	public  int getPacketType(){

		return packetType;
	}


	public Packet(int seq, byte[] data, int packetType) {
		this.seq = seq;
		this.data = data;
		this.packetType = packetType;
	}


	public byte[] getData(){
		return data;
	}

	public int getTimeStamp() {
		return timeStamp;
	}

	public void setTimeStamp(int timeStamp) {
		this.timeStamp = timeStamp;
	}

	public int getSeq() {
		return seq;
	}

	public void setSeq(int seq) {
		this.seq = seq;
	}

	public void setData(byte[] data) {
		this.data = data;
	}
}

UDP packet buffering: SsrcUdpHandler

package com.fengyulei.fylsipserver.media.netty;

import com.fengyulei.fylsipserver.media.codec.CommonParser;
import com.fengyulei.fylsipserver.media.codec.Packet;
import com.fengyulei.fylsipserver.media.common.utils.BitUtils;
import com.fengyulei.fylsipserver.media.common.utils.HexStringUtils;
import com.fengyulei.fylsipserver.media.push.RtmpPusher;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

import java.util.HashMap;
import java.util.Map;
import java.util.concurrent.ConcurrentLinkedDeque;

public class SsrcUdpHandler {

    private static final Logger logger = LoggerFactory.getLogger(SsrcUdpHandler.class);

    /**
     * Deque of the seq numbers of key packets (I/P/audio frame starts)
     */
    private ConcurrentLinkedDeque<Integer> mSeqMap = new ConcurrentLinkedDeque<>();
	// packet buffer, later walked in sequence-number order
    private Map<Integer,Packet> mPacketMap = new HashMap<>(60);

    /**
     * Whether the first frame seen was an I-frame;
     * everything before the first I-frame is simply dropped
     */
    private boolean mIsFirstI;
	// unique identifier of the stream
    private String ssrc;
	// pusher, created in the constructor
    private RtmpPusher rtmpPusher;

    public RtmpPusher getRtmpPusher() {
        return rtmpPusher;
    }
	// RTP PS parser
    private CommonParser mParser;

    // buffer a few extra frames to mitigate UDP packet reordering
    private int CACHE_FRAME_LENGTH= 5;

    public SsrcUdpHandler(CommonParser mParser,String ssrc){
        this.mParser=mParser;
        this.ssrc=ssrc;
        // adjust this URL to your own RTMP server
        rtmpPusher=new RtmpPusher("rtmp://192.168.1.201:1935/live/"+ssrc,ssrc,"UDP");
        rtmpPusher.startRemux();
    }

    public void read(byte[] copyData){
        int length=copyData.length;
        // extract the sequence number; see the RTP parsing article linked at the top for the exact offsets
        int seq = BitUtils.byte2ToInt(copyData[2],copyData[3]);
        try{
			// packet to buffer
            Packet packet;
            // after the 12-byte RTP header (14 bytes in TCP mode), a payload starting with 00 00 01 ba is a new video frame
            if(length > 16 && copyData[12] == 0 && copyData[13] == 0 && copyData[14] == 1 && (copyData[15]&0xff) == 0xba){
                int stuffingLength =  copyData[25] & 7;
                int startIndex = 25+stuffingLength+1;
                // I-frame: the pack header is followed by a system header, 00 00 01 bb
                if(copyData[startIndex] == 0 && copyData[startIndex+1] == 0 && copyData[startIndex+2] == 1 && (copyData[startIndex+3]&0xff) == 0xbb )
                {
                    packet = new Packet(seq,copyData,Packet.I);
                    if(!mIsFirstI){
                        mIsFirstI = true;
                    }
                }
                // P-frame
                else{
                    if(!mIsFirstI){
                        return;
                    }
                    packet = new Packet(seq,copyData,Packet.P);
                }
                // record the seq of each frame start, used for ordering and for splitting into frames
                mSeqMap.add(seq);
            }
            // audio data, 00 00 01 c0
            else if( length > 16 &&  copyData[12] == 0 && copyData[13] == 0 && copyData[14] == 1 && (copyData[15]&0xff) == 0xc0){
                if(!mIsFirstI){
                    return;
                }
                mSeqMap.add(seq);
                packet = new Packet(seq,copyData,Packet.AUDIO);
            }else {
                if(!mIsFirstI){
                    return ;
                }
                packet = new Packet(seq,copyData,Packet.SUB_PACKET);
            }
            mPacketMap.put(seq, packet);
            if(mSeqMap.size() >= CACHE_FRAME_LENGTH){
                // take (and remove) the seq of the oldest buffered frame
                Integer firstSeq = mSeqMap.pop();
                // the next frame's start seq minus 1 bounds the current frame's packets
                Integer endSeq = mSeqMap.getFirst()-1;
                mParser.parseUdp(mPacketMap,firstSeq,endSeq,rtmpPusher);
            }
        }catch (Exception e){
            logger.error(e.getMessage(),e);
            logger.error("UDPHandler parse error: [{}]", HexStringUtils.toHexString(copyData));
        }
    }
}
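
One caveat worth flagging (and a plausible contributor to the instability mentioned at the top): RTP sequence numbers are 16-bit and wrap from 65535 back to 0, while read() and the parser below treat them as monotonically increasing ints (mSeqMap.getFirst()-1, for(i = firstSeq; i <= endSeq; i++)). A hedged sketch of wraparound-aware arithmetic, not present in the original code:

// 16-bit sequence arithmetic in the spirit of RFC 3550; names are illustrative.
static int nextSeq(int seq) {
	return (seq + 1) & 0xFFFF;
}

// Iterating one frame's packets without assuming firstSeq <= endSeq numerically:
// for (int s = firstSeq; s != nextSeq(endSeq); s = nextSeq(s)) {
//     Packet p = packetMap.remove(s);
//     ...
// }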

TCP packet buffering: SsrcTcpHandler

import com.fengyulei.fylsipserver.media.codec.CommonParser;
import com.fengyulei.fylsipserver.media.codec.Packet;
import com.fengyulei.fylsipserver.media.common.utils.BitUtils;
import com.fengyulei.fylsipserver.media.common.utils.HexStringUtils;
import com.fengyulei.fylsipserver.media.push.RtmpPusher;
import io.netty.channel.Channel;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

import java.util.HashMap;
import java.util.Map;
import java.util.concurrent.ConcurrentLinkedDeque;
// largely the same as the UDP handler
public class SsrcTcpHandler {

    private static final Logger logger = LoggerFactory.getLogger(SsrcTcpHandler.class);

    /**
     * Deque of the seq numbers of key packets (I/P/audio frame starts)
     */
    private ConcurrentLinkedDeque<Integer> mSeqMap = new ConcurrentLinkedDeque<>();

    private Map<Integer,Packet> mPacketMap = new HashMap<>(60);
    /**
     * Whether the first frame seen was an I-frame;
     * everything before the first I-frame is simply dropped
     */
    private boolean mIsFirstI;

    private String ssrc;

    private RtmpPusher rtmpPusher;

    public RtmpPusher getRtmpPusher() {
        return rtmpPusher;
    }
	// each TCP instance keeps its channel so the connection can be closed later
    private Channel channel;

    public Channel getChannel() {
        return channel;
    }

    public void setChannel(Channel channel) {
        this.channel = channel;
    }
	// parser
    private CommonParser mParser;
	// TCP buffers fewer frames; UDP buffers more to counter reordering
    private int CACHE_FRAME_LENGTH= 2;
	// create the TCP pusher
    public SsrcTcpHandler(CommonParser mParser, String ssrc){
        this.mParser=mParser;
        this.ssrc=ssrc;
        // the push URL could be passed in as a parameter or read from configuration
        rtmpPusher=new RtmpPusher("rtmp://192.168.1.201:1935/live/"+ssrc,ssrc,"TCP");
        rtmpPusher.startRemux();
    }

    public void read(byte[] copyData){
        int length=copyData.length;
        // extract the sequence number; see the RTP parsing article linked at the top for the exact offsets
        int seq = BitUtils.byte2ToInt(copyData[4],copyData[5]);
        try{

            Packet packet;
            if(length > 18 && copyData[14] == 0 && copyData[15] == 0 && copyData[16] == 1 && (copyData[17]&0xff) == 0xba){
                int stuffingLength =  copyData[27] & 7;
                int startIndex = 27+stuffingLength+1;
                // I-frame
                if(copyData[startIndex] == 0 && copyData[startIndex+1] == 0 && copyData[startIndex+2] == 1 && (copyData[startIndex+3]&0xff) == 0xbb )
                {
                    packet = new Packet(seq,copyData,Packet.I);
                    if(!mIsFirstI){
                        mIsFirstI = true;
                    }

                }
                // P-frame
                else{
                    if(!mIsFirstI){
                        return;
                    }
                    packet = new Packet(seq,copyData,Packet.P);
                }
                mSeqMap.add(seq);
            }
            // audio data
            else if( length > 18 &&  copyData[14] == 0 && copyData[15] == 0 && copyData[16] == 1 && (copyData[17]&0xff) == 0xc0){
                if(!mIsFirstI){
                    return;
                }
                mSeqMap.add(seq);
                packet = new Packet(seq,copyData,Packet.AUDIO);
            }else {
                if(!mIsFirstI){
                    return ;
                }
                packet = new Packet(seq,copyData,Packet.SUB_PACKET);
            }
            mPacketMap.put(seq, packet);
            if(mSeqMap.size() >= CACHE_FRAME_LENGTH){
                Integer firstSeq = mSeqMap.pop();
                Integer endSeq = mSeqMap.getFirst()-1;
                mParser.parseTcp(mPacketMap,firstSeq,endSeq,rtmpPusher);
            }
        }catch (Exception e){
            logger.error(e.getMessage(),e);
            logger.error("TCPHandler parse error: [{}]", HexStringUtils.toHexString(copyData));
        }

    }
}
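
In TCP mode, GB28181 prefixes every RTP packet with a 2-byte big-endian length (RFC 4571-style framing), which is what TCP_PACKET_LENGTH accounts for. Before SsrcTcpHandler.read() can assume one complete packet per byte[], something must reassemble frames from the TCP byte stream; with Netty this is typically a LengthFieldBasedFrameDecoder. A sketch, assuming the Netty server bootstrap lives elsewhere (the original section does not show it):

import io.netty.channel.ChannelPipeline;
import io.netty.handler.codec.LengthFieldBasedFrameDecoder;

static void addRtpOverTcpFraming(ChannelPipeline pipeline) {
	// maxFrameLength=65535, lengthFieldOffset=0, lengthFieldLength=2,
	// lengthAdjustment=0, initialBytesToStrip=0: the 2-byte prefix is kept,
	// since SsrcTcpHandler's offsets (seq at bytes 4-5, start code at byte 14)
	// include it.
	pipeline.addLast(new LengthFieldBasedFrameDecoder(65535, 0, 2, 0, 0));
}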

Parser implementation

package com.fengyulei.fylsipserver.media.codec;

import java.util.Map;

import com.fengyulei.fylsipserver.media.common.utils.BitUtils;
import com.fengyulei.fylsipserver.media.common.utils.HexStringUtils;
import com.fengyulei.fylsipserver.media.push.RtmpPusher;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.springframework.stereotype.Component;


/**
 * RTP PS parsing
 * Flow follows https://blog.csdn.net/chen495810242/article/details/39207305
 * Adapted from https://github.com/yangjiechina/JGB28181
 */
@Component
public  class CommonParser implements Parser{
	private Logger log = LoggerFactory.getLogger(getClass());

	// UDP parsing
	@Override
	public void parseUdp(Map<Integer,Packet> packetMap,int firstSeq,int endSeq,RtmpPusher rtmpPusher) throws Exception{
		parse(packetMap,firstSeq,endSeq,rtmpPusher,rtmpPusher.getType());
	}


	// TCP parsing
	@Override
	public void parseTcp(Map<Integer,Packet> packetMap,int firstSeq,int endSeq,RtmpPusher rtmpPusher) throws Exception{
		parse(packetMap,firstSeq,endSeq,rtmpPusher,rtmpPusher.getType());
	}


	private void parse(Map<Integer,Packet> packetMap,int firstSeq,int endSeq,RtmpPusher rtmpPusher,String type) throws Exception{
		int remainEsLength = 0;
		int startIndex = 0;
		byte[] data=null;
		Packet packet=null;
		boolean isAudio=false;
		boolean flag=true;
		long pts=0L;
		int i=0;
		try {
			for(i = firstSeq; i<= endSeq;i++){
				packet = packetMap.remove(i);
				// fetched by seq; null means a packet was lost, so drop this frame and bail out
				if(packet == null){
					log.error("packet lost");
					flag=false;
					break;
				}
				data = packet.getData();
				int packetType = packet.getPacketType();
				boolean hasSubPacket = true;
				int pesStartIndex = 0;
				// the TCP header is two bytes longer than UDP's, because of the TCP framing length prefix
				if(packetType == Packet.I){
					if("UDP".equals(type)){
						pesStartIndex = getIFramePesStartIndex(data, UDP_PS_HEADER_STUFFING_LENGTH_INDEX);
					}else{
						pesStartIndex = getIFramePesStartIndex(data, TCP_PS_HEADER_STUFFING_LENGTH_INDEX);
					}

					hasSubPacket = false;
				}else if(packetType == Packet.P){
					if("UDP".equals(type)){
						pesStartIndex = getPFramePesStartIndex(data,UDP_PS_HEADER_STUFFING_LENGTH_INDEX);
					}else {
						pesStartIndex = getPFramePesStartIndex(data,TCP_PS_HEADER_STUFFING_LENGTH_INDEX);
					}

					hasSubPacket = false;
				}else if(packetType == Packet.AUDIO){
					if("UDP".equals(type)){
						pesStartIndex = UDP_START_INDEX;
					}else {
						pesStartIndex = TCP_START_INDEX;
					}

					hasSubPacket = false;
				}
				isAudio = (packetType == Packet.AUDIO);

				// compute startIndex for all packet types
				if(!hasSubPacket){
					pts = getPts(data, pesStartIndex);
					onPtsCallBack(pts,isAudio,rtmpPusher);
					// PES length: the first 4 bytes are the start code 00 00 01 e0; bytes 5 and 6 carry the length
					int pesDataLength = BitUtils.byte2ToInt(data[pesStartIndex+4],data[pesStartIndex+5]);
					// PES header data length
					int pesHeaderDataLength = data[pesStartIndex+8] & 0xFF;
					// ES start index
					startIndex = pesStartIndex+6+3+pesHeaderDataLength;
					remainEsLength = pesDataLength-3-pesHeaderDataLength;
				}else {
					if("UDP".equals(type)){
						startIndex = UDP_START_INDEX;
					}else {
						startIndex = TCP_START_INDEX;
					}

				}

				int packetLength= data.length;
				int dataLength  = packetLength - startIndex;
				// if it fits, the rest of the packet is all ES data
				if(dataLength <= remainEsLength){
					remainEsLength -= dataLength;
					onMediaStreamCallBack(data, startIndex, dataLength,isAudio,rtmpPusher);
					startIndex = 0;
					continue;
				}
				// otherwise the packet still contains further PES packets:
				// first write out the remainder of the previous ES,
				// then parse the next PES
				onMediaStreamCallBack(data, startIndex, remainEsLength,isAudio,rtmpPusher);
				startIndex+=remainEsLength;
				remainEsLength = 0;

				while(true){
					if(packetLength-startIndex<=8){
						//onMediaStreamCallBack(data, startIndex, packetLength-startIndex,isAudio,rtmpPusher);
						log.error("parse error 1:[{}],[{}]",rtmpPusher.getSsrc(),type);
						break;
					}
					// total PES data length
					int newPesDataLength = BitUtils.byte2ToInt(data[startIndex + 4],data[startIndex+5]);
					// PES header data length
					int pesHeaderDataLength = data[startIndex+8] & 0xFF;
					// ES length
					remainEsLength = newPesDataLength - 3 - pesHeaderDataLength;

					startIndex+=8+pesHeaderDataLength+1;
					// past the end of the packet: no ES left here, exit the loop
					if(startIndex >= packetLength){
						break;
					}
					// corrupted reception can make remainEsLength inaccurate
					if(remainEsLength<0){
						log.error("parse error 2:[{}],[{}]",rtmpPusher.getSsrc(),type);
						break;
					}
					// bytes remaining in the current packet
					int packetRemainLength =  packetLength - startIndex;
					// if it fits, the remainder of the packet is all ES data:
					// write it out and exit the loop
					if(packetRemainLength <= remainEsLength){
						onMediaStreamCallBack(data, startIndex, packetRemainLength,isAudio,rtmpPusher);
						//datas=byteMerger(datas,subBytes(data,startIndex,packetRemainLength));
						remainEsLength-= packetRemainLength;
						break;
					}
					// more than the ES length means another PES follows:
					// write out this ES and continue the loop
					onMediaStreamCallBack(data, startIndex, remainEsLength,isAudio,rtmpPusher);
					startIndex+=remainEsLength;
				}
				startIndex = 0;
			}
			if(!flag){
				// on packet loss, clear the rest of this frame from the map to avoid leaking memory
				for(i = i+1; i<= endSeq;i++){
					packetMap.remove(i);
				}
			}

		}catch (Exception e){

			log.error("startIndex:{}",startIndex);
			log.error("unpack error:{}",HexStringUtils.toHexString(data));
			log.error(e.getMessage(),e);

			// on exception, clear the remaining packets as well
			for(i = i+1; i<= endSeq;i++){
				packetMap.remove(i);
			}
		}
	}

	/**
	 * Computes the PES start index for an I-frame packet
	 * @param data
	 * @param stuffingLengthIndex
	 * @return
	 */
	private int getIFramePesStartIndex(byte[] data,int stuffingLengthIndex){

		// stuffing length from the PS pack header; skip straight over those bytes
		int stuffingLength =  data[stuffingLengthIndex] & 7;
		// start index of the system header
		int psSystemHeaderStartIndex =  stuffingLengthIndex+stuffingLength+1;

		int psSystemHeaderLength=0;
		if((data[psSystemHeaderStartIndex+3]&0xff)==0xbb){
			// system header length, 00 00 01 bb
			 psSystemHeaderLength = BitUtils.byte2ToInt(data[psSystemHeaderStartIndex + 4],data[psSystemHeaderStartIndex+5]);
		}else{
			log.error("no system header");
		}


		int psMapHeaderStartIndex = psSystemHeaderStartIndex + 6 +psSystemHeaderLength;

		// program stream map length, 00 00 01 bc
		int psMapHeaderLength = BitUtils.byte2ToInt(data[psMapHeaderStartIndex+4],data[psMapHeaderStartIndex+5]);

		// PES start index, 00 00 01 e0
		return psMapHeaderStartIndex+6+psMapHeaderLength;
	}
	/**
	 * Computes the PES start index for a P-frame packet
	 * @param data
	 * @param stuffingLengthIndex
	 * @return
	 */
	private int getPFramePesStartIndex(byte[] data,int stuffingLengthIndex){

		// skip the stuffing bytes declared in the PS pack header
		int stuffingLength =  data[stuffingLengthIndex] & 7;
		return stuffingLengthIndex+stuffingLength+1;
	}
	/**
	 * Extracts the PTS
	 * @param data source packet
	 * @param pesStartIndex index of the PES start code (00 00 01 e0 / c0)
	 * @return PTS in 90 kHz ticks, or -1 if none is present
	 */
	private long getPts(byte[] data,int pesStartIndex){
		if(data != null && data.length >= pesStartIndex+14){
			try{
				// check whether the PES header carries PTS/DTS;
				// an I-frame's PES header must carry at least a PTS
				// PTS_DTS_flags: `11` = PTS and DTS, `10` = PTS only, `00` = neither
				byte ptsDtsFlags =  (byte) ((data[pesStartIndex+7]&0xff) >> 6 & 0x3);

				if(ptsDtsFlags == 0x3 || ptsDtsFlags==0x2){
					// the 33-bit PTS is spread over the 5 bytes at offsets 9..13
					// pts[32..30]: bits 3..1 of the first byte
					long bitHigh32_30 = (long)(data[pesStartIndex+9] & 0x0E) << 29;
					// pts[29..15]: bits 15..1 of the next two bytes
					long bitHigh29_15 = (long)(BitUtils.byte2ToInt(data[pesStartIndex+10],data[pesStartIndex+11]) & 0xFFFE) << 14;
					// pts[14..0]: bits 15..1 of the last two bytes
					long bitHigh14_0 = (BitUtils.byte2ToInt(data[pesStartIndex+12],data[pesStartIndex+13]) & 0xFFFE) >> 1;

					return bitHigh32_30 | bitHigh29_15 | bitHigh14_0;
				}
			}catch(Exception e){
				log.error(e.getMessage(),e);
			}
		}
		return -1;
	}
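	// Worked example (not from the original post): PES optional-header bytes
	// 21 00 01 00 01 at offsets 9..13 decode to PTS = 0. The PTS is counted
	// in 90 kHz clock ticks, so pts / 90 gives milliseconds.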
	private void onMediaStreamCallBack(byte[] data,int offset,int length,boolean isAudio,RtmpPusher rtmpPusher){

		if(rtmpPusher==null){
			return;
		}
		rtmpPusher.onMediaStream(data,offset,length,isAudio);
	}

	private void onPtsCallBack(long pts,boolean isAudio,RtmpPusher rtmpPusher){
		if(rtmpPusher==null){
			return;
		}
		rtmpPusher.onPts(pts,isAudio);
	}

	public static void main(String[] args) throws Exception{
		CommonParser commonParser=new CommonParser();
		String str="806042FB01EE9AC005F5E104000001BA4DC46C65140101399FFEFFFF0129D1CC000001E0059E8C800A23711B1945FFFFFFFFF800000001219A00080011970E87B5E967209E98369516B86F9D6FC15454FA083F0835B9651EAE0DA777607A6C8932A96821272555D8DFC04EDB3DFE53A9438DC7C0A49B0EEE31F7D6DD8457EC10E626C8DC9BAD22AD4F176C3EFED63C18EE164F4A61E05FC1FCA9858F6CDE9982858208E684E61741A87CEC822F1AB5A19719D7D93CBA937ACC7675E3DAB30B1A2FBF0DDFCAAA0A9ABCC851FBF075FF9C9A7662852B0E5E371751B02AB24F1F93F2F1DEA98C714AB9310716B0E5F29BD37FC2D7148824A997EDB7F071F94579EAE4B9CDFE3FB7757FE03ED30BFB38E223D2AFFA0F1ED8D387196567381EA4B06188BDCE63C574D6545560ED2188E8C3511662FB18A5A9D570DB595206F1269C0B0214CB9CD7EA11738D953A70BEDCE1C475BC1EB80C302CAD1DE1BBF6417035656F106C6B74A55833DEEFB0A64E71F56F2CF3761F9D736EA6B96D74E5B809B59418963063F288880D272AA418EF57E3BB92E466586DE2E1AF4DC89D037133B6577A5FB9BFE28AF1D5629DC9C3043BF84F47A7FAD88519369DCC47AC3AD3976579D84C7C76412AE79AF628D86AD0433752989DFAE64F0CC658BB184E76D618FFB2FBBC801D252E721625D46B05E7DF6812B275AE091491E4111914AC84D61E775DF1BCDF2E759B34B74EF7B4FC17A8BED9A75A08B4E039D8D8ABF0E705B21209BD0FE8BE9BC71DE78D68A884C59BCD33D94C311CCA2B19860EBB53B580042DB35423ED26B4A351EBAA89A69602B6A6B86112F26D0538C37DFB015439D12F225187870EC4A0D96EACBCB28451FAC45A20F43DD7A2C24B1EFEC1BA7A973C341727B9376B88C8EF0E1AF7AF4EC732E241F2D9FCFDEDED066A7B0DC0F008C9BE7C6676364196B6EEA150988206AFF5E14CE2EF4B31E2CECBB3A44EB5C6132CA250B93AAE3D235038C6A0798B1D7AD0C65E2BBF881E7275B774C738873FE40022BF9DB05F400001E3DBF5F0917B22238E6CB1886D570174A5000003000003000732DAB289DE803AE562BC1B7ACCCD09B48007BAE2E39275319CA9B18C758598A2C1A8A9BFECFF49A6DC1172AC5DAAA330688CC99D5551C1E024F0C85BAF1E33137C283594FB20047888308136C4FF45D0A7F4A9B957AE5246D05521894FCEAF68BB2C666D272613FEC0E2B6E119A698F6D5DC9822BD735193E107B4A637C91CCE133CDFBB054257A5FF563A5F970B5EC2B170E05F24AF1A19BF478EFC182906695B770904AD9FE26476DD4FD3006F770ABD09C61B04C69E01EE362DA3C4C8E24E10B97AD3E461F84AB469CFCF7103215B79FBF3396BD2328D2998F31910CDD361CC2605B3CF2E742E106080725C823C0ED477E55C04DC108F8F6C3491F09907CB27F81B13A3A8D833EC8A8986EE0F801D1199ADE0B486917843B1CCE405F3976F7C6CB4004B0522F9E75894AA80B6C9373B4B822FA326AFB901070AF9A0A6CD7DD82D4DFF4A988042D67AAAF56C747275A4D445983236EEAF01EC5F647EF1A60901461AA377827AF3B017CA532410F8309A0AE90610D477E7C805B3A3891271832A0EE6FAF31E07D05157BCDE52255E1E33FC873C8B39FA650EB5BFA0EA857811BE45D8F0ED214DE88488E4BFFA589EFA6CAC8F6EDFDE29611152BB939157518B80F5834CE81DD347A39D18F68D0828A42EA3E2BE6D6A4F0014D4729C6B6A8F6BB51A43EBFBA9E360DEFBE91C541DE53E0E2BA112C9732AA7705DCE62340B8E9C49FF46A1DBC79F98644B07F02B033C869F2AACE338E8CF2B7E5FFC35A90860F1826B49818280810593E1B7588AEC11DB51A5CFF6DA1B18E510E1E6AEBFA9028809543224FA810EEACBC26C8F828965939F79C0C497AB252439FC196DE58DD1FFBFDE349C4E048EC9FCC8BB2C2B98A09CC50E904F8079A6985B0FAEC13C21260EB5B37DE0B102D2";
		byte[] data= HexStringUtils.chars2Bytes(str.toCharArray());
		System.out.println("总共字节长度为:"+data.length);
		int pesStartIndex = commonParser.getPFramePesStartIndex(data,UDP_PS_HEADER_STUFFING_LENGTH_INDEX);

		System.out.println((data[pesStartIndex+3]&0xff)==0xe0);
		//00 00 01 e0 --->0 0 1 224
		System.out.println("pes包头开始下标:"+pesStartIndex+"--->"+HexStringUtils.byte2String(data[pesStartIndex])+HexStringUtils.byte2String(data[pesStartIndex+1])+HexStringUtils.byte2String(data[pesStartIndex+2])+HexStringUtils.byte2String(data[pesStartIndex+3]));

		//pes长度
		int pesDataLength = BitUtils.byte2ToInt(data[pesStartIndex+4],data[pesStartIndex+5]);
		System.out.println("pes长度:"+pesDataLength);

		//pes头长度
		int pesHeaderDataLength = data[pesStartIndex+8] & 0xFF;
		System.out.println("头长度:"+pesHeaderDataLength);

		//es起始索引 2 6
		int startIndex = pesStartIndex+6+3+pesHeaderDataLength;
		System.out.println("es起始索引:"+startIndex);
		int remainEsLength = pesDataLength-3-pesHeaderDataLength;
		System.out.println("es数据长度:"+remainEsLength);



		int packetLength= data.length;
		int dataLength  = packetLength - startIndex;
		//if it fits, the rest of the packet is all ES data
		if(dataLength <= remainEsLength){
			remainEsLength -= dataLength;
			//onMediaStreamCallBack(data, startIndex, dataLength,isAudio,rtmpPusher);
			startIndex = 0;
			return;
			//continue;
		}
		//otherwise the packet still contains further PES packets:
		//first write out the remainder of the previous ES,
		//then parse the next PES
		//onMediaStreamCallBack(data, startIndex, remainEsLength,isAudio,rtmpPusher);
		startIndex+=remainEsLength;
		remainEsLength = 0;
		while(true){
			//total length of the new PES data
			int newPesDataLength = BitUtils.byte2ToInt(data[startIndex + 4],data[startIndex+5]);
			//new PES header length
			 pesHeaderDataLength = data[startIndex+8] & 0xFF;
			//ES length
			remainEsLength = newPesDataLength - 3 - pesHeaderDataLength;

			startIndex+=8+pesHeaderDataLength+1;
			//past the end of the packet: no ES left here, exit the loop
			if(startIndex >= packetLength){
				break;
			}
			//bytes remaining in the current packet
			int packetRemainLength =  packetLength - startIndex;
			//if it fits, the remainder of the packet is all ES data:
			//write it out and exit the loop
			if(packetRemainLength <= remainEsLength){
				//onMediaStreamCallBack(data, startIndex, packetRemainLength,isAudio,rtmpPusher);
				remainEsLength-= packetRemainLength;
				break;
			}
			//more than the ES length means another PES follows:
			//write out this ES and continue the loop
			//onMediaStreamCallBack(data, startIndex, remainEsLength,isAudio,rtmpPusher);
			startIndex+=remainEsLength;
		}
		startIndex = 0;
	}
}
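
The parser hands data to the pusher only through onMediaStream and onPts. The RtmpPusher implementation belongs to the pusher part of this series; for reading the code above, its surface is roughly the following (a hypothetical skeleton inferred from the call sites, not the original class):

// Hypothetical skeleton of RtmpPusher, inferred from how it is used above.
public class RtmpPusher {
	public RtmpPusher(String rtmpUrl, String ssrc, String type) { /* ... */ }
	public void startRemux() { /* start the FFmpeg-based remux pipeline */ }
	public void onMediaStream(byte[] data, int offset, int length, boolean isAudio) { /* buffer ES data */ }
	public void onPts(long pts, boolean isAudio) { /* record the latest PTS */ }
	public String getType() { return null; /* "UDP" or "TCP" */ }
	public String getSsrc() { return null; }
}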

The RTMP pusher

Add the JavaCV and FFmpeg dependencies:

<!-- javacv and ffmpeg -->
<dependency>
	<groupId>org.bytedeco</groupId>
	<artifactId>javacv</artifactId>
	<version>1.4.4</version>
</dependency>
<dependency>
	<groupId>org.bytedeco</groupId>
	<artifactId>javacpp</artifactId>
	<version>1.4.4</version>
</dependency>
<dependency>
	<groupId>org.bytedeco.javacpp-presets</groupId>
	<artifactId>ffmpeg</artifactId>
	<version>4.1-1.4.4</version>
</dependency>
<dependency>
	<groupId>org.bytedeco.javacpp-presets</groupId>
	<artifactId>ffmpeg-platform</artifactId>
	<version>4.1-1.4.4</version>
</dependency>

CustomFFmpegFrameRecorder, which replaces the stock FFmpegFrameRecorder

package com.fengyulei.fylsipserver.media.push;

import static org.bytedeco.javacpp.avcodec.AV_CODEC_CAP_EXPERIMENTAL;
import static org.bytedeco.javacpp.avcodec.AV_CODEC_FLAG_GLOBAL_HEADER;
import static org.bytedeco.javacpp.avcodec.AV_CODEC_FLAG_QSCALE;
import static org.bytedeco.javacpp.avcodec.AV_CODEC_ID_AAC;
import static org.bytedeco.javacpp.avcodec.AV_CODEC_ID_FFV1;
import static org.bytedeco.javacpp.avcodec.AV_CODEC_ID_FLV1;
import static org.bytedeco.javacpp.avcodec.AV_CODEC_ID_H263;
import static org.bytedeco.javacpp.avcodec.AV_CODEC_ID_H264;
import static org.bytedeco.javacpp.avcodec.AV_CODEC_ID_HUFFYUV;
import static org.bytedeco.javacpp.avcodec.AV_CODEC_ID_JPEGLS;
import static org.bytedeco.javacpp.avcodec.AV_CODEC_ID_MJPEG;
import static org.bytedeco.javacpp.avcodec.AV_CODEC_ID_MJPEGB;
import static org.bytedeco.javacpp.avcodec.AV_CODEC_ID_MPEG1VIDEO;
import static org.bytedeco.javacpp.avcodec.AV_CODEC_ID_MPEG2VIDEO;
import static org.bytedeco.javacpp.avcodec.AV_CODEC_ID_MPEG4;
import static org.bytedeco.javacpp.avcodec.AV_CODEC_ID_NONE;
import static org.bytedeco.javacpp.avcodec.AV_CODEC_ID_PCM_S16BE;
import static org.bytedeco.javacpp.avcodec.AV_CODEC_ID_PCM_S16LE;
import static org.bytedeco.javacpp.avcodec.AV_CODEC_ID_PCM_U16BE;
import static org.bytedeco.javacpp.avcodec.AV_CODEC_ID_PCM_U16LE;
import static org.bytedeco.javacpp.avcodec.AV_CODEC_ID_PNG;
import static org.bytedeco.javacpp.avcodec.AV_CODEC_ID_RAWVIDEO;
import static org.bytedeco.javacpp.avcodec.AV_INPUT_BUFFER_MIN_SIZE;
import static org.bytedeco.javacpp.avcodec.AV_PKT_FLAG_KEY;
import static org.bytedeco.javacpp.avcodec.av_init_packet;
import static org.bytedeco.javacpp.avcodec.av_jni_set_java_vm;
import static org.bytedeco.javacpp.avcodec.av_packet_unref;
import static org.bytedeco.javacpp.avcodec.avcodec_alloc_context3;
import static org.bytedeco.javacpp.avcodec.avcodec_copy_context;
import static org.bytedeco.javacpp.avcodec.avcodec_encode_audio2;
import static org.bytedeco.javacpp.avcodec.avcodec_encode_video2;
import static org.bytedeco.javacpp.avcodec.avcodec_fill_audio_frame;
import static org.bytedeco.javacpp.avcodec.avcodec_find_encoder;
import static org.bytedeco.javacpp.avcodec.avcodec_find_encoder_by_name;
import static org.bytedeco.javacpp.avcodec.avcodec_free_context;
import static org.bytedeco.javacpp.avcodec.avcodec_open2;
import static org.bytedeco.javacpp.avcodec.avcodec_parameters_from_context;
import static org.bytedeco.javacpp.avcodec.avcodec_register_all;
import static org.bytedeco.javacpp.avdevice.avdevice_register_all;
import static org.bytedeco.javacpp.avformat.AVFMT_GLOBALHEADER;
import static org.bytedeco.javacpp.avformat.AVFMT_NOFILE;
import static org.bytedeco.javacpp.avformat.AVIO_FLAG_WRITE;
import static org.bytedeco.javacpp.avformat.av_dump_format;
import static org.bytedeco.javacpp.avformat.av_guess_format;
import static org.bytedeco.javacpp.avformat.av_interleaved_write_frame;
import static org.bytedeco.javacpp.avformat.av_register_all;
import static org.bytedeco.javacpp.avformat.av_write_frame;
import static org.bytedeco.javacpp.avformat.av_write_trailer;
import static org.bytedeco.javacpp.avformat.avformat_alloc_output_context2;
import static org.bytedeco.javacpp.avformat.avformat_network_init;
import static org.bytedeco.javacpp.avformat.avformat_new_stream;
import static org.bytedeco.javacpp.avformat.avformat_write_header;
import static org.bytedeco.javacpp.avformat.avio_alloc_context;
import static org.bytedeco.javacpp.avformat.avio_close;
import static org.bytedeco.javacpp.avformat.avio_open2;
import static org.bytedeco.javacpp.avutil.*;
import static org.bytedeco.javacpp.swresample.swr_alloc_set_opts;
import static org.bytedeco.javacpp.swresample.swr_convert;
import static org.bytedeco.javacpp.swresample.swr_free;
import static org.bytedeco.javacpp.swresample.swr_init;
import static org.bytedeco.javacpp.swscale.SWS_BILINEAR;
import static org.bytedeco.javacpp.swscale.sws_freeContext;
import static org.bytedeco.javacpp.swscale.sws_getCachedContext;
import static org.bytedeco.javacpp.swscale.sws_scale;

import java.io.File;
import java.io.IOException;
import java.io.OutputStream;
import java.nio.Buffer;
import java.nio.ByteBuffer;
import java.nio.ByteOrder;
import java.nio.DoubleBuffer;
import java.nio.FloatBuffer;
import java.nio.IntBuffer;
import java.nio.ShortBuffer;
import java.util.Collections;
import java.util.HashMap;
import java.util.Map;
import java.util.Map.Entry;

import org.bytedeco.javacpp.BytePointer;
import org.bytedeco.javacpp.DoublePointer;
import org.bytedeco.javacpp.FloatPointer;
import org.bytedeco.javacpp.IntPointer;
import org.bytedeco.javacpp.Loader;
import org.bytedeco.javacpp.Pointer;
import org.bytedeco.javacpp.PointerPointer;
import org.bytedeco.javacpp.PointerScope;
import org.bytedeco.javacpp.ShortPointer;
import org.bytedeco.javacpp.avcodec.AVCodec;
import org.bytedeco.javacpp.avcodec.AVCodecContext;
import org.bytedeco.javacpp.avcodec.AVPacket;
import org.bytedeco.javacpp.avformat.AVFormatContext;
import org.bytedeco.javacpp.avformat.AVIOContext;
import org.bytedeco.javacpp.avformat.AVOutputFormat;
import org.bytedeco.javacpp.avformat.AVStream;
import org.bytedeco.javacpp.avformat.Write_packet_Pointer_BytePointer_int;
import org.bytedeco.javacpp.avutil.AVDictionary;
import org.bytedeco.javacpp.avutil.AVFrame;
import org.bytedeco.javacpp.avutil.AVRational;
import org.bytedeco.javacpp.swresample.SwrContext;
import org.bytedeco.javacpp.swscale.SwsContext;
import org.bytedeco.javacv.FFmpegFrameRecorder;
import org.bytedeco.javacv.FFmpegLockCallback;
import org.bytedeco.javacv.Frame;
import org.bytedeco.javacv.FrameRecorder;

@SuppressWarnings("all")
public class CustomFFmpegFrameRecorder extends FrameRecorder {

	public static CustomFFmpegFrameRecorder createDefault(File f, int w, int h)   throws Exception { return new CustomFFmpegFrameRecorder(f, w, h); }
	public static CustomFFmpegFrameRecorder createDefault(String f, int w, int h) throws Exception { return new CustomFFmpegFrameRecorder(f, w, h); }

	private static Exception loadingException = null;
	public static void tryLoad() throws Exception {
		if (loadingException != null) {
			throw loadingException;
		} else {
			try {
				Loader.load(org.bytedeco.javacpp.avutil.class);
				Loader.load(org.bytedeco.javacpp.swresample.class);
				Loader.load(org.bytedeco.javacpp.avcodec.class);
				Loader.load(org.bytedeco.javacpp.avformat.class);
				Loader.load(org.bytedeco.javacpp.swscale.class);

				/* initialize libavcodec, and register all codecs and formats */
				av_jni_set_java_vm(Loader.getJavaVM(), null);
				avcodec_register_all();
				av_register_all();
				avformat_network_init();

				Loader.load(org.bytedeco.javacpp.avdevice.class);
				avdevice_register_all();
			} catch (Throwable t) {
				if (t instanceof Exception) {
					throw loadingException = (Exception)t;
				} else {
					throw loadingException = new Exception("Failed to load " + FFmpegFrameRecorder.class, t);
				}
			}
		}
	}

	static {
		try {
			tryLoad();
			FFmpegLockCallback.init();
		} catch (Exception ex) { }
	}

	public CustomFFmpegFrameRecorder(File file, int audioChannels) {
		this(file, 0, 0, audioChannels);
	}
	public CustomFFmpegFrameRecorder(String filename, int audioChannels) {
		this(filename, 0, 0, audioChannels);
	}
	public CustomFFmpegFrameRecorder(File file, int imageWidth, int imageHeight) {
		this(file, imageWidth, imageHeight, 0);
	}
	public CustomFFmpegFrameRecorder(String filename, int imageWidth, int imageHeight) {
		this(filename, imageWidth, imageHeight, 0);
	}
	public CustomFFmpegFrameRecorder(File file, int imageWidth, int imageHeight, int audioChannels) {
		this(file.getAbsolutePath(), imageWidth, imageHeight, audioChannels);
	}
	public CustomFFmpegFrameRecorder(String filename, int imageWidth, int imageHeight, int audioChannels) {
		this.filename      = filename;
		this.imageWidth    = imageWidth;
		this.imageHeight   = imageHeight;
		this.audioChannels = audioChannels;

		this.pixelFormat   = AV_PIX_FMT_NONE;
		this.videoCodec    = AV_CODEC_ID_NONE;
		this.videoBitrate  = 400000;
		this.frameRate     = 30;

		this.sampleFormat  = AV_SAMPLE_FMT_NONE;
		this.audioCodec    = AV_CODEC_ID_NONE;
		this.audioBitrate  = 64000;
		this.sampleRate    = 44100;

		this.interleaved = true;

		this.video_pkt = new AVPacket();
		this.audio_pkt = new AVPacket();
	}
	public CustomFFmpegFrameRecorder(OutputStream outputStream, int audioChannels) {
		this(outputStream.toString(), audioChannels);
		this.outputStream = outputStream;
	}
	public CustomFFmpegFrameRecorder(OutputStream outputStream, int imageWidth, int imageHeight) {
		this(outputStream.toString(), imageWidth, imageHeight);
		this.outputStream = outputStream;
	}
	public CustomFFmpegFrameRecorder(OutputStream outputStream, int imageWidth, int imageHeight, int audioChannels) {
		this(outputStream.toString(), imageWidth, imageHeight, audioChannels);
		this.outputStream = outputStream;
	}
	public void release() throws Exception {
		// synchronized (org.bytedeco.javacpp.avcodec.class) {
		releaseUnsafe();
		// }
	}
	void releaseUnsafe() throws Exception {
		/* close each codec */
		if (video_c != null) {
			avcodec_free_context(video_c);
			video_c = null;
		}
		if (audio_c != null) {
			avcodec_free_context(audio_c);
			audio_c = null;
		}
		if (picture_buf != null) {
			av_free(picture_buf);
			picture_buf = null;
		}
		if (picture != null) {
			av_frame_free(picture);
			picture = null;
		}
		if (tmp_picture != null) {
			av_frame_free(tmp_picture);
			tmp_picture = null;
		}
		if (video_outbuf != null) {
			av_free(video_outbuf);
			video_outbuf = null;
		}
		if (frame != null) {
			av_frame_free(frame);
			frame = null;
		}
		if (samples_out != null) {
			for (int i = 0; i < samples_out.length; i++) {
				av_free(samples_out[i].position(0));
			}
			samples_out = null;
		}
		if (audio_outbuf != null) {
			av_free(audio_outbuf);
			audio_outbuf = null;
		}
		if (video_st != null && video_st.metadata() != null) {
			av_dict_free(video_st.metadata());
			video_st.metadata(null);
		}
		if (audio_st != null && audio_st.metadata() != null) {
			av_dict_free(audio_st.metadata());
			audio_st.metadata(null);
		}
		video_st = null;
		audio_st = null;
		filename = null;

		AVFormatContext outputStreamKey = oc;
		if (oc != null && !oc.isNull()) {
			if (outputStream == null && (oformat.flags() & AVFMT_NOFILE) == 0) {
				/* close the output file */
				avio_close(oc.pb());
			}

			/* free the streams */
			int nb_streams = oc.nb_streams();
			for(int i = 0; i < nb_streams; i++) {
				av_free(oc.streams(i).codec());
				av_free(oc.streams(i));
			}

			/* free metadata */
			if (oc.metadata() != null) {
				av_dict_free(oc.metadata());
				oc.metadata(null);
			}

			/* free the stream */
			av_free(oc);
			oc = null;
		}

		if (img_convert_ctx != null) {
			sws_freeContext(img_convert_ctx);
			img_convert_ctx = null;
		}

		if (samples_convert_ctx != null) {
			swr_free(samples_convert_ctx);
			samples_convert_ctx = null;
		}

		if (outputStream != null) {
			try {
				outputStream.close();
			} catch (IOException ex) {
				throw new Exception("Error on OutputStream.close(): ", ex);
			} finally {
				outputStream = null;
				outputStreams.remove(outputStreamKey);
				if (avio != null) {
					if (avio.buffer() != null) {
						av_free(avio.buffer());
						avio.buffer(null);
					}
					av_free(avio);
					avio = null;
				}
			}
		}
	}
	@Override protected void finalize() throws Throwable {
		super.finalize();
		release();
	}

	static Map<Pointer,OutputStream> outputStreams = Collections.synchronizedMap(new HashMap<Pointer,OutputStream>());

	static class WriteCallback extends Write_packet_Pointer_BytePointer_int {
		@Override public int call(Pointer opaque, BytePointer buf, int buf_size) {
			try {
				byte[] b = new byte[buf_size];
				OutputStream os = outputStreams.get(opaque);
				buf.get(b, 0, buf_size);
				os.write(b, 0, buf_size);
				return buf_size;
			}
			catch (Throwable t) {
				System.err.println("Error on OutputStream.write(): " + t);
				return -1;
			}
		}
	}

	static WriteCallback writeCallback = new WriteCallback();
	static {
		PointerScope s = PointerScope.getInnerScope();
		if (s != null) {
			s.detach(writeCallback);
		}
	}

	private OutputStream outputStream;
	private AVIOContext avio;
	private String filename;
	private AVFrame picture, tmp_picture;
	private BytePointer picture_buf;
	private BytePointer video_outbuf;
	private int video_outbuf_size;
	private AVFrame frame;
	private Pointer[] samples_in;
	private BytePointer[] samples_out;
	private PointerPointer samples_in_ptr;
	private PointerPointer samples_out_ptr;
	private BytePointer audio_outbuf;
	private int audio_outbuf_size;
	private int audio_input_frame_size;
	private AVOutputFormat oformat;
	private AVFormatContext oc;
	private AVCodec video_codec, audio_codec;
	private AVCodecContext video_c, audio_c;
	private AVStream video_st, audio_st;
	private SwsContext img_convert_ctx;
	private SwrContext samples_convert_ctx;
	private int samples_channels, samples_format, samples_rate;
	private AVPacket video_pkt, audio_pkt;
	private int[] got_video_packet, got_audio_packet;
	private AVFormatContext ifmt_ctx;

	@Override public int getFrameNumber() {
		return picture == null ? super.getFrameNumber() : (int)picture.pts();
	}
	@Override public void setFrameNumber(int frameNumber) {
		if (picture == null) { super.setFrameNumber(frameNumber); } else { picture.pts(frameNumber); }
	}

	// best guess for timestamp in microseconds...
	@Override public long getTimestamp() {
		return Math.round(getFrameNumber() * 1000000L / getFrameRate());
	}
	@Override public void setTimestamp(long timestamp)  {
		setFrameNumber((int)Math.round(timestamp * getFrameRate() / 1000000L));
	}

	public void start(AVFormatContext ifmt_ctx) throws Exception {
		this.ifmt_ctx = ifmt_ctx;
		start();
	}

	public void start() throws Exception {
		// synchronized (org.bytedeco.javacpp.avcodec.class) {
		startUnsafe();
		// }
	}

	void startUnsafe() throws Exception {
		int ret;
		picture = null;
		tmp_picture = null;
		picture_buf = null;
		frame = null;
		video_outbuf = null;
		audio_outbuf = null;
		oc = new AVFormatContext(null);
		video_c = null;
		audio_c = null;
		video_st = null;
		audio_st = null;
		got_video_packet = new int[1];
		got_audio_packet = new int[1];

		/* auto detect the output format from the name. */
		String format_name = format == null || format.length() == 0 ? null : format;
		if ((oformat = av_guess_format(format_name, filename, null)) == null) {
			int proto = filename.indexOf("://");
			if (proto > 0) {
				format_name = filename.substring(0, proto);
			}
			if ((oformat = av_guess_format(format_name, filename, null)) == null) {
				throw new Exception("av_guess_format() error: Could not guess output format for \"" + filename + "\" and " + format + " format.");
			}
		}
		format_name = oformat.name().getString();

		/* allocate the output media context */
		if (avformat_alloc_output_context2(oc, null, format_name, filename) < 0) {
			throw new Exception("avformat_alloc_context2() error:\tCould not allocate format context");
		}

		if (outputStream != null) {
			avio = avio_alloc_context(new BytePointer(av_malloc(4096)), 4096, 1, oc, null, writeCallback, null);
			oc.pb(avio);

			filename = outputStream.toString();
			outputStreams.put(oc, outputStream);
		}
		oc.oformat(oformat);
		oc.filename().putString(filename);
		oc.max_delay(maxDelay);

		/* add the audio and video streams using the format codecs
           and initialize the codecs */
		AVStream inpVideoStream = null, inpAudioStream = null;
		if (ifmt_ctx != null) {
			// get input video and audio stream indices from ifmt_ctx
			for (int idx = 0; idx < ifmt_ctx.nb_streams(); idx++) {
				AVStream inputStream = ifmt_ctx.streams(idx);
				if (inputStream.codec().codec_type() == AVMEDIA_TYPE_VIDEO) {
					inpVideoStream = inputStream;
					videoCodec = inpVideoStream.codec().codec_id();
					if (inpVideoStream.r_frame_rate().num() != AV_NOPTS_VALUE && inpVideoStream.r_frame_rate().den() != 0) {
						frameRate = (inpVideoStream.r_frame_rate().num()) / (inpVideoStream.r_frame_rate().den());
					}

				} else if (inputStream.codec().codec_type() == AVMEDIA_TYPE_AUDIO) {
					inpAudioStream = inputStream;
					audioCodec = inpAudioStream.codec().codec_id();
				}
			}
		}

		if (imageWidth > 0 && imageHeight > 0) {
			if (videoCodec != AV_CODEC_ID_NONE) {
				oformat.video_codec(videoCodec);
			} else if ("flv".equals(format_name)) {
				oformat.video_codec(AV_CODEC_ID_FLV1);
			} else if ("mp4".equals(format_name)) {
				oformat.video_codec(AV_CODEC_ID_MPEG4);
			} else if ("3gp".equals(format_name)) {
				oformat.video_codec(AV_CODEC_ID_H263);
			} else if ("avi".equals(format_name)) {
				oformat.video_codec(AV_CODEC_ID_HUFFYUV);
			}

			/* find the video encoder */
			if ((video_codec = avcodec_find_encoder_by_name(videoCodecName)) == null &&
					(video_codec = avcodec_find_encoder(oformat.video_codec())) == null) {
				release();
				throw new Exception("avcodec_find_encoder() error: Video codec not found.");
			}
			oformat.video_codec(video_codec.id());

			AVRational frame_rate = av_d2q(frameRate, 1001000);
			AVRational supported_framerates = video_codec.supported_framerates();
			if (supported_framerates != null) {
				int idx = av_find_nearest_q_idx(frame_rate, supported_framerates);
				frame_rate = supported_framerates.position(idx);
			}

			/* add a video output stream */
			if ((video_st = avformat_new_stream(oc, null)) == null) {
				release();
				throw new Exception("avformat_new_stream() error: Could not allocate video stream.");
			}

			if ((video_c = avcodec_alloc_context3(video_codec)) == null) {
				release();
				throw new Exception("avcodec_alloc_context3() error: Could not allocate video encoding context.");
			}

			if (inpVideoStream != null) {
				if ((ret = avcodec_copy_context(video_st.codec(), inpVideoStream.codec())) < 0) {
					release();
					throw new Exception("avcodec_copy_context() error:\tFailed to copy context from input to output stream codec context");
				}

				videoBitrate = (int) inpVideoStream.codec().bit_rate();
				pixelFormat = inpVideoStream.codec().pix_fmt();
				aspectRatio = inpVideoStream.codec().sample_aspect_ratio().num() * 1.d / inpVideoStream.codec().sample_aspect_ratio().den();
				videoQuality = inpVideoStream.codec().global_quality();
				video_c.codec_tag(0);
			}

			video_c.codec_id(oformat.video_codec());
			video_c.codec_type(AVMEDIA_TYPE_VIDEO);


			/* put sample parameters */
			video_c.bit_rate(videoBitrate);
			/* resolution must be a multiple of two. Scale height to maintain the aspect ratio. */
			if (imageWidth % 2 == 1) {
				int roundedWidth = imageWidth + 1;
				imageHeight = (roundedWidth * imageHeight + imageWidth / 2) / imageWidth;
				imageWidth = roundedWidth;
			}
			video_c.width(imageWidth);
			video_c.height(imageHeight);
			if (aspectRatio > 0) {
				AVRational r = av_d2q(aspectRatio, 255);
				video_c.sample_aspect_ratio(r);
				video_st.sample_aspect_ratio(r);
			}
			/* time base: this is the fundamental unit of time (in seconds) in terms
               of which frame timestamps are represented. for fixed-fps content,
               timebase should be 1/framerate and timestamp increments should be
               identically 1. */
			AVRational time_base = av_inv_q(frame_rate);
			video_c.time_base(time_base);
			video_st.time_base(time_base);
			video_st.avg_frame_rate(frame_rate);
			video_st.codec().time_base(time_base); // "deprecated", but this is actually required
			if (gopSize >= 0) {
				video_c.gop_size(gopSize); /* emit one intra frame every gopSize frames at most */
			}
			if (videoQuality >= 0) {
				video_c.flags(video_c.flags() | AV_CODEC_FLAG_QSCALE);
				video_c.global_quality((int)Math.round(FF_QP2LAMBDA * videoQuality));
			}

			if (pixelFormat != AV_PIX_FMT_NONE) {
				video_c.pix_fmt(pixelFormat);
			} else if (video_c.codec_id() == AV_CODEC_ID_RAWVIDEO || video_c.codec_id() == AV_CODEC_ID_PNG ||
					video_c.codec_id() == AV_CODEC_ID_HUFFYUV  || video_c.codec_id() == AV_CODEC_ID_FFV1) {
				video_c.pix_fmt(AV_PIX_FMT_RGB32);   // appropriate for common lossless formats
			} else if (video_c.codec_id() == AV_CODEC_ID_JPEGLS) {
				video_c.pix_fmt(AV_PIX_FMT_BGR24);
			} else if (video_c.codec_id() == AV_CODEC_ID_MJPEG || video_c.codec_id() == AV_CODEC_ID_MJPEGB) {
				video_c.pix_fmt(AV_PIX_FMT_YUVJ420P);
			} else {
				video_c.pix_fmt(AV_PIX_FMT_YUV420P); // lossy, but works with about everything
			}

			if (video_c.codec_id() == AV_CODEC_ID_MPEG2VIDEO) {
				/* just for testing, we also add B frames */
				video_c.max_b_frames(2);
			} else if (video_c.codec_id() == AV_CODEC_ID_MPEG1VIDEO) {
				/* Needed to avoid using macroblocks in which some coeffs overflow.
                   This does not happen with normal video, it just happens here as
                   the motion of the chroma plane does not match the luma plane. */
				video_c.mb_decision(2);
			} else if (video_c.codec_id() == AV_CODEC_ID_H263) {
				// H.263 does not support any other resolution than the following
				if (imageWidth <= 128 && imageHeight <= 96) {
					video_c.width(128).height(96);
				} else if (imageWidth <= 176 && imageHeight <= 144) {
					video_c.width(176).height(144);
				} else if (imageWidth <= 352 && imageHeight <= 288) {
					video_c.width(352).height(288);
				} else if (imageWidth <= 704 && imageHeight <= 576) {
					video_c.width(704).height(576);
				} else {
					video_c.width(1408).height(1152);
				}
			} else if (video_c.codec_id() == AV_CODEC_ID_H264) {
				// default to constrained baseline to produce content that plays back on anything,
				// without any significant tradeoffs for most use cases
				video_c.profile(AVCodecContext.FF_PROFILE_H264_CONSTRAINED_BASELINE);
			}

			// some formats want stream headers to be separate
			if ((oformat.flags() & AVFMT_GLOBALHEADER) != 0) {
				video_c.flags(video_c.flags() | AV_CODEC_FLAG_GLOBAL_HEADER);
			}

			if ((video_codec.capabilities() & AV_CODEC_CAP_EXPERIMENTAL) != 0) {
				video_c.strict_std_compliance(AVCodecContext.FF_COMPLIANCE_EXPERIMENTAL);
			}

			if (maxBFrames >= 0) {
				video_c.max_b_frames(maxBFrames);
				video_c.has_b_frames(maxBFrames == 0 ? 0 : 1);
			}

			if (trellis >= 0) {
				video_c.trellis(trellis);
			}
		}

		/*
		 * add an audio output stream
		 */
		if (audioChannels > 0 && audioBitrate > 0 && sampleRate > 0) {
			if (audioCodec != AV_CODEC_ID_NONE) {
				oformat.audio_codec(audioCodec);
			} else if ("flv".equals(format_name) || "mp4".equals(format_name) || "3gp".equals(format_name)) {
				oformat.audio_codec(AV_CODEC_ID_AAC);
			} else if ("avi".equals(format_name)) {
				oformat.audio_codec(AV_CODEC_ID_PCM_S16LE);
			}

			/* find the audio encoder */
			if ((audio_codec = avcodec_find_encoder_by_name(audioCodecName)) == null &&
					(audio_codec = avcodec_find_encoder(oformat.audio_codec())) == null) {
				release();
				throw new Exception("avcodec_find_encoder() error: Audio codec not found.");
			}
			oformat.audio_codec(audio_codec.id());

			if ((audio_st = avformat_new_stream(oc, null)) == null) {
				release();
				throw new Exception("avformat_new_stream() error: Could not allocate audio stream.");
			}

			if ((audio_c = avcodec_alloc_context3(audio_codec)) == null) {
				release();
				throw new Exception("avcodec_alloc_context3() error: Could not allocate audio encoding context.");
			}

			if(inpAudioStream != null && audioChannels > 0){
				if ((ret = avcodec_copy_context(audio_st.codec(), inpAudioStream.codec()))  < 0) {
					throw new Exception("avcodec_copy_context() error:\tFailed to copy context from input audio to output audio stream codec context\n");
				}

				audioBitrate = (int) inpAudioStream.codec().bit_rate();
				sampleRate = inpAudioStream.codec().sample_rate();
				audioChannels = inpAudioStream.codec().channels();
				sampleFormat = inpAudioStream.codec().sample_fmt();
				audioQuality = inpAudioStream.codec().global_quality();
				audio_c.codec_tag(0);
				//                audio_st.pts(inpAudioStream.pts());
				audio_st.duration(inpAudioStream.duration());
				audio_st.time_base().num(inpAudioStream.time_base().num());
				audio_st.time_base().den(inpAudioStream.time_base().den());
			}

			audio_c.codec_id(oformat.audio_codec());
			audio_c.codec_type(AVMEDIA_TYPE_AUDIO);


			/* put sample parameters */
			audio_c.bit_rate(audioBitrate);
			audio_c.sample_rate(sampleRate);
			audio_c.channels(audioChannels);
			audio_c.channel_layout(av_get_default_channel_layout(audioChannels));
			if (sampleFormat != AV_SAMPLE_FMT_NONE) {
				audio_c.sample_fmt(sampleFormat);
			} else {
				// use AV_SAMPLE_FMT_S16 by default, if available
				audio_c.sample_fmt(AV_SAMPLE_FMT_FLTP);
				IntPointer formats = audio_c.codec().sample_fmts();
				for (int i = 0; formats.get(i) != -1; i++) {
					if (formats.get(i) == AV_SAMPLE_FMT_S16) {
						audio_c.sample_fmt(AV_SAMPLE_FMT_S16);
						break;
					}
				}
			}
			audio_c.time_base().num(1).den(sampleRate);
			audio_st.time_base().num(1).den(sampleRate);
			switch (audio_c.sample_fmt()) {
			case AV_SAMPLE_FMT_U8:
			case AV_SAMPLE_FMT_U8P:  audio_c.bits_per_raw_sample(8);  break;
			case AV_SAMPLE_FMT_S16:
			case AV_SAMPLE_FMT_S16P: audio_c.bits_per_raw_sample(16); break;
			case AV_SAMPLE_FMT_S32:
			case AV_SAMPLE_FMT_S32P: audio_c.bits_per_raw_sample(32); break;
			case AV_SAMPLE_FMT_FLT:
			case AV_SAMPLE_FMT_FLTP: audio_c.bits_per_raw_sample(32); break;
			case AV_SAMPLE_FMT_DBL:
			case AV_SAMPLE_FMT_DBLP: audio_c.bits_per_raw_sample(64); break;
			default: assert false;
			}
			if (audioQuality >= 0) {
				audio_c.flags(audio_c.flags() | AV_CODEC_FLAG_QSCALE);
				audio_c.global_quality((int)Math.round(FF_QP2LAMBDA * audioQuality));
			}

			// some formats want stream headers to be separate
			if ((oformat.flags() & AVFMT_GLOBALHEADER) != 0) {
				audio_c.flags(audio_c.flags() | AV_CODEC_FLAG_GLOBAL_HEADER);
			}

			if ((audio_codec.capabilities() & AV_CODEC_CAP_EXPERIMENTAL) != 0) {
				audio_c.strict_std_compliance(AVCodecContext.FF_COMPLIANCE_EXPERIMENTAL);
			}
		}

		/* now that all the parameters are set, we can open the audio and
           video codecs and allocate the necessary encode buffers */
		if (video_st != null && inpVideoStream == null) {
			AVDictionary options = new AVDictionary(null);
			if (videoQuality >= 0) {
				av_dict_set(options, "crf", "" + videoQuality, 0);
			}
			for (Entry<String, String> e : videoOptions.entrySet()) {
				av_dict_set(options, e.getKey(), e.getValue(), 0);
			}
			/* open the codec */
			if ((ret = avcodec_open2(video_c, video_codec, options)) < 0) {
				release();
				av_dict_free(options);
				throw new Exception("avcodec_open2() error " + ret + ": Could not open video codec.");
			}
			av_dict_free(options);

			video_outbuf = null;
			//            if ((oformat.flags() & AVFMT_RAWPICTURE) == 0) {
			//                /* allocate output buffer */
			//                /* XXX: API change will be done */
			//                /* buffers passed into lav* can be allocated any way you prefer,
			//                   as long as they're aligned enough for the architecture, and
			//                   they're freed appropriately (such as using av_free for buffers
			//                   allocated with av_malloc) */
			//                video_outbuf_size = Math.max(256 * 1024, 8 * video_c.width() * video_c.height()); // a la ffmpeg.c
			//                video_outbuf = new BytePointer(av_malloc(video_outbuf_size));
			//            }

			/* allocate the encoded raw picture */
			if ((picture = av_frame_alloc()) == null) {
				release();
				throw new Exception("av_frame_alloc() error: Could not allocate picture.");
			}
			picture.pts(0); // magic required by libx264

			int size = av_image_get_buffer_size(video_c.pix_fmt(), video_c.width(), video_c.height(), 1);
			if ((picture_buf = new BytePointer(av_malloc(size))).isNull()) {
				release();
				throw new Exception("av_malloc() error: Could not allocate picture buffer.");
			}

			/* if the output format is not equal to the image format, then a temporary
               picture is needed too. It is then converted to the required output format */
			if ((tmp_picture = av_frame_alloc()) == null) {
				release();
				throw new Exception("av_frame_alloc() error: Could not allocate temporary picture.");
			}

			/* copy the stream parameters to the muxer */
			if ((ret = avcodec_parameters_from_context(video_st.codecpar(), video_c)) < 0) {
				release();
				throw new Exception("avcodec_parameters_from_context() error: Could not copy the video stream parameters.");
			}

			AVDictionary metadata = new AVDictionary(null);
			for (Entry<String, String> e : videoMetadata.entrySet()) {
				av_dict_set(metadata, e.getKey(), e.getValue(), 0);
			}
			video_st.metadata(metadata);
		}

		if (audio_st != null && inpAudioStream == null) {
			AVDictionary options = new AVDictionary(null);
			if (audioQuality >= 0) {
				av_dict_set(options, "crf", "" + audioQuality, 0);
			}
			for (Entry<String, String> e : audioOptions.entrySet()) {
				av_dict_set(options, e.getKey(), e.getValue(), 0);
			}
			/* open the codec */
			if ((ret = avcodec_open2(audio_c, audio_codec, options)) < 0) {
				release();
				av_dict_free(options);
				throw new Exception("avcodec_open2() error " + ret + ": Could not open audio codec.");
			}
			av_dict_free(options);

			audio_outbuf_size = 256 * 1024;
			audio_outbuf = new BytePointer(av_malloc(audio_outbuf_size));

			/* ugly hack for PCM codecs (will be removed ASAP with new PCM
               support to compute the input frame size in samples */
			if (audio_c.frame_size() <= 1) {
				audio_outbuf_size = AV_INPUT_BUFFER_MIN_SIZE;
				audio_input_frame_size = audio_outbuf_size / audio_c.channels();
				switch (audio_c.codec_id()) {
				case AV_CODEC_ID_PCM_S16LE:
				case AV_CODEC_ID_PCM_S16BE:
				case AV_CODEC_ID_PCM_U16LE:
				case AV_CODEC_ID_PCM_U16BE:
					audio_input_frame_size >>= 1;
					break;
				default:
					break;
				}
			} else {
				audio_input_frame_size = audio_c.frame_size();
			}
			//int bufferSize = audio_input_frame_size * audio_c.bits_per_raw_sample()/8 * audio_c.channels();
			int planes = av_sample_fmt_is_planar(audio_c.sample_fmt()) != 0 ? (int)audio_c.channels() : 1;
			int data_size = av_samples_get_buffer_size((IntPointer)null, audio_c.channels(),
					audio_input_frame_size, audio_c.sample_fmt(), 1) / planes;
			samples_out = new BytePointer[planes];
			for (int i = 0; i < samples_out.length; i++) {
				samples_out[i] = new BytePointer(av_malloc(data_size)).capacity(data_size);
			}
			samples_in = new Pointer[AVFrame.AV_NUM_DATA_POINTERS];
			samples_in_ptr  = new PointerPointer(AVFrame.AV_NUM_DATA_POINTERS);
			samples_out_ptr = new PointerPointer(AVFrame.AV_NUM_DATA_POINTERS);

			/* allocate the audio frame */
			if ((frame = av_frame_alloc()) == null) {
				release();
				throw new Exception("av_frame_alloc() error: Could not allocate audio frame.");
			}
			frame.pts(0); // magic required by libvorbis and webm

			/* copy the stream parameters to the muxer */
			if ((ret = avcodec_parameters_from_context(audio_st.codecpar(), audio_c)) < 0) {
				release();
				throw new Exception("avcodec_parameters_from_context() error: Could not copy the audio stream parameters.");
			}

			AVDictionary metadata = new AVDictionary(null);
			for (Entry<String, String> e : audioMetadata.entrySet()) {
				av_dict_set(metadata, e.getKey(), e.getValue(), 0);
			}
			audio_st.metadata(metadata);
		}

		AVDictionary options = new AVDictionary(null);
		for (Entry<String, String> e : this.options.entrySet()) {
			av_dict_set(options, e.getKey(), e.getValue(), 0);
		}

		/* open the output file, if needed */
		if (outputStream == null && (oformat.flags() & AVFMT_NOFILE) == 0) {
			AVIOContext pb = new AVIOContext(null);
			if ((ret = avio_open2(pb, filename, AVIO_FLAG_WRITE, null, options)) < 0) {
				release();
				av_dict_free(options);
				throw new Exception("avio_open2 error() error " + ret + ": Could not open '" + filename + "'");
			}
			oc.pb(pb);
		}

		AVDictionary metadata = new AVDictionary(null);
		for (Entry<String, String> e : this.metadata.entrySet()) {
			av_dict_set(metadata, e.getKey(), e.getValue(), 0);
		}
		/* write the stream header, if any */
		avformat_write_header(oc.metadata(metadata), options);
		av_dict_free(options);

		if (av_log_get_level() >= AV_LOG_INFO) {
			av_dump_format(oc, 0, filename, 1);
		}
	}

	public void stop() throws Exception {
		if (oc != null) {
			try {
				synchronized (oc) {
					/* flush all the buffers */
					while (video_st != null && ifmt_ctx == null && recordImage(0, 0, 0, 0, 0, AV_PIX_FMT_NONE, (Buffer[])null));
					while (audio_st != null && ifmt_ctx == null && recordSamples(0, 0, (Buffer[])null));

					if (interleaved && video_st != null && audio_st != null) {
						av_interleaved_write_frame(oc, null);
					} else {
						av_write_frame(oc, null);
					}

					/* write the trailer, if any */
					av_write_trailer(oc);
				}
			} finally {
				release();
			}
		}
	}

	@Override public void record(Frame frame) throws Exception {
		record(frame, AV_PIX_FMT_NONE);
	}
	public void record(Frame frame, int pixelFormat) throws Exception {
		if (frame == null || (frame.image == null && frame.samples == null)) {
			recordImage(0, 0, 0, 0, 0, pixelFormat, (Buffer[])null);
		} else {
			if (frame.image != null) {
				frame.keyFrame = recordImage(frame.imageWidth, frame.imageHeight, frame.imageDepth,
						frame.imageChannels, frame.imageStride, pixelFormat, frame.image);
			}
			if (frame.samples != null) {
				frame.keyFrame = recordSamples(frame.sampleRate, frame.audioChannels, frame.samples);
			}
		}
	}

	public boolean recordImage(int width, int height, int depth, int channels, int stride, int pixelFormat, Buffer ... image) throws Exception {
		if (video_st == null) {
			throw new Exception("No video output stream (Is imageWidth > 0 && imageHeight > 0 and has start() been called?)");
		}
		int ret;

		if (image == null || image.length == 0) {
			/* no more frame to compress. The codec has a latency of a few
               frames if using B frames, so we get the last frames by
               passing the same picture again */
		} else {
			int step = stride * Math.abs(depth) / 8;
			BytePointer data = image[0] instanceof ByteBuffer
					? new BytePointer((ByteBuffer)image[0].position(0))
					: new BytePointer(new Pointer(image[0].position(0)));

			if (pixelFormat == AV_PIX_FMT_NONE) {
				if ((depth == Frame.DEPTH_UBYTE || depth == Frame.DEPTH_BYTE) && channels == 3) {
					pixelFormat = AV_PIX_FMT_BGR24;
				} else if ((depth == Frame.DEPTH_UBYTE || depth == Frame.DEPTH_BYTE) && channels == 1) {
					pixelFormat = AV_PIX_FMT_GRAY8;
				} else if ((depth == Frame.DEPTH_USHORT || depth == Frame.DEPTH_SHORT) && channels == 1) {
					pixelFormat = ByteOrder.nativeOrder().equals(ByteOrder.BIG_ENDIAN) ?
							AV_PIX_FMT_GRAY16BE : AV_PIX_FMT_GRAY16LE;
				} else if ((depth == Frame.DEPTH_UBYTE || depth == Frame.DEPTH_BYTE) && channels == 4) {
					pixelFormat = AV_PIX_FMT_RGBA;
				} else if ((depth == Frame.DEPTH_UBYTE || depth == Frame.DEPTH_BYTE) && channels == 2) {
					pixelFormat = AV_PIX_FMT_NV21; // Android's camera capture format
				} else {
					throw new Exception("Could not guess pixel format of image: depth=" + depth + ", channels=" + channels);
				}
			}

			if (pixelFormat == AV_PIX_FMT_NV21) {
				step = width;
			}

			if (video_c.pix_fmt() != pixelFormat || video_c.width() != width || video_c.height() != height) {
				/* convert to the codec pixel format if needed */
				img_convert_ctx = sws_getCachedContext(img_convert_ctx, width, height, pixelFormat,
						video_c.width(), video_c.height(), video_c.pix_fmt(),
						imageScalingFlags != 0 ? imageScalingFlags : SWS_BILINEAR,
						null, null, (DoublePointer)null);
				if (img_convert_ctx == null) {
					throw new Exception("sws_getCachedContext() error: Cannot initialize the conversion context.");
				}
				av_image_fill_arrays(new PointerPointer(tmp_picture), tmp_picture.linesize(), data, pixelFormat, width, height, 1);
				av_image_fill_arrays(new PointerPointer(picture), picture.linesize(), picture_buf, video_c.pix_fmt(), video_c.width(), video_c.height(), 1);
				tmp_picture.linesize(0, step);
				tmp_picture.format(pixelFormat);
				tmp_picture.width(width);
				tmp_picture.height(height);
				picture.format(video_c.pix_fmt());
				picture.width(video_c.width());
				picture.height(video_c.height());
				sws_scale(img_convert_ctx, new PointerPointer(tmp_picture), tmp_picture.linesize(),
						0, height, new PointerPointer(picture), picture.linesize());
			} else {
				av_image_fill_arrays(new PointerPointer(picture), picture.linesize(), data, pixelFormat, width, height, 1);
				picture.linesize(0, step);
				picture.format(pixelFormat);
				picture.width(width);
				picture.height(height);
			}
		}

		//        if ((oformat.flags() & AVFMT_RAWPICTURE) != 0) {
		//            if (image == null || image.length == 0) {
		//                return false;
		//            }
		//            /* raw video case. The API may change slightly in the future for that? */
		//            av_init_packet(video_pkt);
		//            video_pkt.flags(video_pkt.flags() | AV_PKT_FLAG_KEY);
		//            video_pkt.stream_index(video_st.index());
		//            video_pkt.data(new BytePointer(picture));
		//            video_pkt.size(Loader.sizeof(AVFrame.class));
		//        } else {
		/* encode the image */
		av_init_packet(video_pkt);
		video_pkt.data(video_outbuf);
		video_pkt.size(video_outbuf_size);
		picture.quality(video_c.global_quality());
		if ((ret = avcodec_encode_video2(video_c, video_pkt, image == null || image.length == 0 ? null : picture, got_video_packet)) < 0) {
			throw new Exception("avcodec_encode_video2() error " + ret + ": Could not encode video packet.");
		}
		picture.pts(picture.pts() + 1); // magic required by libx264

		/* if zero size, it means the image was buffered */
		if (got_video_packet[0] != 0) {
			if (video_pkt.pts() != AV_NOPTS_VALUE) {
				video_pkt.pts(av_rescale_q(video_pkt.pts(), video_c.time_base(), video_st.time_base()));
			}
			if (video_pkt.dts() != AV_NOPTS_VALUE) {
				video_pkt.dts(av_rescale_q(video_pkt.dts(), video_c.time_base(), video_st.time_base()));
			}
			video_pkt.stream_index(video_st.index());
		} else {
			return false;
		}
		//        }

		writePacket(AVMEDIA_TYPE_VIDEO, video_pkt);
		return image != null ? (video_pkt.flags() & AV_PKT_FLAG_KEY) != 0 : got_video_packet[0] != 0;
	}

	public boolean recordSamples(Buffer ... samples) throws Exception {
		return recordSamples(0, 0, samples);
	}
	public boolean recordSamples(int sampleRate, int audioChannels, Buffer ... samples) throws Exception {
		if (audio_st == null) {
			throw new Exception("No audio output stream (Is audioChannels > 0 and has start() been called?)");
		}

		if (samples == null && samples_out[0].position() > 0) {
			// Typically samples_out[0].limit() is double the audio_input_frame_size --> sampleDivisor = 2
			double sampleDivisor = Math.floor((int)Math.min(samples_out[0].limit(), Integer.MAX_VALUE) / audio_input_frame_size);
			writeSamples((int)Math.floor((int)samples_out[0].position() / sampleDivisor));
			return record((AVFrame)null);
		}

		int ret;

		if (sampleRate <= 0) {
			sampleRate = audio_c.sample_rate();
		}
		if (audioChannels <= 0) {
			audioChannels = audio_c.channels();
		}
		int inputSize = samples != null ? samples[0].limit() - samples[0].position() : 0;
		int inputFormat = samples_format;
		int inputChannels = samples != null && samples.length > 1 ? 1 : audioChannels;
		int inputDepth = 0;
		int outputFormat = audio_c.sample_fmt();
		int outputChannels = samples_out.length > 1 ? 1 : audio_c.channels();
		int outputDepth = av_get_bytes_per_sample(outputFormat);
		if (samples != null && samples[0] instanceof ByteBuffer) {
			inputFormat = samples.length > 1 ? AV_SAMPLE_FMT_U8P : AV_SAMPLE_FMT_U8;
			inputDepth = 1;
			for (int i = 0; i < samples.length; i++) {
				ByteBuffer b = (ByteBuffer)samples[i];
				if (samples_in[i] instanceof BytePointer && samples_in[i].capacity() >= inputSize && b.hasArray()) {
					((BytePointer)samples_in[i]).position(0).put(b.array(), b.position(), inputSize);
				} else {
					samples_in[i] = new BytePointer(b);
				}
			}
		} else if (samples != null && samples[0] instanceof ShortBuffer) {
			inputFormat = samples.length > 1 ? AV_SAMPLE_FMT_S16P : AV_SAMPLE_FMT_S16;
			inputDepth = 2;
			for (int i = 0; i < samples.length; i++) {
				ShortBuffer b = (ShortBuffer)samples[i];
				if (samples_in[i] instanceof ShortPointer && samples_in[i].capacity() >= inputSize && b.hasArray()) {
					((ShortPointer)samples_in[i]).position(0).put(b.array(), samples[i].position(), inputSize);
				} else {
					samples_in[i] = new ShortPointer(b);
				}
			}
		} else if (samples != null && samples[0] instanceof IntBuffer) {
			inputFormat = samples.length > 1 ? AV_SAMPLE_FMT_S32P : AV_SAMPLE_FMT_S32;
			inputDepth = 4;
			for (int i = 0; i < samples.length; i++) {
				IntBuffer b = (IntBuffer)samples[i];
				if (samples_in[i] instanceof IntPointer && samples_in[i].capacity() >= inputSize && b.hasArray()) {
					((IntPointer)samples_in[i]).position(0).put(b.array(), samples[i].position(), inputSize);
				} else {
					samples_in[i] = new IntPointer(b);
				}
			}
		} else if (samples != null && samples[0] instanceof FloatBuffer) {
			inputFormat = samples.length > 1 ? AV_SAMPLE_FMT_FLTP : AV_SAMPLE_FMT_FLT;
			inputDepth = 4;
			for (int i = 0; i < samples.length; i++) {
				FloatBuffer b = (FloatBuffer)samples[i];
				if (samples_in[i] instanceof FloatPointer && samples_in[i].capacity() >= inputSize && b.hasArray()) {
					((FloatPointer)samples_in[i]).position(0).put(b.array(), b.position(), inputSize);
				} else {
					samples_in[i] = new FloatPointer(b);
				}
			}
		} else if (samples != null && samples[0] instanceof DoubleBuffer) {
			inputFormat = samples.length > 1 ? AV_SAMPLE_FMT_DBLP : AV_SAMPLE_FMT_DBL;
			inputDepth = 8;
			for (int i = 0; i < samples.length; i++) {
				DoubleBuffer b = (DoubleBuffer)samples[i];
				if (samples_in[i] instanceof DoublePointer && samples_in[i].capacity() >= inputSize && b.hasArray()) {
					((DoublePointer)samples_in[i]).position(0).put(b.array(), b.position(), inputSize);
				} else {
					samples_in[i] = new DoublePointer(b);
				}
			}
		} else if (samples != null) {
			throw new Exception("Audio samples Buffer has unsupported type: " + samples);
		}

		if (samples_convert_ctx == null || samples_channels != audioChannels || samples_format != inputFormat || samples_rate != sampleRate) {
			samples_convert_ctx = swr_alloc_set_opts(samples_convert_ctx, audio_c.channel_layout(), outputFormat, audio_c.sample_rate(),
					av_get_default_channel_layout(audioChannels), inputFormat, sampleRate, 0, null);
			if (samples_convert_ctx == null) {
				throw new Exception("swr_alloc_set_opts() error: Cannot allocate the conversion context.");
			} else if ((ret = swr_init(samples_convert_ctx)) < 0) {
				throw new Exception("swr_init() error " + ret + ": Cannot initialize the conversion context.");
			}
			samples_channels = audioChannels;
			samples_format = inputFormat;
			samples_rate = sampleRate;
		}

		for (int i = 0; samples != null && i < samples.length; i++) {
			samples_in[i].position(samples_in[i].position() * inputDepth).
			limit((samples_in[i].position() + inputSize) * inputDepth);
		}
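		// conversion/drain loop: keep calling swr_convert() until it produces no more
		// output; every time the output buffers fill up, flush a frame to the encoder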
		while (true) {
			int inputCount = (int)Math.min(samples != null ? (samples_in[0].limit() - samples_in[0].position()) / (inputChannels * inputDepth) : 0, Integer.MAX_VALUE);
			int outputCount = (int)Math.min((samples_out[0].limit() - samples_out[0].position()) / (outputChannels * outputDepth), Integer.MAX_VALUE);
			inputCount = Math.min(inputCount, (outputCount * sampleRate + audio_c.sample_rate() - 1) / audio_c.sample_rate());
			for (int i = 0; samples != null && i < samples.length; i++) {
				samples_in_ptr.put(i, samples_in[i]);
			}
			for (int i = 0; i < samples_out.length; i++) {
				samples_out_ptr.put(i, samples_out[i]);
			}
			if ((ret = swr_convert(samples_convert_ctx, samples_out_ptr, outputCount, samples_in_ptr, inputCount)) < 0) {
				throw new Exception("swr_convert() error " + ret + ": Cannot convert audio samples.");
			} else if (ret == 0) {
				break;
			}
			for (int i = 0; samples != null && i < samples.length; i++) {
				samples_in[i].position(samples_in[i].position() + inputCount * inputChannels * inputDepth);
			}
			for (int i = 0; i < samples_out.length; i++) {
				samples_out[i].position(samples_out[i].position() + ret * outputChannels * outputDepth);
			}

			if (samples == null || samples_out[0].position() >= samples_out[0].limit()) {
				writeSamples(audio_input_frame_size);
			}
		}
		return samples != null ? frame.key_frame() != 0 : record((AVFrame)null);
	}

	private void writeSamples(int nb_samples) throws Exception {
		if (samples_out == null || samples_out.length == 0) {
			return;
		}

		frame.nb_samples(nb_samples);
		avcodec_fill_audio_frame(frame, audio_c.channels(), audio_c.sample_fmt(), samples_out[0], (int)samples_out[0].position(), 0);
		for (int i = 0; i < samples_out.length; i++) {
			int linesize = 0;
			if (samples_out[0].position() > 0 && samples_out[0].position() < samples_out[0].limit()) {
				linesize = (int)samples_out[i].position();
			} else {
				linesize = (int)Math.min(samples_out[i].limit(), Integer.MAX_VALUE);
			}

			frame.data(i, samples_out[i].position(0));
			frame.linesize(i, linesize);
		}
		frame.quality(audio_c.global_quality());
		record(frame);
	}

	boolean record(AVFrame frame) throws Exception {
		int ret;

		av_init_packet(audio_pkt);
		audio_pkt.data(audio_outbuf);
		audio_pkt.size(audio_outbuf_size);
		if ((ret = avcodec_encode_audio2(audio_c, audio_pkt, frame, got_audio_packet)) < 0) {
			throw new Exception("avcodec_encode_audio2() error " + ret + ": Could not encode audio packet.");
		}
		if (frame != null) {
			frame.pts(frame.pts() + frame.nb_samples()); // magic required by libvorbis and webm
		}
		if (got_audio_packet[0] != 0) {
			if (audio_pkt.pts() != AV_NOPTS_VALUE) {
				audio_pkt.pts(av_rescale_q(audio_pkt.pts(), audio_c.time_base(), audio_st.time_base()));
			}
			if (audio_pkt.dts() != AV_NOPTS_VALUE) {
				audio_pkt.dts(av_rescale_q(audio_pkt.dts(), audio_c.time_base(), audio_st.time_base()));
			}
			audio_pkt.flags(audio_pkt.flags() | AV_PKT_FLAG_KEY);
			audio_pkt.stream_index(audio_st.index());
		} else {
			return false;
		}

		/* write the compressed frame in the media file */
		writePacket(AVMEDIA_TYPE_AUDIO, audio_pkt);

		return true;
	}

	private void writePacket(int mediaType, AVPacket avPacket) throws Exception {
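		// note: avStream is intentionally the *other* stream (audio_st for a video
		// packet and vice versa), so interleaved writing is only used when both
		// an audio and a video stream exist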

		AVStream avStream = (mediaType == AVMEDIA_TYPE_VIDEO) ? audio_st : (mediaType == AVMEDIA_TYPE_AUDIO) ? video_st : null;
		String mediaTypeStr = (mediaType == AVMEDIA_TYPE_VIDEO) ? "video" : (mediaType == AVMEDIA_TYPE_AUDIO) ? "audio" : "unsupported media stream type";

		synchronized (oc) {
			int ret;
			if (interleaved && avStream != null) {
				if ((ret = av_interleaved_write_frame(oc, avPacket)) < 0) {
					throw new Exception("av_interleaved_write_frame() error " + ret + " while writing interleaved " + mediaTypeStr + " packet.");
				}
			} else {
				if ((ret = av_write_frame(oc, avPacket)) < 0) {
					throw new Exception("av_write_frame() error " + ret + " while writing " + mediaTypeStr + " packet.");
				}
			}
		}
	}
	public boolean recordPacket(AVPacket pkt,long pts,long dts) throws Exception {
		try {
			if (pkt == null) {
				return false;
			}

			AVStream in_stream = ifmt_ctx.streams(pkt.stream_index());
			/**
			 * Missing dts/pts in the output file or stream causes decoding and
			 * playback errors. The two lines below are therefore commented out,
			 * so that pts/dts can be supplied manually instead.
			 */
			//        pkt.dts(AV_NOPTS_VALUE);
			//        pkt.pts(AV_NOPTS_VALUE);
			pkt.pos(-1);
			if (in_stream.codec().codec_type() == AVMEDIA_TYPE_VIDEO && video_st != null) {

				pkt.stream_index(video_st.index());
				pkt.duration((int) av_rescale_q(pkt.duration(), in_stream.codec().time_base(), video_st.codec().time_base()));
//			pkt.dts(av_rescale_q_rnd(pkt.dts(), in_stream.time_base(), video_st.time_base(),(AV_ROUND_NEAR_INF | AV_ROUND_PASS_MINMAX)));//Increase dts calculation
				pkt.pts(pts);
				pkt.dts(pts);
				writePacket(AVMEDIA_TYPE_VIDEO, pkt);

			} else if (in_stream.codec().codec_type() == AVMEDIA_TYPE_AUDIO && audio_st != null && (audioChannels > 0)) {

				pkt.stream_index(audio_st.index());
				pkt.duration((int) av_rescale_q(pkt.duration(), in_stream.codec().time_base(), audio_st.codec().time_base()));
				pkt.pts(av_rescale_q_rnd(pkt.pts(), in_stream.time_base(), audio_st.time_base(),(AV_ROUND_NEAR_INF | AV_ROUND_PASS_MINMAX)));//Increase pts calculation
				pkt.dts(av_rescale_q_rnd(pkt.dts(), in_stream.time_base(), audio_st.time_base(),(AV_ROUND_NEAR_INF | AV_ROUND_PASS_MINMAX)));//Increase dts calculation
				writePacket(AVMEDIA_TYPE_AUDIO, pkt);
			}

			return true;
		}finally {
			av_packet_unref(pkt);
			//av_freep(pkt);
		}

	}
}
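
A minimal usage sketch of the recorder above: pull packets with FFmpegFrameGrabber and remux them with manually supplied timestamps. The input name, rtmp address, resolution and the fixed 40 ms frame interval are placeholder assumptions, not values mandated by the class.

import org.bytedeco.javacpp.avcodec;
import org.bytedeco.javacv.FFmpegFrameGrabber;

public class RecorderUsageSketch {
	public static void main(String[] args) throws Exception {
		FFmpegFrameGrabber grabber = new FFmpegFrameGrabber("input.flv");
		grabber.start();
		CustomFFmpegFrameRecorder recorder =
				new CustomFFmpegFrameRecorder("rtmp://127.0.0.1/live/test", 1280, 720, 0);
		recorder.setInterleaved(true);
		recorder.setVideoCodec(avcodec.AV_CODEC_ID_H264);
		recorder.setFormat("flv");
		recorder.start(grabber.getFormatContext());
		avcodec.AVPacket pkt;
		long pts = 0;
		while ((pkt = grabber.grabPacket()) != null) {
			// pts/dts are supplied manually: 40 ms per frame, i.e. 25 fps
			recorder.recordPacket(pkt, pts, pts);
			pts += 40;
		}
		recorder.close();
		grabber.close();
	}
}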

The stream-pushing class RtmpPusher

import java.io.IOException;
import java.io.PipedInputStream;
import java.io.PipedOutputStream;
import java.util.concurrent.ConcurrentLinkedDeque;

import com.fengyulei.fylsipserver.media.netty.*;
import org.bytedeco.javacpp.avcodec;
import org.bytedeco.javacv.FFmpegFrameGrabber;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

public class RtmpPusher implements Runnable{

	private Logger log = LoggerFactory.getLogger(getClass());
	//input pipe
	private PipedInputStream pis;
	//output pipe
	private PipedOutputStream pos = new PipedOutputStream();

	public boolean mRunning = true;

	private String address;
	//unique identifier of the rtp ps stream; needed when the port is multiplexed
	private String ssrc;
	//transport used to receive the stream: tcp or udp
	private String type;

	public String getType() {
		return type;
	}

	public String getSsrc() {
		return ssrc;
	}
	//last pts seen
	private long mLastPts;

	private boolean mIs90000TimeBase = false;
	//time the stream was last read
	private volatile Long readTime=0L;
	//reference to the current pushing thread
	private Thread thread;

	//grabber (pull side)
	private FFmpegFrameGrabber grabber = null;
	//recorder (push side)
	private CustomFFmpegFrameRecorder recorder = null;


	private ConcurrentLinkedDeque<Long> mPtsQueue = new ConcurrentLinkedDeque<>();

	public void clear(){
		mPtsQueue.clear();
	}

	public RtmpPusher(String address,String ssrc,String type){
		this.address = address;
		this.ssrc = ssrc;
		this.type=type;
	}
	//write the received data into the pipe
	public void onMediaStream(byte[] data, int offset,int length,boolean isAudio){
		try {
			if(!isAudio&&mRunning){
				pos.write(data,offset,length);
			}
		} catch (IOException e) {
			log.error(e.getMessage());
		}
	}

	/**
	 * Some devices use a timebase of 90000, others use 1000.
	 */
	public void onPts(long pts,boolean isAudio) {
		if(isAudio){
			return;
		}
		//work out whether the timebase is 90000 or 1000:
		//if it is 90000, divide pts by 90
		if(mLastPts == 0 && pts != 0){
			mIs90000TimeBase = (pts >= 3000);
		}
		if(mIs90000TimeBase){
			pts = pts / 90;
		}
		//if the current pts is smaller than the previous one,
		//pushing crashes with
		//av_write_frame() error -22 while writing video packet.
		if(mLastPts != 0 && pts < mLastPts){
			pts = mLastPts + 40;
		}
		mPtsQueue.add(pts);
		mLastPts = pts;
		//log.info("pts >>> {}",pts);
	}
	@Override
	public void run() {

		Long pts  = 0L;

		try{
			//default is 1024
			pis = new PipedInputStream(pos,1024);
			//don't use the grabber's default buffer size here: too large causes out-of-memory, so keep it small; an overly large value also adds playback latency
			grabber = new FFmpegFrameGrabber(pis,1024);
			//blocking: waits until the pipe has data
			grabber.start();
			log.info("[{}]-->[{}] grabber启动",type,ssrc);
			//参考javacv配置
			recorder = new CustomFFmpegFrameRecorder(address,1280,720,0);
			recorder.setInterleaved(true);
			recorder.setVideoCodec(avcodec.AV_CODEC_ID_H264);
			recorder.setFormat("flv");
			recorder.setFrameRate(grabber.getFrameRate());
			recorder.start(grabber.getFormatContext());
			avcodec.AVPacket avPacket;


			readTime=System.currentTimeMillis();
			//register with the timeout checker
			if(!SsrcLiveCheck.rtmps.contains(this)){
				SsrcLiveCheck.rtmps.add(this);
			}
			log.info("[{}]-->[{}] 推流器启动",type,ssrc);
			while(mRunning){
				if((avPacket=grabber.grabPacket()) != null && avPacket.size() >0 && avPacket.data() != null){
					//update the timestamp on every packet; used for timeout disconnection
					readTime=System.currentTimeMillis();
					pts = mPtsQueue.poll(); // poll() returns null when empty, unlike pop() which would throw
					if(pts==null){
						continue;
					}
					//pts+=40;
					recorder.recordPacket(avPacket,pts,pts);
					//continue;
				}
				//log.warn("avPacket is null");
			}
		}catch(Exception e){
			//e.printStackTrace();
			log.error(e.getMessage(),e);
			//System.exit(1);

		}finally{
			close();
		}
		log.error("推流结束");
	}

	public void close(){
		log.info("关闭推流器");
		try{
			if(recorder != null){
				log.info("关闭recorder");
				try {
					recorder.close();
				}catch (Exception e){
					log.error(e.getMessage(),e);
				}
			}
			if(grabber != null){
				log.info("关闭grabber");
				try {
					grabber.close();
				}catch (Exception e){
					log.error(e.getMessage(),e);
				}

			}
			pos.close();
			pis.close();
		}catch(Exception e){
			log.error(e.getMessage(),e);

		}
		mRunning=false;

	}

	public boolean checkTimeout(){
		//timed out after more than 3s without reading a packet
		return System.currentTimeMillis()-readTime>3000;
	}

	//set the flag and interrupt the thread: both are needed, otherwise a blocked thread would never exit
	public void stopRemux() {
		this.mRunning = false;
		thread.interrupt();
	}

	public void startRemux() {
		//keep the thread reference: if grabPacket() blocks, the while loop cannot exit on its own and interrupt() must be called to break out
		thread=new Thread(this);
		thread.setDaemon(true);
		thread.setName("RtmpPusher thread "+ssrc);
		thread.start();
	}
}
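
How the parser side is expected to drive RtmpPusher, shown as a sketch: the call sites and the payload values below are illustrative stand-ins for what the PS Parser implementations actually extract.

import com.fengyulei.fylsipserver.media.push.RtmpPusher;

public class PusherUsageSketch {
	public static void main(String[] args) throws Exception {
		String ssrc = "0100000001"; // illustrative ssrc
		RtmpPusher pusher = new RtmpPusher("rtmp://127.0.0.1/live/" + ssrc, ssrc, "UDP");
		pusher.startRemux(); // starts the daemon thread that runs run()

		// the PS demuxer would call these for every extracted video PES:
		byte[] es = new byte[]{0, 0, 0, 1};            // illustrative H.264 ES bytes
		pusher.onPts(3600L, false);                    // pts parsed from the PES header
		pusher.onMediaStream(es, 0, es.length, false); // payload goes into the pipe

		// on teardown (BYE or timeout):
		pusher.stopRemux();
	}
}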

Pusher timeout detection

import com.fengyulei.fylsipserver.media.push.RtmpPusher;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.springframework.stereotype.Component;

import javax.annotation.PostConstruct;
import java.util.ArrayList;
import java.util.Iterator;
import java.util.List;

@Component
public class SsrcLiveCheck implements Runnable{

    private static final Logger logger = LoggerFactory.getLogger(SsrcLiveCheck.class);

    public static final List<RtmpPusher> rtmps=new ArrayList<>();

    private Boolean flag=true;

    private int time=0;

    //@Autowired
    //private MediaUdpHandler mediaUdpHandler;

    //@Autowired
    //private MediaTcpHandler mediaTcpHandler;

    @PostConstruct
    private void init(){
        Thread thread=new Thread(this);
        thread.setDaemon(true);
        thread.setName("SsrcLiveCheck thread");
        thread.start();
    }


    @Override
    public void run() {

        while (flag){
            Iterator<RtmpPusher> iterator=rtmps.iterator();
            while (iterator.hasNext()) {
                RtmpPusher rtmpPusher = iterator.next();
                if(rtmpPusher.mRunning){
                    if(rtmpPusher.checkTimeout()){
                        if("UDP".equals(rtmpPusher.getType())){
                            MediaTcpHandler.remove(rtmpPusher.getSsrc());
                        }else if("TCP".equals(rtmpPusher.getType())){
                            SsrcTcpHandler ssrcTcpHandler=MediaTcpHandler.remove(rtmpPusher.getSsrc());
                            ssrcTcpHandler.getChannel().close();
                        }

                        rtmpPusher.stopRemux();
                        iterator.remove();
                        logger.error("超时");
                    }
                }
            }
            time++;
            if(time==15){
                time=0;
                //Runtime.getRuntime().gc();
            }
            try {
                Thread.sleep(1000);
            } catch (InterruptedException e) {
                logger.error(e.getMessage(),e);
            }
        }

    }
}

After pushing out over rtmp, you can use nginx to receive the rtmp stream for playback; a sample configuration is sketched below. Questions and corrections are welcome.
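
A minimal nginx-rtmp-module configuration sketch for receiving the published stream (the port and application name are assumptions; adjust them to your setup):

rtmp {
    server {
        listen 1935;
        application live {
            live on;    # accept live publishing
            record off; # don't write the stream to disk
        }
    }
}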

Copyright notice: this is an original post by the blogger, released under the CC 4.0 BY-SA license. Please include the original source link and this notice when reposting.
Original link: https://blog.csdn.net/eguid_1/article/details/108754698
