雖然還是沒有搞出來,但總感覺快了哈哈(哪來的自信)
1、RTP協議接受數據
#region 1 - RTP protocol field declarations
// NOTE(review): field was declared as "RTPsession" (lowercase 's') in the original,
// which does not match the "new RTPSession()" constructor below — fixed to RTPSession.
RTPSession session;
RTPReceiver receiver;
RTPParticipant participant;
// Buffers received RTP packets per source (keyed by SSRC — TODO confirm key semantics).
private Dictionary<uint, List<RTPPacket>> Clients;
#endregion

#region Initialize RTP and start receiving; after this runs, packets arrive via the callback
session = new RTPSession();
receiver = new RTPReceiver();
// Local endpoint to listen on for RTP traffic.
IPEndPoint rtpEp = new IPEndPoint(IPAddress.Parse("192.168.1.109"), 5000);
participant = new RTPParticipant(rtpEp);
receiver.AddParticipant(participant);
// NewRTPPacket is invoked once per received RTP packet (see delegate declaration below).
session.NewRTPPacket = new RTPSession.NewRTPPacket_Callback(NewRTPPacket);
session.AddReceiver(receiver);
Clients = new Dictionary<uint, List<RTPPacket>>();
#endregion
其中NewRTPPacket是
/// <summary>
/// Callback invoked for each received RTP packet.
/// </summary>
/// <param name="packet">The RTP packet just received.</param>
/// <returns>Presumably whether the packet was consumed — TODO confirm against the RTP library docs.</returns>
// NOTE(review): the original line was missing the terminating semicolon, which is
// required for a delegate declaration to compile.
public delegate bool NewRTPPacket_Callback(RTPPacket packet);
類型的委托。packet為接收到的RTP包,我們就對這些包進行處理得到想要的幀,然后再把幀進行解碼,得到想要的圖像(我是這樣理解的)
2、H.264進行解碼
我從網絡上搜索到了一個海思的DLL,可以對H.264進行解碼
#region Decoder-related field declarations
/// <summary>
/// Pointer to the (pinned) input frame data handed to the native decoder.
/// </summary>
IntPtr pData;
/// <summary>
/// Decoder attribute (configuration) structure.
/// </summary>
public H264Dec.hiH264_DEC_ATTR_S decAttr;
/// <summary>
/// Decoder output image information (YUV plane pointers, strides, dimensions).
/// </summary>
public H264Dec.hiH264_DEC_FRAME_S _decodeFrame = new H264Dec.hiH264_DEC_FRAME_S();
/// <summary>
/// Native decoder handle returned by Hi264DecCreate.
/// </summary>
public IntPtr _decHandle;
#endregion

#region Decoder initialization — typically done in the form's Load handler
decAttr = new H264Dec.hiH264_DEC_ATTR_S();
decAttr.uPictureFormat = 0;   // YUV420 — the only format the library supports
decAttr.uStreamInType = 0;    // byte-stream with "00 00 01" NALU start codes
// NOTE(review): width is set from 480 and height from 640 — for a 640x480 frame
// these look swapped; confirm against the actual stream resolution.
decAttr.uPicWidthInMB = 480 >> 4;   // width in 16-pixel macroblocks
decAttr.uPicHeightInMB = 640 >> 4;  // height in 16-pixel macroblocks
decAttr.uBufNum = 8;          // number of reference-frame buffers
decAttr.uWorkMode = 16;       // decoder work mode (value meaning per vendor docs — TODO confirm)
// Create and initialize the native decoder.
_decHandle = H264Dec.Hi264DecCreate(ref decAttr);
#endregion

// Decode one access unit, then convert the resulting YUV420 frame to RGB
// and save it as a BMP file. Helper functions (ConvertYUV2RGB, WriteBMP) and
// variables (newData, rgb, width, height, index) are declared elsewhere.
if (H264Dec.Hi264DecAU(_decHandle, pData, (uint)newData.Length, 0, ref _decodeFrame, 0) == 0)
{
    if (_decodeFrame.bError == 0)
    {
        // Compute the Y, U and V plane sizes from the decoder-reported strides.
        var yLength = _decodeFrame.uHeight * _decodeFrame.uYStride;
        var uLength = _decodeFrame.uHeight * _decodeFrame.uUVStride / 2;
        var vLength = uLength;

        var yBytes = new byte[yLength];
        var uBytes = new byte[uLength];
        var vBytes = new byte[vLength];
        var decodedBytes = new byte[yLength + uLength + vLength];

        // _decodeFrame holds the decoded YUV plane pointers plus width/height info.
        Marshal.Copy(_decodeFrame.pY, yBytes, 0, (int)yLength);
        Marshal.Copy(_decodeFrame.pU, uBytes, 0, (int)uLength);
        Marshal.Copy(_decodeFrame.pV, vBytes, 0, (int)vLength);

        // Pack the three planes contiguously (planar YUV420) into decodedBytes.
        Array.Copy(yBytes, decodedBytes, yLength);
        Array.Copy(uBytes, 0, decodedBytes, yLength, uLength);
        Array.Copy(vBytes, 0, decodedBytes, yLength + uLength, vLength);

        // NOTE(review): the original code called ConvertYUV2RGB twice — first with an
        // undeclared variable "yuv", then with decodedBytes. The first (broken,
        // redundant) call has been removed.
        ConvertYUV2RGB(decodedBytes, rgb, width, height);

        // Write the RGB frame out as a BMP file.
        WriteBMP(rgb, width, height, string.Format("E://test//yuv2bmp_{0}.bmp", index++));
    }
}
其中pData為需要的一幀數據,因為pData為IntPtr類型,而一幀數據是byte[]類型,所以我從網上查了查怎麼轉換,下面是代碼,newData是byte[]類型,pData是IntPtr類型。
// Pin the managed byte[] so the GC cannot move it, then take a raw pointer the
// native H.264 decoder can read from.
GCHandle hObject = GCHandle.Alloc(newData, GCHandleType.Pinned); pData = hObject.AddrOfPinnedObject();
// NOTE(review): hObject is never released in this snippet — hObject.Free() must be
// called after the native call that consumes pData completes, otherwise the buffer
// stays pinned for its lifetime (heap fragmentation / effective leak). Freeing it
// *before* the native call would invalidate pData, so the Free belongs at the call
// site, ideally in a try/finally.
H264解碼類
public class H264Dec { public const int HI_SUCCESS = 0; public const int HI_FAILURE = -1; public const int HI_LITTLE_ENDIAN = 1234; public const int HI_BIG_ENDIAN = 4321; public const int HI_DECODER_SLEEP_TIME = 60000; public const int HI_H264DEC_OK = 0; public const int HI_H264DEC_NEED_MORE_BITS = -1; public const int HI_H264DEC_NO_PICTURE = -2; public const int HI_H264DEC_ERR_HANDLE = -3; [DllImport("hi_h264dec_w.dll", EntryPoint = "Hi264DecImageEnhance", CallingConvention = CallingConvention.Cdecl)] public static extern int Hi264DecImageEnhance(IntPtr hDec, ref hiH264_DEC_FRAME_S pDecFrame, uint uEnhanceCoeff); [DllImport("hi_h264dec_w.dll", EntryPoint = "Hi264DecCreate", CallingConvention = CallingConvention.Cdecl)] public static extern IntPtr Hi264DecCreate(ref hiH264_DEC_ATTR_S pDecAttr); [DllImport("hi_h264dec_w.dll", EntryPoint = "Hi264DecDestroy", CallingConvention = CallingConvention.Cdecl)] public static extern void Hi264DecDestroy(IntPtr hDec); [DllImport("hi_h264dec_w.dll", EntryPoint = "Hi264DecGetInfo", CallingConvention = CallingConvention.Cdecl)] public static extern int Hi264DecGetInfo(ref hiH264_LIBINFO_S pLibInfo); /// <summary> /// 對輸入的一段碼流進行解碼并按幀輸出圖像 /// </summary> /// <param name="hDec">解碼器句柄</param> /// <param name="pStream">碼流起始地址</param> /// <param name="iStreamLen">碼流長度</param> /// <param name="ullPTS">時間戳信息</param> /// <param name="pDecFrame">圖像信息</param> /// <param name="uFlags">解碼模式 0:正常解碼;1、解碼完畢并要求解碼器輸出殘留圖像</param> /// <returns></returns> [DllImport("hi_h264dec_w.dll", EntryPoint = "Hi264DecFrame", CallingConvention = CallingConvention.Cdecl)] public static extern int Hi264DecFrame(IntPtr hDec, IntPtr pStream, uint iStreamLen, ulong ullPTS, ref hiH264_DEC_FRAME_S pDecFrame, uint uFlags); [DllImport("hi_h264dec_w.dll", EntryPoint = "Hi264DecAU", CallingConvention = CallingConvention.Cdecl)] public static extern int Hi264DecAU(IntPtr hDec, IntPtr pStream, uint iStreamLen, ulong ullPTS, ref hiH264_DEC_FRAME_S pDecFrame, uint uFlags); /// 
<summary> /// 解碼器屬性信息。 /// </summary> [StructLayout(LayoutKind.Sequential)] public struct hiH264_DEC_ATTR_S { /// <summary> /// 解碼器輸出圖像格式,目前解碼庫只支持YUV420圖像格式 /// </summary> public uint uPictureFormat; /// <summary> /// 輸入碼流格式 0x00: 目前解碼庫只支持以“00 00 01”為nalu分割符的流式H.264碼流 /// </summary> public uint uStreamInType; /// <summary> /// 圖像寬度 /// </summary> public uint uPicWidthInMB; /// <summary> /// 圖像高度 /// </summary> public uint uPicHeightInMB; /// <summary> /// 參考幀數目 /// </summary> public uint uBufNum; /// <summary> /// 解碼器工作模式 /// </summary> public uint uWorkMode; /// <summary> /// 用戶私有數據 /// </summary> public IntPtr pUserData; /// <summary> /// 保留字 /// </summary> public uint uReserved; } /// <summary> /// 解碼器輸出圖像信息數據結構 /// </summary> [StructLayout(LayoutKind.Sequential)] public struct hiH264_DEC_FRAME_S { /// <summary> /// Y分量地址 /// <
新聞熱點
疑難解答