FFMpeg是一套用C語言編寫的開源工具集。主要用於視頻處理,能夠編解碼視頻,創建流媒體服務器等等。官方網站:http://ffmpeg.org/
FFMpeg.AutoGen封裝方法以方便C#調用FFmpeg。項目地址:https://github.com/Ruslan-B/FFmpeg.AutoGen。可使用NuGet安裝。
AutoGen只是封裝調用FFmpeg,程序仍是須要下載FFmpeg工具放在程序目錄裏,且版本要對應。筆者用FFMpeg.AutoGen的官方example代碼介紹一下FFMpeg如何使用(源代碼在其github庫裏)。
example是一個命令行程序,main函數裏面的代碼以下。我將經過此函數調用順序介紹ffmpeg.AutoGen的用法。
1.註冊FFmpeg庫。實際就是將ffmpeg庫的地址告訴autogen
2.ffmpeg 一些調用其的配置(可選)
2.1 配置日誌輸出
2.2 配置硬件解碼器。ffmpeg是支持硬解的,具體支持類型能夠參考ffmpeg官方文檔。下面轉載網友摘錄的ffmpeg支持硬解編碼的枚舉。
3.解碼函數DecodeAllFramesToImages
3.1 VideoStreamDecoder類
3.2 VideoFrameConverter類
3.3 相關數據結構AVPacket,AVFrame數據結構
本文使用ffmpeg.autogen版本4.2.2,對應ffmpeg版本也是4.2.2。
private static void Main(string[] args)
{
    Console.WriteLine("Current directory: " + Environment.CurrentDirectory);
    Console.WriteLine("Running in {0}-bit mode.", Environment.Is64BitProcess ? "64" : "32");

    // Tell FFmpeg.AutoGen where the native ffmpeg binaries live (sets ffmpeg.RootPath).
    FFmpegBinariesHelper.RegisterFFmpegBinaries();

    Console.WriteLine($"FFmpeg version info: {ffmpeg.av_version_info()}");

    // Configure ffmpeg log output (optional).
    SetupLogging();
    // Ask the user whether/which hardware decoder to use (optional).
    ConfigureHWDecoder(out var deviceType);

    // Decode.
    Console.WriteLine("Decoding...");
    DecodeAllFramesToImages(deviceType);

    // Encode.
    Console.WriteLine("Encoding...");
    EncodeImagesToH264();
}
1 FFmpegBinariesHelper.RegisterFFmpegBinaries();
註冊FFmpeg,這裏的FFmpegBinariesHelper類須要在程序裏重寫。我這裏摘抄官方demo的代碼
namespace FFmpeg.AutoGen.Example
{
    /// <summary>
    /// Locates the native ffmpeg binaries and registers their path with FFmpeg.AutoGen.
    /// </summary>
    public class FFmpegBinariesHelper
    {
        internal static void RegisterFFmpegBinaries()
        {
            // Relative path to probe for: FFmpeg/bin/x64 or FFmpeg/bin/x86,
            // depending on the bitness of the current process.
            var relativeProbe = Path.Combine("FFmpeg", "bin", Environment.Is64BitProcess ? "x64" : "x86");

            // Walk up from the current directory towards the filesystem root,
            // stopping at the first directory that contains the probe path.
            for (var dir = Environment.CurrentDirectory; dir != null; dir = Directory.GetParent(dir)?.FullName)
            {
                var candidate = Path.Combine(dir, relativeProbe);
                if (!Directory.Exists(candidate))
                    continue;

                Console.WriteLine($"FFmpeg binaries found in: {candidate}");
                // Core call: tell FFmpeg.AutoGen where to load the native libraries from.
                ffmpeg.RootPath = candidate;
                return;
            }
        }
    }
}
代碼的功能就是尋找ffmpeg的路徑。
核心代碼:
1 ffmpeg.RootPath = ffmpegBinaryPath;
/// <summary>
/// Configures ffmpeg log output: verbose level, formatted lines printed
/// to the console in yellow via a managed callback.
/// </summary>
private static unsafe void SetupLogging()
{
    ffmpeg.av_log_set_level(ffmpeg.AV_LOG_VERBOSE);

    // Keep this as a delegate stored in a variable — do not convert it to a
    // local function (the delegate instance must stay alive for native code).
    av_log_set_callback_callback callback = (p0, level, format, vl) =>
    {
        // Skip messages above the configured verbosity.
        if (level > ffmpeg.av_log_get_level())
            return;

        const int bufferSize = 1024;
        var buffer = stackalloc byte[bufferSize];
        var prefixFlag = 1;
        // Let ffmpeg format the message into our stack buffer.
        ffmpeg.av_log_format_line(p0, level, format, vl, buffer, bufferSize, &prefixFlag);
        var message = Marshal.PtrToStringAnsi((IntPtr) buffer);

        Console.ForegroundColor = ConsoleColor.Yellow;
        Console.Write(message);
        Console.ResetColor();
    };

    // Core call: install the managed log callback.
    ffmpeg.av_log_set_callback(callback);
}
主要就是配置日誌回調。
核心代碼:
1 ffmpeg.av_log_set_callback(logCallback)
// Excerpt from the ffmpeg headers: the hardware device types that can be
// requested for hardware-accelerated decoding. Which of these actually work
// depends on the platform and the ffmpeg build (see av_hwdevice_iterate_types).
enum AVHWDeviceType {
    AV_HWDEVICE_TYPE_NONE,          // no hardware device (software decoding)
    AV_HWDEVICE_TYPE_VDPAU,
    AV_HWDEVICE_TYPE_CUDA,
    AV_HWDEVICE_TYPE_VAAPI,
    AV_HWDEVICE_TYPE_DXVA2,
    AV_HWDEVICE_TYPE_QSV,
    AV_HWDEVICE_TYPE_VIDEOTOOLBOX,
    AV_HWDEVICE_TYPE_D3D11VA,
    AV_HWDEVICE_TYPE_DRM,
    AV_HWDEVICE_TYPE_OPENCL,
    AV_HWDEVICE_TYPE_MEDIACODEC,
};
/// <summary>
/// Asks the user whether to use hardware-accelerated decoding and, if so,
/// which of the hardware device types supported by this system to use.
/// </summary>
/// <param name="HWtype">
/// The selected hardware device type; AV_HWDEVICE_TYPE_NONE when the user
/// declines or no hardware decoder is available.
/// </param>
private static void ConfigureHWDecoder(out AVHWDeviceType HWtype)
{
    HWtype = AVHWDeviceType.AV_HWDEVICE_TYPE_NONE;
    Console.WriteLine("Use hardware acceleration for decoding?[n]");
    var key = Console.ReadLine();
    var availableHWDecoders = new Dictionary<int, AVHWDeviceType>();
    if (key == "y")
    {
        Console.WriteLine("Select hardware decoder:");
        // av_hwdevice_iterate_types returns, for the type passed in, the next
        // AVHWDeviceType value supported by this system/build; NONE ends the walk.
        var type = AVHWDeviceType.AV_HWDEVICE_TYPE_NONE;
        var number = 0;
        while ((type = ffmpeg.av_hwdevice_iterate_types(type)) != AVHWDeviceType.AV_HWDEVICE_TYPE_NONE)
        {
            Console.WriteLine($"{++number}. {type}");
            availableHWDecoders.Add(number, type);
        }

        if (availableHWDecoders.Count == 0)
        {
            Console.WriteLine("Your system have no hardware decoders.");
            // FIX: the article text contained the garbled, non-compiling statement
            // "HWtype = 。;" here; the official example assigns NONE before returning.
            HWtype = AVHWDeviceType.AV_HWDEVICE_TYPE_NONE;
            return;
        }

        // Default to DXVA2 when it is in the list, otherwise the first decoder found.
        int decoderNumber = availableHWDecoders.SingleOrDefault(t => t.Value == AVHWDeviceType.AV_HWDEVICE_TYPE_DXVA2).Key;
        if (decoderNumber == 0)
            decoderNumber = availableHWDecoders.First().Key;
        Console.WriteLine($"Selected [{decoderNumber}]");
        // Empty/unparsable input yields 0, which falls back to the default above.
        int.TryParse(Console.ReadLine(), out var inputDecoderNumber);
        availableHWDecoders.TryGetValue(inputDecoderNumber == 0 ? decoderNumber : inputDecoderNumber, out HWtype);
    }
}
核心代碼:ffmpeg.av_hwdevice_iterate_types(type)得到系統支持的硬件解碼。
ffmpeg.av_hwdevice_iterate_types(type)根據傳入的硬件解碼器類型,返回AVHWDeviceType枚舉裏下一個系統支持的硬件解碼器類型。
/// <summary>
/// Decodes every frame of a video and saves each one as a JPEG image.
/// </summary>
/// <param name="HWDevice">Hardware decoder type, or AV_HWDEVICE_TYPE_NONE for software decoding.</param>
private static unsafe void DecodeAllFramesToImages(AVHWDeviceType HWDevice)
{
    // Decode all frames from a URL; note it may also be a local resource,
    // e.g. string url = "../../sample_mpeg4.mp4";
    var url = "http://clips.vorwaerts-gmbh.de/big_buck_bunny.mp4"; // be advised this file holds 1440 frames
    using (var decoder = new VideoStreamDecoder(url, HWDevice))
    {
        Console.WriteLine($"codec name: {decoder.CodecName}");

        // Dump the container's metadata tags.
        foreach (var entry in decoder.GetContextInfo().ToList())
            Console.WriteLine($"{entry.Key} = {entry.Value}");

        var sourceSize = decoder.FrameSize;
        // Source pixel format; with a hardware decoder it comes from GetHWPixelFormat.
        var sourcePixelFormat = HWDevice == AVHWDeviceType.AV_HWDEVICE_TYPE_NONE
            ? decoder.PixelFormat
            : GetHWPixelFormat(HWDevice);
        // Destination: same size, converted to 24-bit BGR.
        var destinationSize = sourceSize;
        var destinationPixelFormat = AVPixelFormat.AV_PIX_FMT_BGR24;
        using (var converter = new VideoFrameConverter(sourceSize, sourcePixelFormat, destinationSize, destinationPixelFormat))
        {
            var frameIndex = 0;
            while (decoder.TryDecodeNextFrame(out var frame))
            {
                var converted = converter.Convert(frame);

                // Wrap the converted buffer in a Bitmap and save it as JPEG.
                using (var bitmap = new Bitmap(converted.width, converted.height, converted.linesize[0], PixelFormat.Format24bppRgb, (IntPtr) converted.data[0]))
                    bitmap.Save($"frame.{frameIndex:D8}.jpg", ImageFormat.Jpeg);

                Console.WriteLine($"frame: {frameIndex}");
                frameIndex++;
            }
        }
    }
}
example源代碼裏解碼主要使用VideoStreamDecoder和VideoFrameConverter兩個類。這兩個類不是FFMpeg.AutoGen裏的類型,而是example代碼裏。也就是說解碼工做是須要用戶本身封裝解碼類。圖省事能夠直接照搬example裏的代碼。筆者很推薦讀一下這兩個類的源代碼(能夠在文檔末尾查附件看註釋過的這兩個類),能夠搞清楚ffmpeg的解碼流程。
VideoStreamDecoder做用:經過配置解碼器獲取實際有用的幀數據,大概的流程是:
VideoFrameConverter做用:對幀數據進行格式轉換,格式轉換的大概流程
其中有兩個概念包和幀須要注意一下,這裏轉載灰色飄零博客裏描述(參考文檔【5】):
AVPacket
用於存儲壓縮的數據,分別包括有音頻壓縮數據,視頻壓縮數據和字幕壓縮數據。它一般在解複用操做後存儲壓縮數據,而後做爲輸入傳給解碼器。或者由編碼器輸出而後傳遞給複用器。對於視頻壓縮數據,一個AVPacket一般包括一個視頻幀。對於音頻壓縮數據,可能包括幾個壓縮的音頻幀。
AVFrame
用於存儲解碼後的音頻或者視頻數據。AVFrame必須經過av_frame_alloc進行分配,經過av_frame_free釋放。
二者之間的關係
av_read_frame獲得壓縮的數據包AVPacket,通常有三種壓縮的數據包(視頻、音頻和字幕),都用AVPacket表示。
而後調用avcodec_send_packet 和 avcodec_receive_frame對AVPacket進行解碼獲得AVFrame。
注:從 FFmpeg 3.x 開始,avcodec_decode_video2 就被廢棄了,取而代之的是 avcodec_send_packet 和 avcodec_receive_frame。
參考文檔:
【2】FFmpeg開發之PacketQueue中AVPacket和AVFrame關係
【5】FFMPEG-數據結構解釋(AVCodecContext,AVStream,AVFormatContext)
【6】ffmpeg中的sws_scale算法性能測試 一片雲霧 2011-10-29
【7】av_image_fill_arrays詳解 韭菜大蔥餡雞蛋 2019-12-14
【8】FFmpeg av_image_fill_arrays填充AVFrame數據緩衝 fengyuzaitu 2019-11-12
附件1 Example中unsafe void DecodeAllFramesToImages(AVHWDeviceType HWDevice)解碼函數源碼及註釋
/// <summary>
/// Decodes every frame of a video and saves each one as a JPEG image.
/// </summary>
/// <param name="HWDevice">Hardware decoder type.</param>
private static unsafe void DecodeAllFramesToImages(AVHWDeviceType HWDevice)
{
    // Decode all frames from a URL; note it may also be a local resource,
    // e.g. string url = "../../sample_mpeg4.mp4";
    var url = "http://clips.vorwaerts-gmbh.de/big_buck_bunny.mp4"; // be advised this file holds 1440 frames

    // Decode with the hand-written VideoStreamDecoder class (see appendix 2).
    using (var vsd = new VideoStreamDecoder(url,HWDevice))
    {
        Console.WriteLine($"codec name: {vsd.CodecName}");

        // Print the media metadata tags.
        var info = vsd.GetContextInfo();
        info.ToList().ForEach(x => Console.WriteLine($"{x.Key} = {x.Value}"));

        var sourceSize = vsd.FrameSize;
        // Source pixel format; with a hardware decoder it comes from GetHWPixelFormat.
        var sourcePixelFormat = HWDevice == AVHWDeviceType.AV_HWDEVICE_TYPE_NONE ? vsd.PixelFormat : GetHWPixelFormat(HWDevice);
        // Destination size equals the source size.
        var destinationSize = sourceSize;
        // Destination pixel format: 24-bit BGR (matches Format24bppRgb below).
        var destinationPixelFormat = AVPixelFormat.AV_PIX_FMT_BGR24;
        // Frame format conversion (see VideoFrameConverter in appendix 3).
        using (var vfc = new VideoFrameConverter(sourceSize, sourcePixelFormat, destinationSize, destinationPixelFormat))
        {
            var frameNumber = 0;
            while (vsd.TryDecodeNextFrame(out var frame))
            {
                var convertedFrame = vfc.Convert(frame);

                // Wrap the converted buffer in a Bitmap and save it as JPEG.
                using (var bitmap = new Bitmap(convertedFrame.width, convertedFrame.height, convertedFrame.linesize[0], PixelFormat.Format24bppRgb, (IntPtr) convertedFrame.data[0]))
                    bitmap.Save($"frame.{frameNumber:D8}.jpg", ImageFormat.Jpeg);

                Console.WriteLine($"frame: {frameNumber}");
                frameNumber++;
            }
        }
    }
}
附件2 Example中解碼類VideoStreamDecoder類源碼及註釋
using System;
using System.Collections.Generic;
using System.Drawing;
using System.IO;
using System.Runtime.InteropServices;

namespace FFmpeg.AutoGen.Example
{
    /// <summary>
    /// Decodes video frames from a URL (or file) with ffmpeg, optionally using
    /// a hardware decoder.
    /// </summary>
    public sealed unsafe class VideoStreamDecoder : IDisposable
    {
        private readonly AVCodecContext* _pCodecContext;
        private readonly AVFormatContext* _pFormatContext;
        private readonly int _streamIndex;
        // Frame produced by avcodec_receive_frame.
        private readonly AVFrame* _pFrame;
        // Destination frame when hardware-decoded data is transferred back to system memory.
        private readonly AVFrame* _receivedFrame;
        private readonly AVPacket* _pPacket;

        /// <summary>
        /// Video decoder.
        /// </summary>
        /// <param name="url">Video stream URL (or file path).</param>
        /// <param name="HWDeviceType">Hardware decoder type (default AVHWDeviceType.AV_HWDEVICE_TYPE_NONE, i.e. software).</param>
        public VideoStreamDecoder(string url, AVHWDeviceType HWDeviceType = AVHWDeviceType.AV_HWDEVICE_TYPE_NONE)
        {
            // Allocate an AVFormatContext.
            _pFormatContext = ffmpeg.avformat_alloc_context();
            // Allocate the frame used to receive hardware-transferred data.
            _receivedFrame = ffmpeg.av_frame_alloc();

            var pFormatContext = _pFormatContext;
            // Open the input, i.e. hand the source stream to ffmpeg.
            ffmpeg.avformat_open_input(&pFormatContext, url, null, null).ThrowExceptionIfError();
            // Read stream information from the input.
            ffmpeg.avformat_find_stream_info(_pFormatContext, null).ThrowExceptionIfError();
            AVCodec* codec = null;
            // Find the best video stream in the source; its decoder is returned in codec.
            _streamIndex = ffmpeg.av_find_best_stream(_pFormatContext, AVMediaType.AVMEDIA_TYPE_VIDEO, -1, -1, &codec, 0).ThrowExceptionIfError();
            // Allocate a codec context for that decoder (allocated only, not yet initialized).
            _pCodecContext = ffmpeg.avcodec_alloc_context3(codec);
            // Hardware decoding requested?
            if (HWDeviceType != AVHWDeviceType.AV_HWDEVICE_TYPE_NONE)
            {
                // Create an AVHWDeviceContext of the requested type; it is stored
                // in _pCodecContext->hw_device_ctx.
                ffmpeg.av_hwdevice_ctx_create(&_pCodecContext->hw_device_ctx, HWDeviceType, null, null, 0).ThrowExceptionIfError();
            }
            // Copy the selected stream's codec parameters into the codec context.
            ffmpeg.avcodec_parameters_to_context(_pCodecContext, _pFormatContext->streams[_streamIndex]->codecpar).ThrowExceptionIfError();
            // Initialize the codec context (pairs with avcodec_alloc_context3 above).
            ffmpeg.avcodec_open2(_pCodecContext, codec, null).ThrowExceptionIfError();

            CodecName = ffmpeg.avcodec_get_name(codec->id);
            FrameSize = new Size(_pCodecContext->width, _pCodecContext->height);
            PixelFormat = _pCodecContext->pix_fmt;

            // Allocate the AVPacket.
            /* An AVPacket stores compressed data (audio, video or subtitles).
               It usually holds demuxed compressed data fed to the decoder, or
               encoder output handed to the muxer. For video it typically holds
               one frame; for audio it may hold several compressed frames. */
            _pPacket = ffmpeg.av_packet_alloc();

            // Allocate the AVFrame.
            /* An AVFrame stores decoded (raw) audio or video data. It must be
               allocated with av_frame_alloc and released with av_frame_free. */
            _pFrame = ffmpeg.av_frame_alloc();
        }

        public string CodecName { get; }
        public Size FrameSize { get; }
        public AVPixelFormat PixelFormat { get; }

        public void Dispose()
        {
            ffmpeg.av_frame_unref(_pFrame);
            ffmpeg.av_free(_pFrame);

            ffmpeg.av_packet_unref(_pPacket);
            ffmpeg.av_free(_pPacket);

            ffmpeg.avcodec_close(_pCodecContext);
            var pFormatContext = _pFormatContext;
            ffmpeg.avformat_close_input(&pFormatContext);
        }

        /// <summary>
        /// Decodes the next frame.
        /// </summary>
        /// <param name="frame">Receives the decoded frame.</param>
        /// <returns>false at end of stream; true when a frame was decoded.</returns>
        public bool TryDecodeNextFrame(out AVFrame frame)
        {
            // Drop any references still held by the reusable frames.
            ffmpeg.av_frame_unref(_pFrame);
            ffmpeg.av_frame_unref(_receivedFrame);
            int error;
            do
            {
                try
                {
                    #region read packets, skipping those of other streams
                    do
                    {
                        // Read the next packet from the container into _pPacket.
                        error = ffmpeg.av_read_frame(_pFormatContext, _pPacket);
                        if (error == ffmpeg.AVERROR_EOF) // end of the media stream
                        {
                            frame = *_pFrame;
                            return false;
                        }
                        // Negative return values are errors.
                        error.ThrowExceptionIfError();
                    } while (_pPacket->stream_index != _streamIndex); // skip packets that do not belong to the stream selected in the constructor (_streamIndex)
                    #endregion

                    // Feed the compressed packet (_pPacket) to the decoder (_pCodecContext).
                    ffmpeg.avcodec_send_packet(_pCodecContext, _pPacket).ThrowExceptionIfError();
                }
                finally
                {
                    // Release our reference to the packet's data.
                    ffmpeg.av_packet_unref(_pPacket);
                }

                // Fetch a decoded frame from the decoder (_pCodecContext) into _pFrame.
                error = ffmpeg.avcodec_receive_frame(_pCodecContext, _pFrame);
            } while (error == ffmpeg.AVERROR(ffmpeg.EAGAIN)); // EAGAIN: decoder needs more input — read another packet
            error.ThrowExceptionIfError();

            if (_pCodecContext->hw_device_ctx != null) // a hardware decoder was configured
            {
                // Transfer the hardware frame's data into _receivedFrame (system memory).
                ffmpeg.av_hwframe_transfer_data(_receivedFrame, _pFrame, 0).ThrowExceptionIfError();
                frame = *_receivedFrame;
            }
            else
            {
                frame = *_pFrame;
            }
            return true;
        }

        /// <summary>
        /// Gets the media's metadata TAG information.
        /// </summary>
        /// <returns>A read-only dictionary of the container-level metadata tags.</returns>
        public IReadOnlyDictionary<string, string> GetContextInfo()
        {
            AVDictionaryEntry* tag = null;
            var result = new Dictionary<string, string>();
            // Iterate every metadata entry (AV_DICT_IGNORE_SUFFIX matches all keys).
            while ((tag = ffmpeg.av_dict_get(_pFormatContext->metadata, "", tag, ffmpeg.AV_DICT_IGNORE_SUFFIX)) != null)
            {
                var key = Marshal.PtrToStringAnsi((IntPtr) tag->key);
                var value = Marshal.PtrToStringAnsi((IntPtr) tag->value);
                result.Add(key, value);
            }

            return result;
        }
    }
}
附件3 Example中幀轉換類VideoFrameConverter類源碼及註釋
using System;
using System.Drawing;
using System.Runtime.InteropServices;

namespace FFmpeg.AutoGen.Example
{
    /// <summary>
    /// Converts decoded frames to another size/pixel format using swscale
    /// (sws_getContext / sws_scale).
    /// </summary>
    public sealed unsafe class VideoFrameConverter : IDisposable
    {
        // Unmanaged buffer that backs the converted frame's pixel data.
        private readonly IntPtr _convertedFrameBufferPtr;
        private readonly Size _destinationSize;
        // Plane pointers and per-plane line sizes of the converted frame.
        private readonly byte_ptrArray4 _dstData;
        private readonly int_array4 _dstLinesize;
        private readonly SwsContext* _pConvertContext;

        /// <summary>
        /// Frame format converter.
        /// </summary>
        /// <param name="sourceSize">Source frame size.</param>
        /// <param name="sourcePixelFormat">Source pixel format.</param>
        /// <param name="destinationSize">Destination frame size.</param>
        /// <param name="destinationPixelFormat">Destination pixel format.</param>
        public VideoFrameConverter(Size sourceSize, AVPixelFormat sourcePixelFormat,
            Size destinationSize, AVPixelFormat destinationPixelFormat)
        {
            _destinationSize = destinationSize;
            // Allocate and return an SwsContext; sws_scale() needs it to perform
            // the scaling/conversion — the SwsContext does the actual work.
            _pConvertContext = ffmpeg.sws_getContext(sourceSize.Width, sourceSize.Height, sourcePixelFormat,
                destinationSize.Width,
                destinationSize.Height
                , destinationPixelFormat,
                ffmpeg.SWS_FAST_BILINEAR // default scaling algorithm; others are available
                , null
                , null
                , null // extra parameters for algorithms such as SWS_BICUBIC/SWS_GAUSS/SWS_LANCZOS; not used here
                );
            if (_pConvertContext == null) throw new ApplicationException("Could not initialize the conversion context.");
            // Buffer size (in bytes) required for one frame in the destination format.
            var convertedFrameBufferSize = ffmpeg.av_image_get_buffer_size(destinationPixelFormat
                , destinationSize.Width, destinationSize.Height
                , 1);
            // Allocate unmanaged memory for the destination frame (unsafe code).
            _convertedFrameBufferPtr = Marshal.AllocHGlobal(convertedFrameBufferSize);

            // Pointer arrays for the converted frame.
            _dstData = new byte_ptrArray4();
            _dstLinesize = new int_array4();

            // Attach the frame's data planes: point the _dstData pointers into
            // _convertedFrameBufferPtr and fill _dstLinesize accordingly.
            ffmpeg.av_image_fill_arrays(ref _dstData, ref _dstLinesize
                , (byte*) _convertedFrameBufferPtr
                , destinationPixelFormat
                , destinationSize.Width, destinationSize.Height
                , 1);
        }

        public void Dispose()
        {
            Marshal.FreeHGlobal(_convertedFrameBufferPtr);
            ffmpeg.sws_freeContext(_pConvertContext);
        }

        public AVFrame Convert(AVFrame sourceFrame)
        {
            // Perform the format/size conversion into _dstData/_dstLinesize.
            ffmpeg.sws_scale(_pConvertContext
                , sourceFrame.data
                , sourceFrame.linesize
                , 0, sourceFrame.height
                , _dstData, _dstLinesize);

            var data = new byte_ptrArray8();
            data.UpdateFrom(_dstData);
            var linesize = new int_array8();
            linesize.UpdateFrom(_dstLinesize);

            // The returned AVFrame aliases _convertedFrameBufferPtr, which this
            // converter owns — it is only valid until the next Convert/Dispose.
            return new AVFrame
            {
                data = data,
                linesize = linesize,
                width = _destinationSize.Width,
                height = _destinationSize.Height
            };
        }
    }
}