In "Kinect嚐鮮(1)" I introduced the two programming models for Kinect applications: the event model and the polling model. The event model is built on C#'s events and delegates: once the Kinect finishes capturing a frame of data, it raises an event, and the method bound to that event's delegate processes the data. In the polling model, control stays with the application, which actively asks the Kinect for data. The event model is easier to develop with but also more restrictive; the polling model is more efficient and better suited to multithreaded applications.
private void StartKinect()
{
    if (KinectSensor.KinectSensors.Count <= 0)
    {
        MessageBox.Show("No Kinect device found!");
        return;
    }
    _kinect = KinectSensor.KinectSensors[0];
    _kinect.ColorStream.Enable(ColorImageFormat.RgbResolution640x480Fps30);
    _kinect.DepthStream.Enable(DepthImageFormat.Resolution640x480Fps30);
    _kinect.SkeletonStream.Enable();
    _kinect.ColorFrameReady += new EventHandler<ColorImageFrameReadyEventArgs>(KinectColorFrameReady);
    _kinect.DepthFrameReady += new EventHandler<DepthImageFrameReadyEventArgs>(KinectDepthFrameReady);
    _kinect.SkeletonFrameReady += new EventHandler<SkeletonFrameReadyEventArgs>(KinectSkeletonFrameReady);
    _kinect.Start();
}
The code above is a typical event model. ColorFrameReady, DepthFrameReady, and SkeletonFrameReady are the three events the Kinect SDK exposes; when each fires, it runs the delegate methods KinectColorFrameReady, KinectDepthFrameReady, and KinectSkeletonFrameReady, all three of which are user-defined. Take KinectColorFrameReady as an example:
private void KinectColorFrameReady(object sender, ColorImageFrameReadyEventArgs e)
{
    using (ColorImageFrame colorImageFrame = e.OpenColorImageFrame())
    {
        if (colorImageFrame == null)
            return;
        byte[] pixels = new byte[colorImageFrame.PixelDataLength];
        colorImageFrame.CopyPixelDataTo(pixels);
        int stride = colorImageFrame.Width * 4;
        colorImage.Source = BitmapSource.Create(colorImageFrame.Width, colorImageFrame.Height,
            96, 96, PixelFormats.Bgr32, null, pixels, stride);
    }
}
Each time the Kinect finishes capturing a frame of color image data, it raises the ColorFrameReady event, whose delegate runs KinectColorFrameReady; that method renders the color video stream onto the colorImage control.
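A side note on the stride argument: Width * 4 works because Bgr32 packs four bytes per pixel. The value can be derived from the pixel format instead of hard-coded, which is what the KinectX class later in this post does. A minimal sketch:

// Bgr32 uses 32 bits (4 bytes) per pixel, so one row of a 640-pixel-wide
// frame occupies 640 * 4 = 2560 bytes.
int bytesPerPixel = (PixelFormats.Bgr32.BitsPerPixel + 7) / 8; // = 4
int stride = colorImageFrame.Width * bytesPerPixel;            // = 2560 for 640x480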
In the polling model, since the initiative is handed back to the application, I wanted to separate Kinect data collection and processing from the application logic, so I wrapped the Kinect-related methods in a KinectX class, where CustomKinectException is a custom exception.
public KinectX()
{
    if (KinectSensor.KinectSensors.Count <= 0)
    {
        throw new CustomKinectException("No Kinect Found");
    }
    _kinect = KinectSensor.KinectSensors[0];
    _kinect.ColorStream.Enable(ColorImageFormat.RgbResolution640x480Fps30);
    _kinect.DepthStream.Enable(DepthImageFormat.Resolution640x480Fps30);
    _kinect.SkeletonStream.Enable();
}

public void Start(/*args*/)
{
    /*......*/
    _kinect.Start();
}
After starting the Kinect in the application, the Kinect-related work can then run on a separate thread.
private void Window_Loaded(object sender, RoutedEventArgs e)
{
    kinectX = new KinectX();
    try
    {
        kinectX.Start(KinectX.StartModel.StreamAll);
    }
    catch (CustomKinectException exc)
    {
        textBlock.Text = exc.ToString();
    }
    renderThread = new Thread(new ThreadStart(RenderImage));
    renderThread.Start();
}

private void RenderImage()
{
    while (isWindowsClosing == false)
    {
        kinectX.GetColorStream();
        kinectX.GetDepthStream();
        if (kinectX.ColorImageAvailable == false)
            continue;
        if (kinectX.DepthImageAvailable == false)
            continue;
        colorImage.Dispatcher.Invoke(new Action(() =>
        {
            colorImage.Source = BitmapSource.Create(kinectX.colorImageFrameWidth,
                kinectX.colorImageFrameHeight, 96, 96, PixelFormats.Bgr32, null,
                kinectX.GetColorPixelsData, kinectX.colorStride);
        }));
        depthImage.Dispatcher.Invoke(new Action(() =>
        {
            depthImage.Source = BitmapSource.Create(kinectX.depthImageFrameWidth,
                kinectX.depthImageFrameHeight, 96, 96, PixelFormats.Bgr32, null,
                kinectX.GetDepthColorBytePixelData, kinectX.depthStride);
        }));
    }
}
In the code above, kinectX.GetColorStream() and kinectX.GetDepthStream() use the polling model to ask the Kinect for data. They look like this:
public void GetColorStream()
{
    using (ColorImageFrame colorImageFrame = _kinect.ColorStream.OpenNextFrame(30))
    {
        if (colorImageFrame == null)
        {
            ColorImageAvailable = false;
            return;
        }
        byte[] pixels = new byte[colorImageFrame.PixelDataLength];
        colorImageFrame.CopyPixelDataTo(pixels);
        colorImageFrameWidth = colorImageFrame.Width;
        colorImageFrameHeight = colorImageFrame.Height;
        colorStride = colorImageFrame.Width * 4;
        colorPixelsData = pixels;
        ColorImageAvailable = true;
    }
}

public void GetDepthStream()
{
    using (DepthImageFrame depthImageFrame = _kinect.DepthStream.OpenNextFrame(30))
    {
        if (depthImageFrame == null)
        {
            DepthImageAvailable = false;
            return;
        }
        short[] depthPixelData = new short[depthImageFrame.PixelDataLength];
        depthImageFrame.CopyPixelDataTo(depthPixelData);
        depthImageFrameWidth = depthImageFrame.Width;
        depthImageFrameHeight = depthImageFrame.Height;
        byte[] pixels = ConvertDepthFrameToColorFrame(depthPixelData, _kinect.DepthStream);
        depthBytePixelsData = pixels;
        depthStride = depthImageFrame.Width * 4;
        DepthImageAvailable = true;
    }
}
However, when interacting with the XAML controls on the main thread, the code again goes through the delegate mechanism to ask the kinectX object for data, which is not very efficient. I have not yet found a better solution; I will revise this once the problem is solved.
Take the depth data as an example. _kinect.DepthStream.OpenNextFrame(30) asks the Kinect for the next frame of depth data, waiting up to 30 ms for it to arrive. Since the frame rate chosen at startup was 30 FPS, asking for data roughly every 30 ms is a good fit. Passing 0 does not actually mean a zero interval: the method call itself consumes some (very small) amount of time, and in such a short window the Kinect cannot finish capturing a frame, so the returned depthImageFrame is null. Many calls then yield only one valid result, which wastes resources unnecessarily.
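A small sketch of the difference between the two timeout values (assuming a started sensor; the comments state the expected behavior):

// Wait up to 30 ms: at 30 FPS a new frame is usually ready within the window.
using (DepthImageFrame frame = _kinect.DepthStream.OpenNextFrame(30))
{
    if (frame != null)
    {
        // process the frame
    }
}

// Wait 0 ms: the call returns immediately. Unless a frame happens to have
// completed since the last call, frame is null, so a tight loop mostly
// spins and burns CPU for nothing.
using (DepthImageFrame frame = _kinect.DepthStream.OpenNextFrame(0))
{
    if (frame == null)
    {
        // no frame ready yet; try again
    }
}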
The complete KinectX class:

class KinectX
{
    private KinectSensor _kinect;
    private DepthImageStream depthImageStream;
    private ColorImageStream colorImageStream;
    private SkeletonStream skeletonStream;
    private SkeletonFrame skeletonFrame;
    private Skeleton[] skeletons;
    private WriteableBitmap manBitmap;
    private Int32Rect manImageRect;
    private Joint[] joints;

    const float maxDepthDistance = 4095;
    const float minDepthDistance = 850;
    const float maxDepthDistanceOffset = maxDepthDistance - minDepthDistance;

    public int manBitmapStride;
    public int colorStride;
    public int depthStride;
    public int colorImageFrameWidth;
    public int colorImageFrameHeight;
    public int depthImageFrameWidth;
    public int depthImageFrameHeight;

    private const int redIndex = 2;
    private const int greenIndex = 1;
    private const int blueIndex = 0;
    private static readonly int bgr32BytesPerPixel = (PixelFormats.Bgr32.BitsPerPixel + 7) / 8;
    private static readonly int[] intensityShiftByPlayerR = { 1, 2, 0, 2, 0, 0, 2, 0 };
    private static readonly int[] intensityShiftByPlayerG = { 1, 2, 2, 0, 2, 0, 0, 1 };
    private static readonly int[] intensityShiftByPlayerB = { 1, 0, 2, 2, 0, 2, 0, 2 };

    private short[] depthShortPixelsData;
    private byte[] colorPixelsData;
    private byte[] depthBytePixelsData;
    private bool colorImageAvailable;
    private bool depthImageAvailable;

    public enum StartModel
    {
        EventAllFrame,
        EventApartFrame,
        EventColorFrame,
        EventDepthFrame,
        EventSkeletonFrame,
        StreamAll,
        StreamColor,
        StreamSkeleton,
        StreamDepth
    };

    public KinectX()
    {
        if (KinectSensor.KinectSensors.Count <= 0)
        {
            throw new CustomKinectException("No Kinect Found");
        }
        _kinect = KinectSensor.KinectSensors[0];
        _kinect.ColorStream.Enable(ColorImageFormat.RgbResolution640x480Fps30);
        _kinect.DepthStream.Enable(DepthImageFormat.Resolution640x480Fps30);
        _kinect.SkeletonStream.Enable();
    }

    public void Start(StartModel startModel)
    {
        switch (startModel)
        {
            case StartModel.EventAllFrame:
                _kinect.AllFramesReady += KinectAllFramesReady;
                break;
            case StartModel.EventApartFrame:
                _kinect.ColorFrameReady += KinectColorFrameReady;
                _kinect.DepthFrameReady += KinectDepthFrameReady;
                _kinect.SkeletonFrameReady += KinectSkeletonFrameReady;
                break;
            default:
                break;
        }
        _kinect.Start();
    }

    private void KinectAllFramesReady(object sender, AllFramesReadyEventArgs e)
    {
        using (ColorImageFrame colorFrame = e.OpenColorImageFrame())
        {
            using (DepthImageFrame depthFrame = e.OpenDepthImageFrame())
            {
            }
        }
    }

    public void Release()
    {
        if (_kinect != null)
        {
            if (_kinect.Status == KinectStatus.Connected)
            {
                _kinect.Stop();
            }
        }
    }

    public void ViewUp()
    {
        if (_kinect == null) return;
        if (!_kinect.IsRunning) return;
        if (_kinect.ElevationAngle <= _kinect.MaxElevationAngle - 5)
        {
            _kinect.ElevationAngle += 5;
        }
    }

    public void ViewDown()
    {
        if (_kinect == null) return;
        if (!_kinect.IsRunning) return;
        if (_kinect.ElevationAngle >= _kinect.MinElevationAngle + 5)
        {
            _kinect.ElevationAngle -= 5;
        }
    }

    public void GetColorStream()
    {
        using (ColorImageFrame colorImageFrame = _kinect.ColorStream.OpenNextFrame(30))
        {
            if (colorImageFrame == null)
            {
                ColorImageAvailable = false;
                return;
            }
            byte[] pixels = new byte[colorImageFrame.PixelDataLength];
            colorImageFrame.CopyPixelDataTo(pixels);
            colorImageFrameWidth = colorImageFrame.Width;
            colorImageFrameHeight = colorImageFrame.Height;
            colorStride = colorImageFrame.Width * 4;
            colorPixelsData = pixels;
            ColorImageAvailable = true;
        }
    }

    public void GetDepthStream()
    {
        using (DepthImageFrame depthImageFrame = _kinect.DepthStream.OpenNextFrame(30))
        {
            if (depthImageFrame == null)
            {
                DepthImageAvailable = false;
                return;
            }
            short[] depthPixelData = new short[depthImageFrame.PixelDataLength];
            depthImageFrame.CopyPixelDataTo(depthPixelData);
            depthImageFrameWidth = depthImageFrame.Width;
            depthImageFrameHeight = depthImageFrame.Height;
            byte[] pixels = ConvertDepthFrameToColorFrame(depthPixelData, _kinect.DepthStream);
            depthBytePixelsData = pixels;
            depthStride = depthImageFrame.Width * 4;
            DepthImageAvailable = true;
        }
    }

    public void GetSkeletonStream()
    {
    }

    public void GetSkeletons()
    {
        skeletons = new Skeleton[skeletonFrame.SkeletonArrayLength];
        skeletonFrame.CopySkeletonDataTo(skeletons);
    }

    public byte[] GetDepthColorBytePixelData
    {
        get { return depthBytePixelsData; }
    }

    public byte[] GetColorPixelsData
    {
        get { return colorPixelsData; }
    }

    public short[] GetDepthShortPixelData
    {
        get { return depthShortPixelsData; }
    }

    public bool ColorImageAvailable
    {
        get { return colorImageAvailable; }
        set { colorImageAvailable = value; }
    }

    public bool DepthImageAvailable
    {
        get { return depthImageAvailable; }
        set { depthImageAvailable = value; }
    }

    private void GetSkeletonStreamAsync()
    {
        skeletonFrame = skeletonStream.OpenNextFrame(34);
    }

    private void KinectSkeletonFrameReady(object sender, SkeletonFrameReadyEventArgs e)
    {
        bool isSkeletonDataReady = false;
        using (SkeletonFrame skeletonFrame = e.OpenSkeletonFrame())
        {
            if (skeletonFrame != null)
            {
                skeletons = new Skeleton[skeletonFrame.SkeletonArrayLength];
                skeletonFrame.CopySkeletonDataTo(skeletons);
                isSkeletonDataReady = true;
            }
        }
    }

    private void KinectColorFrameReady(object sender, ColorImageFrameReadyEventArgs e)
    {
        using (ColorImageFrame colorImageFrame = e.OpenColorImageFrame())
        {
            if (colorImageFrame == null) return;
            byte[] pixels = new byte[colorImageFrame.PixelDataLength];
            colorImageFrame.CopyPixelDataTo(pixels);
            colorStride = colorImageFrame.Width * 4;
            colorPixelsData = pixels;
            ColorImageAvailable = true;
        }
    }

    private void KinectDepthFrameReady(object sender, DepthImageFrameReadyEventArgs e)
    {
    }

    private void RenderMan(ColorImageFrame colorFrame, DepthImageFrame depthFrame)
    {
        if (!(depthFrame != null && colorFrame != null)) return;
        int depthPixelIndex;
        int playerIndex;
        int colorPixelIndex;
        ColorImagePoint colorPoint;
        int colorStride = colorFrame.BytesPerPixel * colorFrame.Width;
        int bytePerPixelOfBgrImage = 4;
        int playerImageIndex = 0;
        depthShortPixelsData = new short[depthFrame.PixelDataLength];
        colorPixelsData = new byte[colorFrame.PixelDataLength];
        depthFrame.CopyPixelDataTo(depthShortPixelsData);
        colorFrame.CopyPixelDataTo(colorPixelsData);
        byte[] manImage = new byte[depthFrame.Height * manBitmapStride];
        for (int j = 0; j < depthFrame.Height; j++)
        {
            for (int i = 0; i < depthFrame.Width; i++, playerImageIndex += bytePerPixelOfBgrImage)
            {
                depthPixelIndex = i + (j * depthFrame.Width);
                playerIndex = depthShortPixelsData[depthPixelIndex] & DepthImageFrame.PlayerIndexBitmask;
                // A non-zero player index means this pixel belongs to a person.
                if (playerIndex != 0)
                {
                    // Map the depth-image point to the corresponding color-image point.
                    colorPoint = _kinect.MapDepthToColorImagePoint(depthFrame.Format, i, j,
                        depthShortPixelsData[depthPixelIndex], colorFrame.Format);
                    colorPixelIndex = (colorPoint.X * colorFrame.BytesPerPixel) + (colorPoint.Y * colorStride);
                    manImage[playerImageIndex] = colorPixelsData[colorPixelIndex];         // Blue
                    manImage[playerImageIndex + 1] = colorPixelsData[colorPixelIndex + 1]; // Green
                    manImage[playerImageIndex + 2] = colorPixelsData[colorPixelIndex + 2]; // Red
                    manImage[playerImageIndex + 3] = 0xFF;                                 // Alpha
                }
            }
        }
        // Write the assembled image once per frame, not once per pixel.
        manBitmap.WritePixels(manImageRect, manImage, manBitmapStride, 0);
    }

    /// <summary>
    /// Monochrome histogram formula; returns one of 256 gray levels. The darker the color, the farther away.
    /// </summary>
    /// <param name="dis">Depth value; valid values are......</param>
    /// <returns></returns>
    private static byte CalculateIntensityFromDepth(int dis)
    {
        return (byte)(255 - (255 * Math.Max(dis - minDepthDistance, 0) / maxDepthDistanceOffset));
    }

    /// <summary>
    /// Generates a BGR32-format image byte array.
    /// </summary>
    /// <param name="depthImageFrame"></param>
    /// <returns></returns>
    private byte[] ConvertDepthFrameToGrayFrame(DepthImageFrame depthImageFrame)
    {
        short[] rawDepthData = new short[depthImageFrame.PixelDataLength];
        depthImageFrame.CopyPixelDataTo(rawDepthData);
        byte[] pixels = new byte[depthImageFrame.Height * depthImageFrame.Width * 4];
        for (int depthIndex = 0, colorIndex = 0;
             depthIndex < rawDepthData.Length && colorIndex < pixels.Length;
             depthIndex++, colorIndex += 4)
        {
            int player = rawDepthData[depthIndex] & DepthImageFrame.PlayerIndexBitmask;
            int depth = rawDepthData[depthIndex] >> DepthImageFrame.PlayerIndexBitmaskWidth;
            if (depth <= 900)
            {
                // Very close to the Kinect: blue.
                pixels[colorIndex + blueIndex] = 255;
                pixels[colorIndex + greenIndex] = 0;
                pixels[colorIndex + redIndex] = 0;
            }
            else if (depth > 900 && depth < 2000)
            {
                pixels[colorIndex + blueIndex] = 0;
                pixels[colorIndex + greenIndex] = 255;
                pixels[colorIndex + redIndex] = 0;
            }
            else if (depth >= 2000)
            {
                // More than two meters from the Kinect: red.
                pixels[colorIndex + blueIndex] = 0;
                pixels[colorIndex + greenIndex] = 0;
                pixels[colorIndex + redIndex] = 255;
            }
            // Monochrome histogram shading (note: this overwrites the
            // three-band coloring above).
            byte intensity = CalculateIntensityFromDepth(depth);
            pixels[colorIndex + blueIndex] = intensity;
            pixels[colorIndex + greenIndex] = intensity;
            pixels[colorIndex + redIndex] = intensity;
            // Mark player (human) regions in light green.
            if (player > 0)
            {
                pixels[colorIndex + blueIndex] = Colors.LightGreen.B;
                pixels[colorIndex + greenIndex] = Colors.LightGreen.G;
                pixels[colorIndex + redIndex] = Colors.LightGreen.R;
            }
        }
        return pixels;
    }

    /// <summary>
    /// Converts a 16-bit grayscale depth frame into a 32-bit color depth frame.
    /// </summary>
    /// <param name="depthImageFrame">the 16-bit grayscale depth frame</param>
    /// <param name="depthImageStream">used to read properties of the depth stream</param>
    /// <returns></returns>
    private byte[] ConvertDepthFrameToColorFrame(short[] depthImageFrame, DepthImageStream depthImageStream)
    {
        byte[] depthFrame32 = new byte[depthImageStream.FrameWidth * depthImageStream.FrameHeight * bgr32BytesPerPixel];
        // Read the valid view range from the stream's constants rather than hard-coding it.
        int tooNearDepth = depthImageStream.TooNearDepth;
        int tooFarDepth = depthImageStream.TooFarDepth;
        int unknowDepth = depthImageStream.UnknownDepth;
        for (int i16 = 0, i32 = 0;
             i16 < depthImageFrame.Length && i32 < depthFrame32.Length;
             i16++, i32 += 4)
        {
            int player = depthImageFrame[i16] & DepthImageFrame.PlayerIndexBitmask;
            int realDepth = depthImageFrame[i16] >> DepthImageFrame.PlayerIndexBitmaskWidth;
            // Use bit shifting to clip the 13-bit depth value down to 8 bits.
            byte intensity = (byte)(~(realDepth >> 4));
            if (player == 0 && realDepth == 0)
            {
                depthFrame32[i32 + redIndex] = 255;
                depthFrame32[i32 + greenIndex] = 255;
                depthFrame32[i32 + blueIndex] = 255;
            }
            else if (player == 0 && realDepth == tooFarDepth)
            {
                // Dark purple.
                depthFrame32[i32 + redIndex] = 66;
                depthFrame32[i32 + greenIndex] = 0;
                depthFrame32[i32 + blueIndex] = 66;
            }
            else if (player == 0 && realDepth == unknowDepth)
            {
                // Dark brown.
                depthFrame32[i32 + redIndex] = 66;
                depthFrame32[i32 + greenIndex] = 66;
                depthFrame32[i32 + blueIndex] = 33;
            }
            else
            {
                depthFrame32[i32 + redIndex] = (byte)(intensity >> intensityShiftByPlayerR[player]);
                depthFrame32[i32 + greenIndex] = (byte)(intensity >> intensityShiftByPlayerG[player]);
                depthFrame32[i32 + blueIndex] = (byte)(intensity >> intensityShiftByPlayerB[player]);
            }
        }
        return depthFrame32;
    }
}

class CustomKinectException : ApplicationException
{
    public CustomKinectException() { }
    public CustomKinectException(string message) : base(message) { }
    public CustomKinectException(string message, Exception inner) : base(message, inner) { }
}
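For completeness, a minimal shutdown sketch that matches the fields used in Window_Loaded earlier (isWindowsClosing, renderThread, and kinectX are the window-level fields from that snippet; Window_Closing is assumed to be wired to the window's Closing event):

private void Window_Closing(object sender, System.ComponentModel.CancelEventArgs e)
{
    isWindowsClosing = true;   // lets RenderImage's while loop exit
    if (renderThread != null && renderThread.IsAlive)
    {
        // Timed join: RenderImage's synchronous Dispatcher.Invoke could
        // otherwise deadlock against the UI thread blocked here.
        renderThread.Join(1000);
    }
    kinectX.Release();         // stops the sensor if it is still connected
}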