首页 > 代码库 > Kinect 开发 —— 骨骼追踪 (下)
Kinect 开发 —— 骨骼追踪 (下)
基于景深数据的用户交互
骨骼数据中关节点不仅有X,Y值,还有一个深度值
除了使用WPF的3D特性外,在布局系统中可以根据深度值来设定可视化元素的尺寸大小,来达到某种程度的立体效果。
下面的例子使用Canvas.ZIndex属性来设置元素的层次,手动设置控件的大小并使用ScaleTransform来根据深度值的改变来进行缩放。用户界面包括一些圆形,每一个圆代表一定的深度。应用程序跟踪用户的手部关节点,以手形图标显示,图标会根据用户手部关节点的深度值来进行缩放,用户离Kinect越近,手形图标越大,反之越小。
<!-- Depth-based interaction demo: depth stream backdrop, debug readouts, fixed-depth
     target circles, and two hand cursors whose scale/ZIndex follow hand depth. -->
<Window x:Class="KinectDepthBasedInteraction.MainWindow"
        xmlns="http://schemas.microsoft.com/winfx/2006/xaml/presentation"
        xmlns:x="http://schemas.microsoft.com/winfx/2006/xaml"
        Title="MainWindow" Height="1080" Width="1920" WindowState="Maximized" Background="White">
    <Window.Resources>
        <!-- Shared look for the numbered depth labels; hit-testing disabled so the
             labels never intercept input meant for the elements beneath them. -->
        <Style x:Key="TargetLabel" TargetType="TextBlock">
            <Setter Property="FontSize" Value="40"/>
            <Setter Property="Foreground" Value="White"/>
            <Setter Property="FontWeight" Value="Bold" />
            <Setter Property="IsHitTestVisible" Value="False" />
        </Style>
    </Window.Resources>
    <!-- Viewbox uniformly scales the fixed 1920x1280 layout to the window size. -->
    <Viewbox>
        <Grid x:Name="LayoutRoot" Width="1920" Height="1280">
            <!-- Grayscale depth-stream image, painted from code-behind. -->
            <Image x:Name="DepthImage"/>
            <!-- Debug readouts for each hand's depth in feet. -->
            <StackPanel HorizontalAlignment="Left" VerticalAlignment="Top">
                <TextBlock x:Name="DebugLeftHand" Style="{StaticResource TargetLabel}" Foreground="Black"/>
                <TextBlock x:Name="DebugRightHand" Style="{StaticResource TargetLabel}" Foreground="Black"/>
            </StackPanel>
            <Canvas>
                <!-- TargetN marks a depth of N feet; Canvas.ZIndex encodes inverted depth
                     so nearer targets render on top of farther ones. -->
                <Ellipse x:Name="Target3" Fill="Orange" Height="200" Width="200" Canvas.Left="776" Canvas.Top="162" Canvas.ZIndex="1040" />
                <TextBlock Text="3" Canvas.Left="860" Canvas.Top="206" Panel.ZIndex="1040" Style="{StaticResource TargetLabel}" />
                <Ellipse x:Name="Target4" Fill="Purple" Height="150" Width="150" Canvas.Left="732" Canvas.Top="320" Canvas.ZIndex="940" />
                <TextBlock Text="4" Canvas.Left="840" Canvas.Top="372" Panel.ZIndex="940" Style="{StaticResource TargetLabel}" />
                <Ellipse x:Name="Target5" Fill="Green" Height="120" Width="120" Canvas.Left="880" Canvas.Top="592" Canvas.ZIndex="840" />
                <TextBlock Text="5" Canvas.Left="908" Canvas.Top="590" Panel.ZIndex="840" Style="{StaticResource TargetLabel}" />
                <Ellipse x:Name="Target6" Fill="Blue" Height="100" Width="100" Canvas.Left="352" Canvas.Top="544" Canvas.ZIndex="740" />
                <TextBlock Text="6" Canvas.Left="368" Canvas.Top="582" Panel.ZIndex="740" Style="{StaticResource TargetLabel}" />
                <Ellipse x:Name="Target7" Fill="Red" Height="85" Width="85" Canvas.Left="378" Canvas.Top="192" Canvas.ZIndex="640" />
                <TextBlock Text="7" Canvas.Left="422" Canvas.Top="226" Panel.ZIndex="640" Style="{StaticResource TargetLabel}" />
                <!-- Hand cursors; code-behind positions them and drives the ScaleTransforms
                     from hand depth (left hand is mirrored via a negative ScaleX).
                     NOTE(review): CenterY is "-1" on the left transform but "1" on the
                     right, and neither sets ScaleY in markup — looks inconsistent;
                     confirm against the original sample before changing. -->
                <Image x:Name="LeftHandElement" Source="/KinectDepthBasedInteraction;component/Images/hand.png" Width="80" Height="80" RenderTransformOrigin="0.5,0.5">
                    <Image.RenderTransform>
                        <ScaleTransform x:Name="LeftHandScaleTransform" ScaleX="1" CenterY="-1" />
                    </Image.RenderTransform>
                </Image>
                <Image x:Name="RightHandElement" Source="/KinectDepthBasedInteraction;component/Images/hand.png" Width="80" Height="80" RenderTransformOrigin="0.5,0.5">
                    <Image.RenderTransform>
                        <ScaleTransform x:Name="RightHandScaleTransform" CenterY="1" ScaleX="1" />
                    </Image.RenderTransform>
                </Image>
            </Canvas>
        </Grid>
    </Viewbox>
</Window>
不同颜色的圆形代表不同的深度,例如名为Target3的元素代表距离为3英尺。Target3的长宽比Target7要大,这简单的通过缩放可以实现。在我们的实例程序中,我们将其大小进行硬编码,实际的程序中,应该根据特定要求可以进行缩放。Canvas容器会根据子元素的Canvas.ZIndex的值对元素在垂直于计算机屏幕的方向上进行排列,例如最上面的元素,其Canvas.ZIndex最大。如果两个元素有相同的ZIndex值,那么会根据其在XAML中声明的顺序进行显示,在XAML中,后面声明的元素在之前声明的元素的前面。对于Canvas的所有子元素,ZIndex值越大,离屏幕越近,越小离屏幕越远。将深度值取反刚好能达到想要的效果。这意味着我们不能直接使用深度值来给ZIndex来赋值,而要对它进行一点转换。Kinect能够产生的最大深度值为13.4英尺,相应的,我们将Canvas.ZIndex的取值范围设置为0-1340,将深度值乘以100能获得更好的精度。因此Target5的Canvas.ZIndex设置为840((13.4-5)×100=8.4×100=840)。
设置好Canvas.ZIndex对于可视化元素的布局已经足够,但是不能够根据深度视觉效果对物体进行缩放。对于Kinect应用程序,Z值是其他输入设备所不能提供的,如果没有根据节点深度数据进行的缩放,那么这一独特的Z值就浪费了。缩放比例可能需要测试以后才能确定下来。
double z = hand.Position.Z*FeetPerMeters;Canvas.SetZIndex(cursorElement, (int)(1200- (z * 100))); cursorScale.ScaleX = 12 / z * (isLeft ? -1 : 1); cursorScale.ScaleY = 12 / z;编译并运行程序,将手置于距Kinect不同的距离,界面上的手形图标会随着距离的大小进行缩放;同时界面上用于调试的信息也在变化,还可以注意到,随着远近的不同,参考深度标注图案,手形图标在界面上的深度值也会相应的发生变化,有时候图标在某些标签的前面,有时候在某些标签后面。
/// <summary>
/// Demo window: renders the Kinect depth stream as a grayscale backdrop and tracks
/// both hands, positioning, scaling, and Z-ordering the hand cursors by depth so
/// they appear in front of or behind the fixed-depth target circles.
/// </summary>
public partial class MainWindow : Window
{
    private KinectSensor kinectDevice;
    private Skeleton[] frameSkeletons;     // reusable buffer filled on every skeleton frame
    private WriteableBitmap depthImage;    // backing bitmap for the DepthImage element
    private Int32Rect depthImageRect;      // full-bitmap rect used by WritePixels
    private short[] depthPixelData;        // raw depth pixels (player index + depth bits)
    private int depthImageStride;          // bytes per row: FrameWidth * 4 (Bgr32)
    private const float FeetPerMeters = 3.2808399f;  // meters -> feet conversion factor

    /// <summary>
    /// Active sensor. Assigning a new value first tears down the previous sensor
    /// (stop, unhook events, disable streams, release buffers), then wires up the
    /// depth and skeleton streams on the new one and starts it.
    /// </summary>
    private KinectSensor KinectDevice
    {
        get { return this.kinectDevice; }
        set
        {
            if (this.kinectDevice != null)
            {
                // BUG FIX: the original nulled the field BEFORE calling Stop(),
                // guaranteeing a NullReferenceException on sensor change/disconnect.
                // Tear down first, clear the field last.
                this.kinectDevice.Stop();
                this.kinectDevice.AllFramesReady -= kinectDevice_AllFramesReady;
                this.kinectDevice.SkeletonStream.Disable();
                this.DepthImage.Source = null;
                this.depthImage = null;
                this.depthImageStride = 0;
                this.frameSkeletons = null;   // (original assigned this twice; once is enough)
                this.kinectDevice = null;
            }

            this.kinectDevice = value;

            if (this.kinectDevice != null && this.kinectDevice.Status == KinectStatus.Connected)
            {
                this.kinectDevice.AllFramesReady += kinectDevice_AllFramesReady;
                this.kinectDevice.SkeletonStream.Enable();
                this.kinectDevice.DepthStream.Enable();

                DepthImageStream depthStream = this.kinectDevice.DepthStream;
                this.depthImage = new WriteableBitmap(depthStream.FrameWidth, depthStream.FrameHeight,
                                                      96, 96, PixelFormats.Bgr32, null);
                this.depthImageRect = new Int32Rect(0, 0,
                                                    (int)Math.Ceiling(this.depthImage.Width),
                                                    (int)Math.Ceiling(this.depthImage.Height));
                this.depthImageStride = depthStream.FrameWidth * 4;   // 4 bytes per Bgr32 pixel
                // BUG FIX: original line was corrupted by a scraped URL
                // ("= http://www.mamicode.com/new short[...]"); restored the allocation.
                this.depthPixelData = new short[depthStream.FramePixelDataLength];
                this.DepthImage.Source = this.depthImage;
                this.frameSkeletons = new Skeleton[this.kinectDevice.SkeletonStream.FrameSkeletonArrayLength];
                this.kinectDevice.Start();
            }
        }
    }

    public MainWindow()
    {
        InitializeComponent();
        // Defer sensor discovery until the window has loaded, and keep listening
        // for sensors that are plugged in/out afterwards.
        this.Loaded += (s, e) =>
        {
            KinectSensor.KinectSensors.StatusChanged += KinectSensors_StatusChanged;
            this.KinectDevice = KinectSensor.KinectSensors.FirstOrDefault(x => x.Status == KinectStatus.Connected);
        };
    }

    /// <summary>
    /// Tracks sensor hot-plug/status changes; the KinectDevice setter handles
    /// the actual teardown/re-initialization.
    /// </summary>
    private void KinectSensors_StatusChanged(Object sender, StatusChangedEventArgs e)
    {
        switch (e.Status)
        {
            case KinectStatus.Initializing:
            case KinectStatus.Connected:
            case KinectStatus.NotPowered:
            case KinectStatus.NotReady:
            case KinectStatus.DeviceNotGenuine:
                this.KinectDevice = e.Sensor;
                break;
            case KinectStatus.Disconnected:
                //TODO: Give the user feedback to plug-in a Kinect device.
                this.KinectDevice = null;
                break;
            default:
                //TODO: Show an error state
                break;
        }
    }

    /// <summary>
    /// Processes each frame pair; only acts when BOTH a depth and a skeleton
    /// frame are available, so image and cursors stay in sync.
    /// </summary>
    private void kinectDevice_AllFramesReady(Object sender, AllFramesReadyEventArgs e)
    {
        using (DepthImageFrame depthFrame = e.OpenDepthImageFrame())
        {
            if (depthFrame != null)
            {
                using (SkeletonFrame skeletonFrame = e.OpenSkeletonFrame())
                {
                    if (skeletonFrame != null)
                    {
                        ProcessDepthFrame(depthFrame);
                        ProcessSkeletonFrame(skeletonFrame);
                    }
                }
            }
        }
    }

    /// <summary>
    /// Converts the raw depth frame to a grayscale Bgr32 image: nearer pixels are
    /// darker, unknown depth (0) is rendered white.
    /// </summary>
    private void ProcessDepthFrame(DepthImageFrame depthFrame)
    {
        int depth;
        int gray;
        int bytesPerPixel = 4;
        // BUG FIX: original line was corrupted by a scraped URL; restored the allocation.
        byte[] enhPixelData = new byte[depthFrame.Width * depthFrame.Height * bytesPerPixel];
        depthFrame.CopyPixelDataTo(this.depthPixelData);
        for (int i = 0, j = 0; i < this.depthPixelData.Length; i++, j += bytesPerPixel)
        {
            // Low bits carry the player index; shift them off to get depth in mm.
            depth = this.depthPixelData[i] >> DepthImageFrame.PlayerIndexBitmaskWidth;
            if (depth == 0)
            {
                gray = 0xFF;   // no reading -> white
            }
            else
            {
                gray = (255 * depth / 0xFFF);   // scale 12-bit depth into 0-255
            }
            enhPixelData[j] = (byte)gray;       // B
            enhPixelData[j + 1] = (byte)gray;   // G
            enhPixelData[j + 2] = (byte)gray;   // R (4th byte unused in Bgr32)
        }
        this.depthImage.WritePixels(this.depthImageRect, enhPixelData, this.depthImageStride, 0);
    }

    /// <summary>
    /// Copies skeleton data into the reusable buffer and, if a primary (nearest
    /// tracked) skeleton exists, updates both hand cursors.
    /// </summary>
    private void ProcessSkeletonFrame(SkeletonFrame frame)
    {
        frame.CopySkeletonDataTo(this.frameSkeletons);
        Skeleton skeleton = GetPrimarySkeleton(this.frameSkeletons);
        if (skeleton != null)
        {
            TrackHand(skeleton.Joints[JointType.HandLeft], LeftHandElement, LeftHandScaleTransform, LayoutRoot, true);
            TrackHand(skeleton.Joints[JointType.HandRight], RightHandElement, RightHandScaleTransform, LayoutRoot, false);
        }
    }

    /// <summary>
    /// Returns the tracked skeleton closest to the sensor (smallest Z), or null
    /// when none is tracked.
    /// </summary>
    private Skeleton GetPrimarySkeleton(Skeleton[] skeleton)
    {
        Skeleton primarySkeleton = null;
        if (skeleton != null)
        {
            for (int i = 0; i < skeleton.Length; i++)
            {
                if (skeleton[i].TrackingState == SkeletonTrackingState.Tracked)
                {
                    if (primarySkeleton == null)
                    {
                        primarySkeleton = skeleton[i];
                    }
                    else if (primarySkeleton.Position.Z > skeleton[i].Position.Z)
                    {
                        primarySkeleton = skeleton[i];
                    }
                }
            }
        }
        return primarySkeleton;
    }

    /// <summary>
    /// Positions the hand cursor, sets its ZIndex from inverted depth (nearer hand
    /// draws in front of farther targets), and scales it inversely with depth.
    /// The left hand uses a negative ScaleX to mirror the cursor image.
    /// </summary>
    private void TrackHand(Joint hand, FrameworkElement cursorElement, ScaleTransform cursorScale, FrameworkElement container, bool isLeft)
    {
        if (hand.TrackingState != JointTrackingState.NotTracked)
        {
            double z = hand.Position.Z * FeetPerMeters;   // depth in feet
            cursorElement.Visibility = System.Windows.Visibility.Visible;
            Point cursorCenter = new Point(cursorElement.ActualWidth / 2.0, cursorElement.ActualHeight / 2.0);
            Point jointPoint = GetJointPoint(this.KinectDevice, hand, container.RenderSize, cursorCenter);
            Canvas.SetLeft(cursorElement, jointPoint.X);
            Canvas.SetTop(cursorElement, jointPoint.Y);
            // Invert depth so nearer hands get a larger ZIndex (x100 for precision).
            Canvas.SetZIndex(cursorElement, (int)(1200 - (z * 100)));
            cursorScale.ScaleX = 12 / z * ((isLeft) ? -1 : 1);
            cursorScale.ScaleY = 12 / z;
            if (hand.JointType == JointType.HandLeft)
            {
                DebugLeftHand.Text = String.Format("Left Hand:{0:0.00} Feet", z);
            }
            else
            {
                DebugRightHand.Text = String.Format("Right Hand:{0:0.00} Feet", z);
            }
        }
        else
        {
            DebugLeftHand.Text = String.Empty;
            DebugRightHand.Text = String.Empty;
        }
    }

    /// <summary>
    /// Maps a skeleton-space joint into depth-image space, then scales to the
    /// container's render size and offsets by the cursor's center so the cursor
    /// is centered over the joint.
    /// </summary>
    private Point GetJointPoint(KinectSensor kinectSensor, Joint hand, Size containerSize, Point cursorCenter)
    {
        DepthImagePoint point = kinectSensor.MapSkeletonPointToDepth(hand.Position, KinectDevice.DepthStream.Format);
        point.X = (int)((point.X * containerSize.Width / kinectSensor.DepthStream.FrameWidth) - cursorCenter.X);
        point.Y = (int)((point.Y * containerSize.Height / kinectSensor.DepthStream.FrameHeight) - cursorCenter.Y);
        return new Point(point.X, point.Y);
    }
}