Add proper get_pose for XRSpaces

Manish Goregaokar 2019-04-04 16:06:23 -07:00
parent d2e2b8da4d
commit e33896f3ec
3 changed files with 53 additions and 25 deletions

components/script/dom/xrreferencespace.rs

@@ -55,6 +55,9 @@ impl XRReferenceSpaceMethods for XRReferenceSpace {
 impl XRReferenceSpace {
     /// Gets pose of the viewer with respect to this space
+    ///
+    /// This is equivalent to `get_pose(self).inverse() * get_pose(viewerSpace)`, however
+    /// we specialize it to be efficient
     pub fn get_viewer_pose(&self, base_pose: &WebVRFrameData) -> RigidTransform3D<f64> {
         let pose = self.get_unoffset_viewer_pose(base_pose);
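The new doc comment above describes the relationship that get_viewer_pose specializes. A sketch of that generic computation, not part of the patch, using only euclid calls that already appear in this commit; viewer_pose_in_space and its parameters are illustrative names, and it assumes post_mul applies self first and its argument second, matching its use in get_pose below:

    use euclid::RigidTransform3D;

    /// Pose of the viewer expressed in a space's coordinates: take the viewer's
    /// pose in the common reference frame, then undo the space's own pose.
    fn viewer_pose_in_space(
        space_pose: &RigidTransform3D<f64>,  // get_pose(self)
        viewer_pose: &RigidTransform3D<f64>, // get_pose(viewerSpace)
    ) -> RigidTransform3D<f64> {
        // get_pose(self).inverse() * get_pose(viewerSpace)
        viewer_pose.post_mul(&space_pose.inverse())
    }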
@@ -72,7 +75,8 @@ impl XRReferenceSpace {
             stationary.get_unoffset_viewer_pose(base_pose)
         } else {
             // non-subclassed XRReferenceSpaces exist, obtained via the "identity"
-            // type. The pose does not depend on the base pose.
+            // type. These poses are equivalent to the viewer pose and follow the headset
+            // around, so the viewer is always at an identity transform with respect to them
             RigidTransform3D::identity()
         }
     }
@@ -82,7 +86,25 @@ impl XRReferenceSpace {
     /// The reference origin used is common between all
     /// get_pose calls for spaces from the same device, so this can be used to compare
     /// with other spaces
-    pub fn get_pose(&self, _: &WebVRFrameData) -> RigidTransform3D<f64> {
-        unimplemented!()
+    pub fn get_pose(&self, base_pose: &WebVRFrameData) -> RigidTransform3D<f64> {
+        let pose = self.get_unoffset_pose(base_pose);
+        // This may change, see https://github.com/immersive-web/webxr/issues/567
+        let offset = self.transform.get().transform();
+        offset.post_mul(&pose)
+    }
+
+    /// Gets pose represented by this space
+    ///
+    /// Does not apply originOffset, use get_viewer_pose instead if you need it
+    pub fn get_unoffset_pose(&self, base_pose: &WebVRFrameData) -> RigidTransform3D<f64> {
+        if let Some(stationary) = self.downcast::<XRStationaryReferenceSpace>() {
+            stationary.get_unoffset_pose(base_pose)
+        } else {
+            // non-subclassed XRReferenceSpaces exist, obtained via the "identity"
+            // type. These are equivalent to the viewer pose and follow the headset
+            // around
+            XRSpace::viewer_pose_from_frame_data(base_pose)
+        }
     }
 }
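A small worked example, not from the commit, of the originOffset composition get_pose now performs. The numbers are made up, and it assumes the rotation/translation fields of RigidTransform3D are public in the 0.19-era euclid Servo used at the time:

    use euclid::{RigidTransform3D, Rotation3D, Vector3D};

    fn origin_offset_example() {
        // Unoffset pose of the space: one metre in front of the reference origin
        // (-z is "forward" in WebXR), with no rotation.
        let unoffset: RigidTransform3D<f64> =
            RigidTransform3D::new(Rotation3D::identity(), Vector3D::new(0.0, 0.0, -1.0));
        // originOffset supplied by the page: shift the space's origin 0.5 m to the right.
        let offset: RigidTransform3D<f64> =
            RigidTransform3D::new(Rotation3D::identity(), Vector3D::new(0.5, 0.0, 0.0));
        // The same composition as get_pose above: apply the offset first, then the
        // unoffset pose. With no rotation involved the translations simply add.
        let pose = offset.post_mul(&unoffset);
        assert_eq!(pose.translation, Vector3D::new(0.5, 0.0, -1.0));
    }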

components/script/dom/xrspace.rs

@@ -11,7 +11,7 @@ use crate::dom::globalscope::GlobalScope;
 use crate::dom::xrreferencespace::XRReferenceSpace;
 use crate::dom::xrsession::XRSession;
 use dom_struct::dom_struct;
-use euclid::RigidTransform3D;
+use euclid::{RigidTransform3D, Rotation3D, Vector3D};
 use webvr_traits::WebVRFrameData;

 #[dom_struct]
@@ -39,16 +39,6 @@ impl XRSpace {
 }

 impl XRSpace {
-    /// Gets pose of the viewer with respect to this space
-    #[allow(unused)]
-    pub fn get_viewer_pose(&self, base_pose: &WebVRFrameData) -> RigidTransform3D<f64> {
-        if let Some(reference) = self.downcast::<XRReferenceSpace>() {
-            reference.get_viewer_pose(base_pose)
-        } else {
-            unreachable!()
-        }
-    }
-
     /// Gets pose represented by this space
     ///
     /// The reference origin used is common between all
@@ -62,4 +52,17 @@ impl XRSpace {
             unreachable!()
         }
     }
+
+    pub fn viewer_pose_from_frame_data(data: &WebVRFrameData) -> RigidTransform3D<f64> {
+        let pos = data.pose.position.unwrap_or([0., 0., 0.]);
+        let translation = Vector3D::new(pos[0] as f64, pos[1] as f64, pos[2] as f64);
+        let orient = data.pose.orientation.unwrap_or([0., 0., 0., 0.]);
+        let rotation = Rotation3D::quaternion(
+            orient[0] as f64,
+            orient[1] as f64,
+            orient[2] as f64,
+            orient[3] as f64,
+        );
+        RigidTransform3D::new(rotation, translation)
+    }
 }
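For context on the conversion above (not part of the patch): WebVR supplies the position in metres and the orientation as an (x, y, z, w) quaternion, which lines up with the argument order Rotation3D::quaternion takes. A hypothetical frame, for illustration only:

    // Hypothetical values a headset might report: 1.6 m above the reference
    // origin (y is up), rotated roughly 90 degrees around the y axis.
    fn example_frame_values() -> ([f32; 3], [f32; 4]) {
        let position = [0.0, 1.6, 0.0]; // metres
        let orientation = [0.0, 0.7071, 0.0, 0.7071]; // (x, y, z, w) quaternion
        // viewer_pose_from_frame_data would turn these into roughly
        // Rotation3D::quaternion(0.0, 0.7071, 0.0, 0.7071) and Vector3D::new(0.0, 1.6, 0.0).
        (position, orientation)
    }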

components/script/dom/xrstationaryreferencespace.rs

@@ -10,8 +10,9 @@ use crate::dom::globalscope::GlobalScope;
 use crate::dom::xrreferencespace::XRReferenceSpace;
 use crate::dom::xrrigidtransform::XRRigidTransform;
 use crate::dom::xrsession::XRSession;
+use crate::dom::xrspace::XRSpace;
 use dom_struct::dom_struct;
-use euclid::{RigidTransform3D, Rotation3D, Vector3D};
+use euclid::RigidTransform3D;
 use webvr_traits::WebVRFrameData;

 #[dom_struct]
@@ -55,15 +56,17 @@ impl XRStationaryReferenceSpace {
     /// Does not apply originOffset, use get_viewer_pose on XRReferenceSpace instead
     pub fn get_unoffset_viewer_pose(&self, base_pose: &WebVRFrameData) -> RigidTransform3D<f64> {
         // XXXManishearth add floor-level transform for floor-level and disable position in position-disabled
-        let pos = base_pose.pose.position.unwrap_or([0., 0., 0.]);
-        let translation = Vector3D::new(pos[0] as f64, pos[1] as f64, pos[2] as f64);
-        let orient = base_pose.pose.orientation.unwrap_or([0., 0., 0., 0.]);
-        let rotation = Rotation3D::quaternion(
-            orient[0] as f64,
-            orient[1] as f64,
-            orient[2] as f64,
-            orient[3] as f64,
-        );
-        RigidTransform3D::new(rotation, translation)
+        XRSpace::viewer_pose_from_frame_data(base_pose)
+    }
+
+    /// Gets pose represented by this space
+    ///
+    /// Does not apply originOffset, use get_pose on XRReferenceSpace instead
+    pub fn get_unoffset_pose(&self, _: &WebVRFrameData) -> RigidTransform3D<f64> {
+        // XXXManishearth add floor-level transform for floor-level and disable position in position-disabled
+        // The eye-level pose is basically whatever the headset pose was at t=0, which
+        // for most devices is (0, 0, 0)
+        RigidTransform3D::identity()
     }
 }
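Combined with the XRReferenceSpace methods above, an un-offset eye-level space stays pinned at the headset's start-of-session pose while the viewer moves relative to it. A sketch with made-up numbers, not part of the patch, again assuming 0.19-era euclid's post_mul ordering and a public translation field:

    use euclid::{RigidTransform3D, Rotation3D, Vector3D};

    fn eye_level_example() {
        // get_unoffset_pose: the eye-level space sits at the headset's t=0 pose.
        let space_pose: RigidTransform3D<f64> = RigidTransform3D::identity();
        // get_unoffset_viewer_pose: the user has since moved 0.3 m to the right
        // and 0.5 m forward (-z) of where the session started.
        let viewer_pose: RigidTransform3D<f64> =
            RigidTransform3D::new(Rotation3D::identity(), Vector3D::new(0.3, 0.0, -0.5));
        // Viewer pose relative to the space: undoing an identity pose is a no-op,
        // so the result is exactly the pose WebVR reported for this frame.
        let viewer_in_space = viewer_pose.post_mul(&space_pose.inverse());
        assert_eq!(viewer_in_space.translation, Vector3D::new(0.3, 0.0, -0.5));
    }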