Auto merge of #23169 - Manishearth:more-pose, r=jdm

Some more pose fixes

Based on https://github.com/servo/servo/pull/23164

This:

 - Adds support for position-disabled
 - Adds approximate support for floor-level
 - Makes `transform.inverse.inverse` return the original transform
(https://github.com/immersive-web/webxr/issues/576); see the sketch after this list
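
A minimal standalone sketch of that last item's caching idea, using plain `Rc`/`Weak` and a hypothetical `Transform` struct rather than servo's DOM types (the real XRRigidTransform is a GC-managed DOM object, so the cycle is handled for us there; the sketch uses a weak back-reference to avoid a leak):

```rust
use std::cell::RefCell;
use std::rc::{Rc, Weak};

// Hypothetical stand-in for XRRigidTransform (not servo's actual type).
struct Transform {
    // the actual rigid-transform data is omitted in this sketch
    inverse: RefCell<Option<Weak<Transform>>>,
}

fn inverse_of(this: &Rc<Transform>) -> Rc<Transform> {
    // Return the cached inverse if we already built one.
    if let Some(existing) = this.inverse.borrow().as_ref().and_then(Weak::upgrade) {
        return existing;
    }
    // Otherwise build it, seeding its own `inverse` slot with the original,
    // so that inverse_of(&inverse_of(&t)) is the very same object as `t`.
    let inv = Rc::new(Transform {
        inverse: RefCell::new(Some(Rc::downgrade(this))),
    });
    *this.inverse.borrow_mut() = Some(Rc::downgrade(&inv));
    inv
}

fn main() {
    let t = Rc::new(Transform { inverse: RefCell::new(None) });
    let inv = inverse_of(&t);
    assert!(Rc::ptr_eq(&inverse_of(&inv), &t));
}
```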

To support floor-level *properly* we have to decompose the `sitting_to_standing_transform` matrix. I'll have to add decomposition to euclid to do this, sadly.
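
For reference, a rough sketch of what such a decomposition could look like, written against plain arrays rather than euclid types. It assumes the matrix really is rigid (row-major storage, translation in the last row, no scale or shear); this is illustrative, not the eventual euclid API:

```rust
/// Rough sketch of rigid-matrix decomposition (not the euclid API): split an
/// assumed-rigid 4x4 matrix into a translation and a unit quaternion.
/// Assumes row-major storage with the translation in the last row, and that the
/// upper-left 3x3 block is a pure rotation (no scale or shear).
fn decompose_rigid(m: &[[f64; 4]; 4]) -> ([f64; 3], [f64; 4]) {
    let translation = [m[3][0], m[3][1], m[3][2]];

    // Standard rotation-matrix-to-quaternion conversion (Shepperd's method),
    // with signs adjusted for the row-vector convention assumed above.
    let trace = m[0][0] + m[1][1] + m[2][2];
    let q = if trace > 0.0 {
        let s = (trace + 1.0).sqrt() * 2.0;
        [(m[1][2] - m[2][1]) / s, (m[2][0] - m[0][2]) / s, (m[0][1] - m[1][0]) / s, 0.25 * s]
    } else if m[0][0] > m[1][1] && m[0][0] > m[2][2] {
        let s = (1.0 + m[0][0] - m[1][1] - m[2][2]).sqrt() * 2.0;
        [0.25 * s, (m[1][0] + m[0][1]) / s, (m[2][0] + m[0][2]) / s, (m[1][2] - m[2][1]) / s]
    } else if m[1][1] > m[2][2] {
        let s = (1.0 + m[1][1] - m[0][0] - m[2][2]).sqrt() * 2.0;
        [(m[1][0] + m[0][1]) / s, 0.25 * s, (m[2][1] + m[1][2]) / s, (m[2][0] - m[0][2]) / s]
    } else {
        let s = (1.0 + m[2][2] - m[0][0] - m[1][1]).sqrt() * 2.0;
        [(m[2][0] + m[0][2]) / s, (m[2][1] + m[1][2]) / s, 0.25 * s, (m[0][1] - m[1][0]) / s]
    };

    // (translation, quaternion as [x, y, z, w])
    (translation, q)
}
```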

It may be possible to reuse the decomposition code in servo's style crate, but there's a chance it's written in column-vector style, since it reflects the CSS Transforms spec, which is also written in column-vector style. Ugh.

r? @asajeffrey

Merged commit 0ba7da4431 by bors-servo, 2019-04-15 18:46:12 -04:00 (committed by GitHub).
3 changed files with 70 additions and 12 deletions


@@ -56,12 +56,18 @@ impl XRReferenceSpaceMethods for XRReferenceSpace {
impl XRReferenceSpace {
/// Gets pose of the viewer with respect to this space
///
/// This is equivalent to `get_pose(self).inverse() * get_pose(viewerSpace)`, however
/// we specialize it to be efficient
/// This is equivalent to `get_pose(self).inverse() * get_pose(viewerSpace)` (in column vector notation),
/// however we specialize it to be efficient
pub fn get_viewer_pose(&self, base_pose: &WebVRFrameData) -> RigidTransform3D<f64> {
let pose = self.get_unoffset_viewer_pose(base_pose);
// This may change, see https://github.com/immersive-web/webxr/issues/567
// in column-vector notation,
// get_viewer_pose(space) = get_pose(space).inverse() * get_pose(viewer_space)
// = (get_unoffset_pose(space) * offset).inverse() * get_pose(viewer_space)
// = offset.inverse() * get_unoffset_pose(space).inverse() * get_pose(viewer_space)
// = offset.inverse() * get_unoffset_viewer_pose(space)
let offset = self.transform.get().transform();
let inverse = offset.inverse();
inverse.pre_mul(&pose)


@@ -100,8 +100,11 @@ impl XRRigidTransformMethods for XRRigidTransform {
}
// https://immersive-web.github.io/webxr/#dom-xrrigidtransform-inverse
fn Inverse(&self) -> DomRoot<XRRigidTransform> {
self.inverse
.or_init(|| XRRigidTransform::new(&self.global(), self.transform.inverse()))
self.inverse.or_init(|| {
let transform = XRRigidTransform::new(&self.global(), self.transform.inverse());
transform.inverse.set(Some(self));
transform
})
}
// https://immersive-web.github.io/webxr/#dom-xrrigidtransform-matrix
#[allow(unsafe_code)]


@@ -12,7 +12,7 @@ use crate::dom::xrrigidtransform::XRRigidTransform;
use crate::dom::xrsession::XRSession;
use crate::dom::xrspace::XRSpace;
use dom_struct::dom_struct;
use euclid::RigidTransform3D;
use euclid::{RigidTransform3D, Vector3D};
use webvr_traits::WebVRFrameData;
#[dom_struct]
@@ -54,19 +54,68 @@ impl XRStationaryReferenceSpace {
/// Gets pose of the viewer with respect to this space
///
/// Does not apply originOffset, use get_viewer_pose on XRReferenceSpace instead
pub fn get_unoffset_viewer_pose(&self, base_pose: &WebVRFrameData) -> RigidTransform3D<f64> {
// XXXManishearth add floor-level transform for floor-level and disable position in position-disabled
XRSpace::viewer_pose_from_frame_data(base_pose)
pub fn get_unoffset_viewer_pose(&self, viewer_pose: &WebVRFrameData) -> RigidTransform3D<f64> {
let viewer_pose = XRSpace::viewer_pose_from_frame_data(viewer_pose);
// all math is in column-vector notation
// we use the following equation to verify correctness here:
// get_viewer_pose(space) = get_pose(space).inverse() * get_pose(viewer_space)
match self.ty {
XRStationaryReferenceSpaceSubtype::Eye_level => {
// get_viewer_pose(eye_level) = get_pose(eye_level).inverse() * get_pose(viewer_space)
// = I * viewer_pose
// = viewer_pose
// we get viewer poses in eye-level space by default
viewer_pose
},
XRStationaryReferenceSpaceSubtype::Floor_level => {
// XXXManishearth support getting floor info from stage parameters
// get_viewer_pose(floor_level) = get_pose(floor_level).inverse() * get_pose(viewer_space)
// = Translate(-2).inverse() * viewer_pose
// = Translate(2) * viewer_pose
// assume approximate user height of 2 meters
let floor_to_eye: RigidTransform3D<f64> = Vector3D::new(0., 2., 0.).into();
floor_to_eye.pre_mul(&viewer_pose)
},
XRStationaryReferenceSpaceSubtype::Position_disabled => {
// get_viewer_pose(pos_disabled) = get_pose(pos_disabled).inverse() * get_pose(viewer_space)
// = viewer_pose.translation.inverse() * viewer_pose
// = viewer_pose.translation.inverse() * viewer_pose.translation
// * viewer_pose.rotation
// = viewer_pose.rotation
// This space follows the user around, but does not mirror the user's orientation
// Thus, the viewer's pose relative to this space is simply their orientation
viewer_pose.rotation.into()
},
}
}
/// Gets pose represented by this space
///
/// Does not apply originOffset, use get_pose on XRReferenceSpace instead
pub fn get_unoffset_pose(&self, _: &WebVRFrameData) -> RigidTransform3D<f64> {
pub fn get_unoffset_pose(&self, viewer_pose: &WebVRFrameData) -> RigidTransform3D<f64> {
// XXXManishearth add floor-level transform for floor-level and disable position in position-disabled
match self.ty {
XRStationaryReferenceSpaceSubtype::Eye_level => {
// The eye-level pose is basically whatever the headset pose was at t=0, which
// for most devices is (0, 0, 0)
RigidTransform3D::identity()
},
XRStationaryReferenceSpaceSubtype::Floor_level => {
// XXXManishearth support getting floor info from stage parameters
// Assume approximate height of 2m
// the floor-level space is 2m below the eye-level space, which is (0, 0, 0)
Vector3D::new(0., -2., 0.).into()
},
XRStationaryReferenceSpaceSubtype::Position_disabled => {
// This space follows the user around, but does not mirror the user's orientation
let viewer_pose = XRSpace::viewer_pose_from_frame_data(viewer_pose);
viewer_pose.translation.into()
},
}
}
}
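
Outside the diff, a tiny standalone check of the floor-level and position-disabled math above, using the 2 m placeholder height and a made-up headset position (plain arithmetic, not servo/euclid types):

```rust
fn main() {
    // floor-level: get_pose(floor_level) = Translate(0, -2, 0), so a viewer sitting
    // at the eye-level origin should be reported 2 m above the floor-level origin.
    let floor_level_origin_y = -2.0;
    let viewer_y_eye_level = 0.0;
    assert_eq!(viewer_y_eye_level - floor_level_origin_y, 2.0);

    // position-disabled: the space's origin tracks the viewer's translation, so the
    // viewer's position relative to it is always zero and only orientation remains.
    let viewer_translation = [0.3_f64, 0.1, -0.5]; // made-up headset position
    let space_origin = viewer_translation;         // the space follows the user around
    let relative: Vec<f64> = viewer_translation
        .iter()
        .zip(space_origin.iter())
        .map(|(v, s)| v - s)
        .collect();
    assert!(relative.iter().all(|c| *c == 0.0));
}
```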