Update reference spaces to new model

This commit is contained in:
Manish Goregaokar 2019-06-10 15:34:18 -07:00
parent 4c776f33d4
commit 26e0aaca6c
8 changed files with 76 additions and 225 deletions

View file

@@ -552,7 +552,6 @@ pub mod xrrenderstate;
pub mod xrrigidtransform;
pub mod xrsession;
pub mod xrspace;
pub mod xrstationaryreferencespace;
pub mod xrtest;
pub mod xrview;
pub mod xrviewerpose;

View file

@@ -4,6 +4,14 @@
// https://immersive-web.github.io/webxr/#xrreferencespace-interface
enum XRReferenceSpaceType {
"viewer",
"local",
"local-floor",
"bounded-floor",
"unbounded"
};
[SecureContext, Exposed=Window, Pref="dom.webxr.enabled"]
interface XRReferenceSpace : XRSpace {
attribute XRRigidTransform originOffset;

View file

@@ -20,10 +20,9 @@ interface XRSession : EventTarget {
readonly attribute XREnvironmentBlendMode environmentBlendMode;
readonly attribute XRRenderState renderState;
[SameObject] readonly attribute XRSpace viewerSpace;
// // Methods
Promise<XRReferenceSpace> requestReferenceSpace(XRReferenceSpaceOptions options);
Promise<XRReferenceSpace> requestReferenceSpace(XRReferenceSpaceType type);
// workaround until we have FrozenArray
// see https://github.com/servo/servo/issues/10427#issuecomment-449593626
@@ -46,14 +45,3 @@ interface XRSession : EventTarget {
// attribute EventHandler onselectend;
};
enum XRReferenceSpaceType {
"identity",
"stationary",
"bounded",
"unbounded"
};
dictionary XRReferenceSpaceOptions {
required XRReferenceSpaceType type;
XRStationaryReferenceSpaceSubtype subtype;
};

View file

@@ -1,16 +0,0 @@
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at https://mozilla.org/MPL/2.0/. */
// https://immersive-web.github.io/webxr/#xrstationaryreferencespace-interface
enum XRStationaryReferenceSpaceSubtype {
"eye-level",
"floor-level",
"position-disabled"
};
[SecureContext, Exposed=Window, Pref="dom.webxr.enabled"]
interface XRStationaryReferenceSpace: XRReferenceSpace {
// readonly attribute XRStationaryReferenceSpaceSubtype subtype;
};

View file

@@ -4,37 +4,46 @@
use crate::dom::bindings::codegen::Bindings::XRReferenceSpaceBinding;
use crate::dom::bindings::codegen::Bindings::XRReferenceSpaceBinding::XRReferenceSpaceMethods;
use crate::dom::bindings::inheritance::Castable;
use crate::dom::bindings::codegen::Bindings::XRReferenceSpaceBinding::XRReferenceSpaceType;
use crate::dom::bindings::reflector::reflect_dom_object;
use crate::dom::bindings::root::{DomRoot, MutDom};
use crate::dom::globalscope::GlobalScope;
use crate::dom::xrrigidtransform::XRRigidTransform;
use crate::dom::xrsession::XRSession;
use crate::dom::xrspace::XRSpace;
use crate::dom::xrstationaryreferencespace::XRStationaryReferenceSpace;
use dom_struct::dom_struct;
use euclid::RigidTransform3D;
use euclid::{RigidTransform3D, Vector3D};
use webvr_traits::WebVRFrameData;
#[dom_struct]
pub struct XRReferenceSpace {
xrspace: XRSpace,
transform: MutDom<XRRigidTransform>,
ty: XRReferenceSpaceType,
}
impl XRReferenceSpace {
pub fn new_inherited(session: &XRSession, transform: &XRRigidTransform) -> XRReferenceSpace {
pub fn new_inherited(
session: &XRSession,
transform: &XRRigidTransform,
ty: XRReferenceSpaceType,
) -> XRReferenceSpace {
XRReferenceSpace {
xrspace: XRSpace::new_inherited(session),
transform: MutDom::new(transform),
ty,
}
}
#[allow(unused)]
pub fn identity(global: &GlobalScope, session: &XRSession) -> DomRoot<XRReferenceSpace> {
pub fn new(
global: &GlobalScope,
session: &XRSession,
ty: XRReferenceSpaceType,
) -> DomRoot<XRReferenceSpace> {
let transform = XRRigidTransform::identity(global);
reflect_dom_object(
Box::new(XRReferenceSpace::new_inherited(session, &transform)),
Box::new(XRReferenceSpace::new_inherited(session, &transform, ty)),
global,
XRReferenceSpaceBinding::Wrap,
)
@@ -77,13 +86,36 @@ impl XRReferenceSpace {
///
/// Does not apply originOffset, use get_viewer_pose instead if you need it
pub fn get_unoffset_viewer_pose(&self, base_pose: &WebVRFrameData) -> RigidTransform3D<f64> {
if let Some(stationary) = self.downcast::<XRStationaryReferenceSpace>() {
stationary.get_unoffset_viewer_pose(base_pose)
} else {
// non-subclassed XRReferenceSpaces exist, obtained via the "identity"
// type. These poses are equivalent to the viewer pose and follow the headset
// around, so the viewer is always at an identity transform with respect to them
RigidTransform3D::identity()
let viewer_pose = XRSpace::pose_to_transform(&base_pose.pose);
// all math is in column-vector notation
// we use the following equation to verify correctness here:
// get_viewer_pose(space) = get_pose(space).inverse() * get_pose(viewer_space)
match self.ty {
XRReferenceSpaceType::Local => {
// get_viewer_pose(eye_level) = get_pose(eye_level).inverse() * get_pose(viewer_space)
// = I * viewer_pose
// = viewer_pose
// we get viewer poses in eye-level space by default
viewer_pose
},
XRReferenceSpaceType::Local_floor => {
// XXXManishearth support getting floor info from stage parameters
// get_viewer_pose(floor_level) = get_pose(floor_level).inverse() * get_pose(viewer_space)
// = Translate(-2).inverse() * viewer_pose
// = Translate(2) * viewer_pose
// assume approximate user height of 2 meters
let floor_to_eye: RigidTransform3D<f64> = Vector3D::new(0., 2., 0.).into();
floor_to_eye.pre_mul(&viewer_pose)
},
XRReferenceSpaceType::Viewer => {
// This reference space follows the viewer around, so the viewer is
// always at an identity transform with respect to it
RigidTransform3D::identity()
},
_ => unimplemented!(),
}
}
@@ -104,13 +136,21 @@ impl XRReferenceSpace {
///
/// Does not apply originOffset, use get_viewer_pose instead if you need it
pub fn get_unoffset_pose(&self, base_pose: &WebVRFrameData) -> RigidTransform3D<f64> {
if let Some(stationary) = self.downcast::<XRStationaryReferenceSpace>() {
stationary.get_unoffset_pose(base_pose)
} else {
// non-subclassed XRReferenceSpaces exist, obtained via the "identity"
// type. These are equivalent to the viewer pose and follow the headset
// around
XRSpace::pose_to_transform(&base_pose.pose)
match self.ty {
XRReferenceSpaceType::Local => {
// The eye-level pose is basically whatever the headset pose was at t=0, which
// for most devices is (0, 0, 0)
RigidTransform3D::identity()
},
XRReferenceSpaceType::Local_floor => {
// XXXManishearth support getting floor info from stage parameters
// Assume approximate height of 2m
// the floor-level space is 2m below the eye-level space, which is (0, 0, 0)
Vector3D::new(0., -2., 0.).into()
},
XRReferenceSpaceType::Viewer => XRSpace::pose_to_transform(&base_pose.pose),
_ => unimplemented!(),
}
}
}

View file

@@ -5,12 +5,11 @@
use crate::compartments::InCompartment;
use crate::dom::bindings::codegen::Bindings::VRDisplayBinding::VRDisplayMethods;
use crate::dom::bindings::codegen::Bindings::XRBinding::XRSessionMode;
use crate::dom::bindings::codegen::Bindings::XRReferenceSpaceBinding::XRReferenceSpaceType;
use crate::dom::bindings::codegen::Bindings::XRRenderStateBinding::XRRenderStateInit;
use crate::dom::bindings::codegen::Bindings::XRSessionBinding;
use crate::dom::bindings::codegen::Bindings::XRSessionBinding::XREnvironmentBlendMode;
use crate::dom::bindings::codegen::Bindings::XRSessionBinding::XRFrameRequestCallback;
use crate::dom::bindings::codegen::Bindings::XRSessionBinding::XRReferenceSpaceOptions;
use crate::dom::bindings::codegen::Bindings::XRSessionBinding::XRReferenceSpaceType;
use crate::dom::bindings::codegen::Bindings::XRSessionBinding::XRSessionMethods;
use crate::dom::bindings::error::Error;
use crate::dom::bindings::reflector::{reflect_dom_object, DomObject};
@@ -24,7 +23,6 @@ use crate::dom::xrlayer::XRLayer;
use crate::dom::xrreferencespace::XRReferenceSpace;
use crate::dom::xrrenderstate::XRRenderState;
use crate::dom::xrspace::XRSpace;
use crate::dom::xrstationaryreferencespace::XRStationaryReferenceSpace;
use dom_struct::dom_struct;
use std::rc::Rc;
@@ -87,12 +85,6 @@ impl XRSessionMethods for XRSession {
)
}
// https://immersive-web.github.io/webxr/#dom-xrsession-viewerspace
fn ViewerSpace(&self) -> DomRoot<XRSpace> {
self.viewer_space
.or_init(|| XRSpace::new_viewerspace(&self.global(), &self))
}
/// https://immersive-web.github.io/webxr/#dom-xrsession-requestanimationframe
fn UpdateRenderState(&self, init: &XRRenderStateInit, comp: InCompartment) -> Rc<Promise> {
let p = Promise::new_in_current_compartment(&self.global(), comp);
@@ -116,11 +108,7 @@ impl XRSessionMethods for XRSession {
}
/// https://immersive-web.github.io/webxr/#dom-xrsession-requestreferencespace
fn RequestReferenceSpace(
&self,
options: &XRReferenceSpaceOptions,
comp: InCompartment,
) -> Rc<Promise> {
fn RequestReferenceSpace(&self, ty: XRReferenceSpaceType, comp: InCompartment) -> Rc<Promise> {
let p = Promise::new_in_current_compartment(&self.global(), comp);
// https://immersive-web.github.io/webxr/#create-a-reference-space
@@ -128,27 +116,14 @@ impl XRSessionMethods for XRSession {
// XXXManishearth reject based on session type
// https://github.com/immersive-web/webxr/blob/master/spatial-tracking-explainer.md#practical-usage-guidelines
match options.type_ {
XRReferenceSpaceType::Identity => {
p.resolve_native(&XRReferenceSpace::identity(&self.global(), self));
},
XRReferenceSpaceType::Stationary => {
if let Some(subtype) = options.subtype {
p.resolve_native(&XRStationaryReferenceSpace::new(
&self.global(),
self,
subtype,
));
} else {
p.reject_error(Error::Type(format!(
"stationary XRReferenceSpaces must specify a subtype"
)))
}
},
XRReferenceSpaceType::Bounded | XRReferenceSpaceType::Unbounded => {
match ty {
XRReferenceSpaceType::Bounded_floor | XRReferenceSpaceType::Unbounded => {
// XXXManishearth eventually support these
p.reject_error(Error::NotSupported)
},
ty => {
p.resolve_native(&XRReferenceSpace::new(&self.global(), self, ty));
},
}
p

View file

@@ -19,7 +19,6 @@ use webvr_traits::{WebVRFrameData, WebVRPose};
pub struct XRSpace {
eventtarget: EventTarget,
session: Dom<XRSession>,
is_viewerspace: bool,
input_source: MutNullableDom<XRInputSource>,
}
@@ -28,33 +27,14 @@ impl XRSpace {
XRSpace {
eventtarget: EventTarget::new_inherited(),
session: Dom::from_ref(session),
is_viewerspace: false,
input_source: Default::default(),
}
}
fn new_viewerspace_inner(session: &XRSession) -> XRSpace {
XRSpace {
eventtarget: EventTarget::new_inherited(),
session: Dom::from_ref(session),
is_viewerspace: true,
input_source: Default::default(),
}
}
pub fn new_viewerspace(global: &GlobalScope, session: &XRSession) -> DomRoot<XRSpace> {
reflect_dom_object(
Box::new(XRSpace::new_viewerspace_inner(session)),
global,
XRSpaceBinding::Wrap,
)
}
fn new_inputspace_inner(session: &XRSession, input: &XRInputSource) -> XRSpace {
XRSpace {
eventtarget: EventTarget::new_inherited(),
session: Dom::from_ref(session),
is_viewerspace: false,
input_source: MutNullableDom::new(Some(input)),
}
}
@@ -81,8 +61,6 @@ impl XRSpace {
pub fn get_pose(&self, base_pose: &WebVRFrameData) -> RigidTransform3D<f64> {
if let Some(reference) = self.downcast::<XRReferenceSpace>() {
reference.get_pose(base_pose)
} else if self.is_viewerspace {
XRSpace::pose_to_transform(&base_pose.pose)
} else if let Some(source) = self.input_source.get() {
XRSpace::pose_to_transform(&source.pose())
} else {

View file

@@ -1,121 +0,0 @@
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at https://mozilla.org/MPL/2.0/. */
use crate::dom::bindings::codegen::Bindings::XRStationaryReferenceSpaceBinding;
use crate::dom::bindings::codegen::Bindings::XRStationaryReferenceSpaceBinding::XRStationaryReferenceSpaceSubtype;
use crate::dom::bindings::reflector::reflect_dom_object;
use crate::dom::bindings::root::DomRoot;
use crate::dom::globalscope::GlobalScope;
use crate::dom::xrreferencespace::XRReferenceSpace;
use crate::dom::xrrigidtransform::XRRigidTransform;
use crate::dom::xrsession::XRSession;
use crate::dom::xrspace::XRSpace;
use dom_struct::dom_struct;
use euclid::{RigidTransform3D, Vector3D};
use webvr_traits::WebVRFrameData;
#[dom_struct]
pub struct XRStationaryReferenceSpace {
xrreferencespace: XRReferenceSpace,
ty: XRStationaryReferenceSpaceSubtype,
}
#[allow(unused)]
impl XRStationaryReferenceSpace {
pub fn new_inherited(
session: &XRSession,
ty: XRStationaryReferenceSpaceSubtype,
transform: &XRRigidTransform,
) -> XRStationaryReferenceSpace {
XRStationaryReferenceSpace {
xrreferencespace: XRReferenceSpace::new_inherited(session, transform),
ty,
}
}
pub fn new(
global: &GlobalScope,
session: &XRSession,
ty: XRStationaryReferenceSpaceSubtype,
) -> DomRoot<XRStationaryReferenceSpace> {
let transform = XRRigidTransform::identity(global);
reflect_dom_object(
Box::new(XRStationaryReferenceSpace::new_inherited(
session, ty, &transform,
)),
global,
XRStationaryReferenceSpaceBinding::Wrap,
)
}
}
impl XRStationaryReferenceSpace {
/// Gets pose of the viewer with respect to this space
///
/// Does not apply originOffset, use get_viewer_pose on XRReferenceSpace instead
pub fn get_unoffset_viewer_pose(&self, viewer_pose: &WebVRFrameData) -> RigidTransform3D<f64> {
let viewer_pose = XRSpace::pose_to_transform(&viewer_pose.pose);
// all math is in column-vector notation
// we use the following equation to verify correctness here:
// get_viewer_pose(space) = get_pose(space).inverse() * get_pose(viewer_space)
match self.ty {
XRStationaryReferenceSpaceSubtype::Eye_level => {
// get_viewer_pose(eye_level) = get_pose(eye_level).inverse() * get_pose(viewer_space)
// = I * viewer_pose
// = viewer_pose
// we get viewer poses in eye-level space by default
viewer_pose
},
XRStationaryReferenceSpaceSubtype::Floor_level => {
// XXXManishearth support getting floor info from stage parameters
// get_viewer_pose(floor_level) = get_pose(floor_level).inverse() * get_pose(viewer_space)
// = Translate(-2).inverse() * viewer_pose
// = Translate(2) * viewer_pose
// assume approximate user height of 2 meters
let floor_to_eye: RigidTransform3D<f64> = Vector3D::new(0., 2., 0.).into();
floor_to_eye.pre_mul(&viewer_pose)
},
XRStationaryReferenceSpaceSubtype::Position_disabled => {
// get_viewer_pose(pos_disabled) = get_pose(pos_disabled).inverse() * get_pose(viewer_space)
// = viewer_pose.translation.inverse() * viewer_pose
// = viewer_pose.translation.inverse() * viewer_pose.translation
// * viewer_pose.rotation
// = viewer_pose.rotation
// This space follows the user around, but does not mirror the user's orientation
// Thus, the viewer's pose relative to this space is simply their orientation
viewer_pose.rotation.into()
},
}
}
/// Gets pose represented by this space
///
/// Does not apply originOffset, use get_pose on XRReferenceSpace instead
pub fn get_unoffset_pose(&self, viewer_pose: &WebVRFrameData) -> RigidTransform3D<f64> {
// XXXManishearth add floor-level transform for floor-level and disable position in position-disabled
match self.ty {
XRStationaryReferenceSpaceSubtype::Eye_level => {
// The eye-level pose is basically whatever the headset pose was at t=0, which
// for most devices is (0, 0, 0)
RigidTransform3D::identity()
},
XRStationaryReferenceSpaceSubtype::Floor_level => {
// XXXManishearth support getting floor info from stage parameters
// Assume approximate height of 2m
// the floor-level space is 2m below the eye-level space, which is (0, 0, 0)
Vector3D::new(0., -2., 0.).into()
},
XRStationaryReferenceSpaceSubtype::Position_disabled => {
// This space follows the user around, but does not mirror the user's orientation
let viewer_pose = XRSpace::pose_to_transform(&viewer_pose.pose);
viewer_pose.translation.into()
},
}
}
}