Auto merge of #25259 - Manishearth:wpt-fixes, r=asajeffrey

Various webxr WPT fixes

Needs https://github.com/servo/webxr/pull/108

r? @asajeffrey

Went through most of the failing tests and fixed them. Many of the remaining ones fail due to unsupported features that I can slowly whittle away.
This commit is contained in:
bors-servo 2019-12-12 18:13:15 -05:00 committed by GitHub
commit 748edb2cd9
No known key found for this signature in database
GPG key ID: 4AEE18F83AFDEB23
24 changed files with 212 additions and 180 deletions

View file

@ -20,7 +20,7 @@ use ipc_channel::ipc::IpcSender;
use ipc_channel::router::ROUTER;
use profile_traits::ipc;
use std::rc::Rc;
use webxr_api::{MockDeviceMsg, View, Views};
use webxr_api::{MockDeviceMsg, MockViewInit, MockViewsInit};
#[dom_struct]
pub struct FakeXRDevice {
@ -50,58 +50,57 @@ impl FakeXRDevice {
}
}
pub fn get_views(views: &[FakeXRViewInit]) -> Fallible<Views> {
if views.len() != 2 {
return Err(Error::NotSupported);
}
let (left, right) = match (views[0].eye, views[1].eye) {
(XREye::Left, XREye::Right) => (&views[0], &views[1]),
(XREye::Right, XREye::Left) => (&views[1], &views[0]),
_ => return Err(Error::NotSupported),
};
if left.projectionMatrix.len() != 16 ||
right.projectionMatrix.len() != 16 ||
left.viewOffset.position.len() != 3 ||
right.viewOffset.position.len() != 3
{
pub fn view<Eye>(view: &FakeXRViewInit) -> Fallible<MockViewInit<Eye>> {
if view.projectionMatrix.len() != 16 || view.viewOffset.position.len() != 3 {
return Err(Error::Type("Incorrectly sized array".into()));
}
let mut proj_l = [0.; 16];
let mut proj_r = [0.; 16];
let v: Vec<_> = left.projectionMatrix.iter().map(|x| **x).collect();
proj_l.copy_from_slice(&v);
let proj_l = Transform3D::from_array(proj_l);
let v: Vec<_> = right.projectionMatrix.iter().map(|x| **x).collect();
proj_r.copy_from_slice(&v);
let proj_r = Transform3D::from_array(proj_r);
let mut proj = [0.; 16];
let v: Vec<_> = view.projectionMatrix.iter().map(|x| **x).collect();
proj.copy_from_slice(&v);
let projection = Transform3D::from_array(proj);
// spec defines offsets as origins, but mock API expects the inverse transform
let offset_l = get_origin(&left.viewOffset)?.inverse();
let offset_r = get_origin(&right.viewOffset)?.inverse();
let transform = get_origin(&view.viewOffset)?.inverse();
let size_l = Size2D::new(views[0].resolution.width, views[0].resolution.height);
let size_r = Size2D::new(views[1].resolution.width, views[1].resolution.height);
let origin_l = Point2D::new(0, 0);
let origin_r = Point2D::new(size_l.width, 0);
let viewport_l = Rect::new(origin_l, size_l);
let viewport_r = Rect::new(origin_r, size_r);
let left = View {
projection: proj_l,
transform: offset_l,
viewport: viewport_l,
let size = Size2D::new(view.resolution.width, view.resolution.height);
let origin = match view.eye {
XREye::Right => Point2D::new(size.width, 0),
_ => Point2D::new(0, 0),
};
let right = View {
projection: proj_r,
transform: offset_r,
viewport: viewport_r,
let viewport = Rect::new(origin, size);
let fov = if let Some(ref fov) = view.fieldOfView {
Some((
fov.leftDegrees.to_radians(),
fov.rightDegrees.to_radians(),
fov.upDegrees.to_radians(),
fov.downDegrees.to_radians(),
))
} else {
None
};
Ok(Views::Stereo(left, right))
Ok(MockViewInit {
projection,
transform,
viewport,
fov,
})
}
/// Converts the WebIDL-provided list of fake views into the mock device's
/// view configuration.
///
/// The test API accepts exactly one view (mono) or two views (stereo); any
/// other count — or a two-view list that is not one left eye plus one right
/// eye — is rejected with `Error::NotSupported`. Per-view validation and
/// conversion is delegated to `view`, whose errors are propagated.
pub fn get_views(views: &[FakeXRViewInit]) -> Fallible<MockViewsInit> {
    match views.len() {
        // Single view: mono configuration.
        1 => Ok(MockViewsInit::Mono(view(&views[0])?)),
        2 => {
            // Normalize ordering so `left` is always the left eye — the
            // caller may list the two eyes in either order.
            let (left, right) = match (views[0].eye, views[1].eye) {
                (XREye::Left, XREye::Right) => (&views[0], &views[1]),
                (XREye::Right, XREye::Left) => (&views[1], &views[0]),
                _ => return Err(Error::NotSupported),
            };
            Ok(MockViewsInit::Stereo(view(left)?, view(right)?))
        },
        _ => Err(Error::NotSupported),
    }
}
pub fn get_origin<T, U>(
@ -134,7 +133,7 @@ impl FakeXRDeviceMethods for FakeXRDevice {
Ok(())
}
/// https://github.com/immersive-web/webxr-test-api/blob/master/explainer.md
/// https://immersive-web.github.io/webxr-test-api/#dom-fakexrdevice-setviewerorigin
fn SetViewerOrigin(
&self,
origin: &FakeXRRigidTransformInit,
@ -142,7 +141,25 @@ impl FakeXRDeviceMethods for FakeXRDevice {
) -> Fallible<()> {
let _ = self
.sender
.send(MockDeviceMsg::SetViewerOrigin(get_origin(origin)?));
.send(MockDeviceMsg::SetViewerOrigin(Some(get_origin(origin)?)));
Ok(())
}
/// Clears any previously set viewer origin on the mock device by sending
/// `SetViewerOrigin(None)`.
///
/// https://immersive-web.github.io/webxr-test-api/#dom-fakexrdevice-clearviewerorigin
fn ClearViewerOrigin(&self) {
    // The send result is deliberately ignored: the mock device's channel
    // may already be closed, and this test-only API has no failure mode.
    let _ = self.sender.send(MockDeviceMsg::SetViewerOrigin(None));
}
/// Clears any previously set floor origin on the mock device by sending
/// `SetFloorOrigin(None)`.
///
/// https://immersive-web.github.io/webxr-test-api/#dom-fakexrdevice-clearfloororigin
fn ClearFloorOrigin(&self) {
    // Send result ignored for the same reason as ClearViewerOrigin: the
    // mock device channel may already be gone.
    let _ = self.sender.send(MockDeviceMsg::SetFloorOrigin(None));
}
/// Parses the WebIDL rigid transform and forwards it to the mock device as
/// the new floor-level origin.
///
/// Any error from `get_origin` (e.g. malformed position/orientation arrays)
/// is propagated to the caller via `?`.
///
/// https://immersive-web.github.io/webxr-test-api/#dom-fakexrdevice-setfloororigin
fn SetFloorOrigin(&self, origin: &FakeXRRigidTransformInit) -> Fallible<()> {
    // Send failures are ignored: the mock device channel may be closed.
    let _ = self
        .sender
        .send(MockDeviceMsg::SetFloorOrigin(Some(get_origin(origin)?)));
    Ok(())
}

View file

@ -13,8 +13,11 @@ interface FakeXRDevice {
// // behaves as if device was disconnected
// Promise<void> disconnect();
// Sets the origin of the viewer
[Throws] void setViewerOrigin(FakeXRRigidTransformInit origin, optional boolean emulatedPosition = false);
void clearViewerOrigin();
[Throws] void setFloorOrigin(FakeXRRigidTransformInit origin);
void clearFloorOrigin();
// // Simulates devices focusing and blurring sessions.
// void simulateVisibilityChange(XRVisibilityState);
@ -40,6 +43,8 @@ dictionary FakeXRViewInit {
required FakeXRRigidTransformInit viewOffset;
// https://immersive-web.github.io/webxr/#dom-xrwebgllayer-getviewport
required FakeXRDeviceResolution resolution;
FakeXRFieldOfViewInit fieldOfView;
};
// https://immersive-web.github.io/webxr/#xrviewport
@ -56,3 +61,10 @@ dictionary FakeXRRigidTransformInit {
required sequence<float> position;
required sequence<float> orientation;
};
// Optional explicit field of view for a fake view, with each frustum edge
// expressed in degrees. When present on FakeXRViewInit, the implementation
// converts these values to radians before handing them to the mock device.
dictionary FakeXRFieldOfViewInit {
  required float upDegrees;
  required float downDegrees;
  required float leftDegrees;
  required float rightDegrees;
};

View file

@ -14,6 +14,6 @@ dictionary XRRenderStateInit {
[SecureContext, Exposed=Window, Pref="dom.webxr.enabled"] interface XRRenderState {
readonly attribute double depthNear;
readonly attribute double depthFar;
readonly attribute double inlineVerticalFieldOfView;
readonly attribute double? inlineVerticalFieldOfView;
readonly attribute XRWebGLLayer? baseLayer;
};

View file

@ -162,12 +162,12 @@ impl XRMethods for XR {
) -> Rc<Promise> {
let promise = Promise::new_in_current_compartment(&self.global(), comp);
if !ScriptThread::is_user_interacting() {
promise.reject_error(Error::Security);
return promise;
}
if mode != XRSessionMode::Inline {
if !ScriptThread::is_user_interacting() {
promise.reject_error(Error::Security);
return promise;
}
if self.pending_or_active_session() {
promise.reject_error(Error::InvalidState);
return promise;

View file

@ -77,7 +77,11 @@ impl XRFrameMethods for XRFrame {
return Err(Error::InvalidState);
}
let pose = reference.get_viewer_pose(&self.data);
let pose = if let Some(pose) = reference.get_viewer_pose(&self.data) {
pose
} else {
return Ok(None);
};
Ok(Some(XRViewerPose::new(&self.global(), &self.session, pose)))
}

View file

@ -10,10 +10,10 @@ use crate::dom::bindings::reflector::{reflect_dom_object, DomObject};
use crate::dom::bindings::root::{Dom, DomRoot};
use crate::dom::globalscope::GlobalScope;
use crate::dom::xrrigidtransform::XRRigidTransform;
use crate::dom::xrsession::{cast_transform, ApiPose, ApiRigidTransform, ApiViewerPose, XRSession};
use crate::dom::xrsession::{cast_transform, ApiPose, ApiViewerPose, XRSession};
use crate::dom::xrspace::XRSpace;
use dom_struct::dom_struct;
use euclid::{RigidTransform3D, Vector3D};
use euclid::RigidTransform3D;
use webxr_api::Frame;
#[dom_struct]
@ -80,8 +80,8 @@ impl XRReferenceSpace {
///
/// This is equivalent to `get_pose(self).inverse() * get_pose(viewerSpace)` (in column vector notation),
/// however we specialize it to be efficient
pub fn get_viewer_pose(&self, base_pose: &Frame) -> ApiViewerPose {
let pose = self.get_unoffset_viewer_pose(base_pose);
pub fn get_viewer_pose(&self, base_pose: &Frame) -> Option<ApiViewerPose> {
let pose = self.get_unoffset_viewer_pose(base_pose)?;
// in column-vector notation,
// get_viewer_pose(space) = get_pose(space).inverse() * get_pose(viewer_space)
// = (get_unoffset_pose(space) * offset).inverse() * get_pose(viewer_space)
@ -89,14 +89,13 @@ impl XRReferenceSpace {
// = offset.inverse() * get_unoffset_viewer_pose(space)
let offset = self.offset.transform();
let inverse = offset.inverse();
inverse.pre_transform(&pose)
Some(inverse.pre_transform(&pose))
}
/// Gets pose of the viewer with respect to this space
///
/// Does not apply originOffset, use get_viewer_pose instead if you need it
pub fn get_unoffset_viewer_pose(&self, base_pose: &Frame) -> ApiViewerPose {
let viewer_pose: ApiViewerPose = cast_transform(base_pose.transform);
pub fn get_unoffset_viewer_pose(&self, base_pose: &Frame) -> Option<ApiViewerPose> {
// all math is in column-vector notation
// we use the following equation to verify correctness here:
// get_viewer_pose(space) = get_pose(space).inverse() * get_pose(viewer_space)
@ -105,25 +104,27 @@ impl XRReferenceSpace {
// get_viewer_pose(eye_level) = get_pose(eye_level).inverse() * get_pose(viewer_space)
// = I * viewer_pose
// = viewer_pose
let viewer_pose: ApiViewerPose = cast_transform(base_pose.transform?);
// we get viewer poses in eye-level space by default
viewer_pose
Some(viewer_pose)
},
XRReferenceSpaceType::Local_floor => {
// XXXManishearth support getting floor info from stage parameters
// get_viewer_pose(floor_level) = get_pose(floor_level).inverse() * get_pose(viewer_space)
// = Translate(-2).inverse() * viewer_pose
// = Translate(2) * viewer_pose
// = floor_to_native.inverse() * viewer_pose
// = native_to_floor * viewer_pose
let viewer_pose = base_pose.transform?;
let native_to_floor = self
.upcast::<XRSpace>()
.session()
.with_session(|s| s.floor_transform())?;
// assume approximate user height of 2 meters
let floor_to_eye: ApiRigidTransform = Vector3D::new(0., 2., 0.).into();
floor_to_eye.pre_transform(&viewer_pose)
Some(cast_transform(native_to_floor.pre_transform(&viewer_pose)))
},
XRReferenceSpaceType::Viewer => {
// This reference space follows the viewer around, so the viewer is
// always at an identity transform with respect to it
RigidTransform3D::identity()
Some(RigidTransform3D::identity())
},
_ => unimplemented!(),
}
@ -134,34 +135,34 @@ impl XRReferenceSpace {
/// The reference origin used is common between all
/// get_pose calls for spaces from the same device, so this can be used to compare
/// with other spaces
pub fn get_pose(&self, base_pose: &Frame) -> ApiPose {
let pose = self.get_unoffset_pose(base_pose);
pub fn get_pose(&self, base_pose: &Frame) -> Option<ApiPose> {
let pose = self.get_unoffset_pose(base_pose)?;
let offset = self.offset.transform();
// pose is a transform from the unoffset space to native space,
// offset is a transform from offset space to unoffset space,
// we want a transform from unoffset space to native space,
// which is pose * offset in column vector notation
pose.pre_transform(&offset)
Some(pose.pre_transform(&offset))
}
/// Gets pose represented by this space
///
/// Does not apply originOffset, use get_viewer_pose instead if you need it
pub fn get_unoffset_pose(&self, base_pose: &Frame) -> ApiPose {
pub fn get_unoffset_pose(&self, base_pose: &Frame) -> Option<ApiPose> {
match self.ty {
XRReferenceSpaceType::Local => {
// The eye-level pose is basically whatever the headset pose was at t=0, which
// for most devices is (0, 0, 0)
RigidTransform3D::identity()
Some(RigidTransform3D::identity())
},
XRReferenceSpaceType::Local_floor => {
// XXXManishearth support getting floor info from stage parameters
// Assume approximate height of 2m
// the floor-level space is 2m below the eye-level space, which is (0, 0, 0)
Vector3D::new(0., -2., 0.).into()
let native_to_floor = self
.upcast::<XRSpace>()
.session()
.with_session(|s| s.floor_transform())?;
Some(cast_transform(native_to_floor.inverse()))
},
XRReferenceSpaceType::Viewer => cast_transform(base_pose.transform),
XRReferenceSpaceType::Viewer => base_pose.transform.map(cast_transform),
_ => unimplemented!(),
}
}

View file

@ -17,7 +17,7 @@ pub struct XRRenderState {
reflector_: Reflector,
depth_near: Cell<f64>,
depth_far: Cell<f64>,
inline_vertical_fov: Cell<f64>,
inline_vertical_fov: Cell<Option<f64>>,
layer: MutNullableDom<XRWebGLLayer>,
}
@ -25,7 +25,7 @@ impl XRRenderState {
pub fn new_inherited(
depth_near: f64,
depth_far: f64,
inline_vertical_fov: f64,
inline_vertical_fov: Option<f64>,
layer: Option<&XRWebGLLayer>,
) -> XRRenderState {
XRRenderState {
@ -41,7 +41,7 @@ impl XRRenderState {
global: &GlobalScope,
depth_near: f64,
depth_far: f64,
inline_vertical_fov: f64,
inline_vertical_fov: Option<f64>,
layer: Option<&XRWebGLLayer>,
) -> DomRoot<XRRenderState> {
reflect_dom_object(
@ -73,7 +73,8 @@ impl XRRenderState {
self.depth_far.set(depth)
}
pub fn set_inline_vertical_fov(&self, fov: f64) {
self.inline_vertical_fov.set(fov)
debug_assert!(self.inline_vertical_fov.get().is_some());
self.inline_vertical_fov.set(Some(fov))
}
pub fn set_layer(&self, layer: Option<&XRWebGLLayer>) {
self.layer.set(layer)
@ -92,8 +93,8 @@ impl XRRenderStateMethods for XRRenderState {
}
/// https://immersive-web.github.io/webxr/#dom-xrrenderstate-inlineverticalfieldofview
fn InlineVerticalFieldOfView(&self) -> Finite<f64> {
Finite::wrap(self.inline_vertical_fov.get())
fn GetInlineVerticalFieldOfView(&self) -> Option<Finite<f64>> {
self.inline_vertical_fov.get().map(Finite::wrap)
}
/// https://immersive-web.github.io/webxr/#dom-xrrenderstate-baselayer

View file

@ -43,8 +43,10 @@ use dom_struct::dom_struct;
use euclid::{Rect, RigidTransform3D, Transform3D};
use ipc_channel::ipc::IpcSender;
use ipc_channel::router::ROUTER;
use metrics::ToMs;
use profile_traits::ipc;
use std::cell::Cell;
use std::f64::consts::{FRAC_PI_2, PI};
use std::mem;
use std::rc::Rc;
use webxr_api::{
@ -72,7 +74,7 @@ pub struct XRSession {
#[ignore_malloc_size_of = "closures are hard"]
raf_callback_list: DomRefCell<Vec<(i32, Option<Rc<XRFrameRequestCallback>>)>>,
#[ignore_malloc_size_of = "defined in ipc-channel"]
raf_sender: DomRefCell<Option<IpcSender<(f64, Frame)>>>,
raf_sender: DomRefCell<Option<IpcSender<Frame>>>,
input_sources: Dom<XRInputSourceArray>,
// Any promises from calling end()
#[ignore_malloc_size_of = "promises are hard"]
@ -115,8 +117,12 @@ impl XRSession {
}
pub fn new(global: &GlobalScope, session: Session, mode: XRSessionMode) -> DomRoot<XRSession> {
use std::f64::consts::FRAC_PI_2;
let render_state = XRRenderState::new(global, 0.1, 1000.0, FRAC_PI_2, None);
let ivfov = if mode == XRSessionMode::Inline {
Some(FRAC_PI_2)
} else {
None
};
let render_state = XRRenderState::new(global, 0.1, 1000.0, ivfov, None);
let input_sources = XRInputSourceArray::new(global);
let ret = reflect_dom_object(
Box::new(XRSession::new_inherited(
@ -300,7 +306,7 @@ impl XRSession {
}
/// https://immersive-web.github.io/webxr/#xr-animation-frame
fn raf_callback(&self, (time, mut frame): (f64, Frame)) {
fn raf_callback(&self, mut frame: Frame) {
debug!("WebXR RAF callback");
// Step 1
@ -333,6 +339,8 @@ impl XRSession {
// Step 4-5
let mut callbacks = mem::replace(&mut *self.raf_callback_list.borrow_mut(), vec![]);
let start = self.global().as_window().get_navigation_start();
let time = (frame.time_ns - start).to_ms();
let frame = XRFrame::new(&self.global(), self, frame);
// Step 6,7
@ -376,7 +384,10 @@ impl XRSession {
let near = *render_state.DepthNear() as f32;
let far = *render_state.DepthFar() as f32;
clip_planes.update(near, far);
let top = *render_state.InlineVerticalFieldOfView() / 2.;
let top = *render_state
.GetInlineVerticalFieldOfView()
.expect("IVFOV should be non null for inline sessions") /
2.;
let top = near * top.tan() as f32;
let bottom = top;
let left = top * size.width as f32 / size.height as f32;
@ -451,7 +462,7 @@ impl XRSessionMethods for XRSession {
}
// Step 4:
if init.inlineVerticalFieldOfView.is_some() {
if init.inlineVerticalFieldOfView.is_some() && self.is_immersive() {
return Err(Error::InvalidState);
}
@ -459,13 +470,33 @@ impl XRSessionMethods for XRSession {
.pending_render_state
.or_init(|| self.active_render_state.get().clone_object());
if let Some(near) = init.depthNear {
pending.set_depth_near(*near);
let mut near = *near;
// Step 8 from #apply-the-pending-render-state
// this may need to be changed if backends wish to impose
// further constraints
if near < 0. {
near = 0.;
}
pending.set_depth_near(near);
}
if let Some(far) = init.depthFar {
// Step 9 from #apply-the-pending-render-state
// this may need to be changed if backends wish to impose
// further constraints
// currently the maximum is infinity, so we do nothing
pending.set_depth_far(*far);
}
if let Some(fov) = init.inlineVerticalFieldOfView {
pending.set_inline_vertical_fov(*fov);
let mut fov = *fov;
// Step 10 from #apply-the-pending-render-state
// this may need to be changed if backends wish to impose
// further constraints
if fov < 0. {
fov = 0.0001;
} else if fov > PI {
fov = PI - 0.0001;
}
pending.set_inline_vertical_fov(fov);
}
if let Some(ref layer) = init.baseLayer {
pending.set_layer(Some(&layer))

View file

@ -68,7 +68,7 @@ impl XRSpace {
/// with other spaces
pub fn get_pose(&self, base_pose: &Frame) -> Option<ApiPose> {
if let Some(reference) = self.downcast::<XRReferenceSpace>() {
Some(reference.get_pose(base_pose))
reference.get_pose(base_pose)
} else if let Some(source) = self.input_source.get() {
// XXXManishearth we should be able to request frame information
// for inputs when necessary instead of always loading it

View file

@ -21,7 +21,6 @@ use crate::dom::promise::Promise;
use crate::script_thread::ScriptThread;
use crate::task_source::TaskSource;
use dom_struct::dom_struct;
use euclid::RigidTransform3D;
use ipc_channel::ipc::IpcSender;
use ipc_channel::router::ROUTER;
use profile_traits::ipc;
@ -75,26 +74,26 @@ impl XRTestMethods for XRTest {
let origin = if let Some(ref o) = init.viewerOrigin {
match get_origin(&o) {
Ok(origin) => origin,
Ok(origin) => Some(origin),
Err(e) => {
p.reject_error(e);
return p;
},
}
} else {
RigidTransform3D::identity()
None
};
let floor_origin = if let Some(ref o) = init.floorOrigin {
match get_origin(&o) {
Ok(origin) => origin,
Ok(origin) => Some(origin),
Err(e) => {
p.reject_error(e);
return p;
},
}
} else {
RigidTransform3D::identity()
None
};
let views = match get_views(&init.views) {

View file

@ -20,7 +20,7 @@ use crate::dom::xrview::XRView;
use crate::dom::xrviewport::XRViewport;
use canvas_traits::webgl::WebGLFramebufferId;
use dom_struct::dom_struct;
use euclid::Size2D;
use euclid::{Point2D, Rect, Size2D};
use std::convert::TryInto;
use webxr_api::SwapChainId as WebXRSwapChainId;
use webxr_api::{Viewport, Views};
@ -211,11 +211,13 @@ impl XRWebGLLayerMethods for XRWebGLLayer {
let views = self.session.with_session(|s| s.views().clone());
let viewport = match (view.Eye(), views) {
(XREye::None, Views::Inline) => {
let origin = Point2D::new(0, 0);
Rect::new(origin, self.size().cast())
},
(XREye::None, Views::Mono(view)) => view.viewport,
(XREye::Left, Views::Stereo(view, _)) => view.viewport,
(XREye::Right, Views::Stereo(_, view)) => view.viewport,
// The spec doesn't really say what to do in this case
// https://github.com/immersive-web/webxr/issues/769
_ => return None,
};