tests: Vendor blink perf tests (#38654)

Vendors the [blink perf
tests](https://chromium.googlesource.com/chromium/src/+/HEAD/third_party/blink/perf_tests/).
These perf tests are useful for evaluating the performance of Servo.
The license that governs the perf tests is included in the folder.
Running benchmark cases automatically is left to future work.

The update.py script is taken from mozjs and slightly adapted, so we can
easily filter the vendored tests (and patch them, should that become
necessary in the future).
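
For illustration only, the sketch below shows one way such a filtered vendoring step could work. The upstream and destination paths, the `EXCLUDE` patterns, and the `vendor()` helper are assumptions made for this example; they do not describe the actual update.py interface.

```python
#!/usr/bin/env python3
# Hypothetical sketch of a filtered vendoring step (not the actual update.py):
# copy the upstream blink perf_tests tree into the repo, skipping any paths
# that match an exclusion filter. All paths and patterns are illustrative.
import fnmatch
import shutil
from pathlib import Path

UPSTREAM = Path("/tmp/chromium/third_party/blink/perf_tests")  # assumed checkout location
DEST = Path("tests/blink/perf_tests")                          # assumed vendored location
EXCLUDE: list[str] = []  # filter patterns (e.g. for large media files) would go here


def is_excluded(rel_path: str) -> bool:
    """Return True if the relative path matches any exclusion pattern."""
    return any(fnmatch.fnmatch(rel_path, pattern) for pattern in EXCLUDE)


def vendor() -> None:
    """Copy every non-excluded file from UPSTREAM into DEST."""
    for src in UPSTREAM.rglob("*"):
        if not src.is_file():
            continue
        rel = src.relative_to(UPSTREAM).as_posix()
        if is_excluded(rel):
            continue
        dst = DEST / rel
        dst.parent.mkdir(parents=True, exist_ok=True)
        shutil.copy2(src, dst)


if __name__ == "__main__":
    vendor()
```

A patch step, if it ever becomes necessary, could be layered on top of such a copy by applying patch files after the sync.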

Testing: This PR just adds the perf_tests, but does not use or modify
them in any way.

---------

Signed-off-by: Jonathan Schwender <schwenderjonathan@gmail.com>

@@ -0,0 +1 @@
mixins: "//third_party/blink/renderer/modules/webgpu/COMMON_METADATA"

@@ -0,0 +1 @@
file://third_party/blink/renderer/modules/webgpu/OWNERS

@@ -0,0 +1,97 @@
<!DOCTYPE html>
<html>
<head>
  <title>
    Test CPU performance of the GPURenderPassEncoder.draw binding
  </title>
  <script src="../resources/runner.js"></script>
  <script src="./resources/webgpu-perf-utils.js"></script>
</head>
<body>
  <canvas id="canvas" width=400 height=400></canvas>
  <script>
    (async () => {
      const adapter = navigator.gpu && await navigator.gpu.requestAdapter();
      if (!adapter) {
        return skipTest('WebGPU not supported');
      }
      const device = await adapter.requestDevice();

      const canvas = document.getElementById('canvas');
      const context = canvas.getContext('webgpu');
      const contextFormat = navigator.gpu.getPreferredCanvasFormat();
      context.configure({
        device,
        format: contextFormat,
      });

      const pipeline = await device.createRenderPipelineAsync({
        layout: 'auto',
        vertex: {
          module: device.createShaderModule({
            code: `
              const pos = array<vec2f, 3>(
                  vec2f(0.0, 0.5),
                  vec2f(-0.5, -0.5),
                  vec2f(0.5, -0.5));

              @vertex
              fn main(@builtin(vertex_index) VertexIndex : u32) -> @builtin(position) vec4<f32> {
                return vec4f(pos[VertexIndex], 0.0, 1.0);
              }`
          }),
          entryPoint: 'main',
        },
        fragment: {
          module: device.createShaderModule({
            code: `
              @fragment
              fn main() -> @location(0) vec4f {
                return vec4f(0.0, 1.0, 0.0, 1.0);
              }`
          }),
          entryPoint: 'main',
          targets: [{
            format: contextFormat,
          }],
        },
        primitive: {
          topology: 'triangle-list',
        },
      });

      const renderPassDescriptor = {
        colorAttachments: [{
          view: undefined,
          loadOp: 'clear',
          clearValue: { r: 0.0, g: 0.0, b: 0.0, a: 1.0 },
          storeOp: 'store',
        }],
      };

      const iterations = 10000;
      PerfTestRunner.measureInnerRAFTime({
        description: `CPU time for ${iterations} calls to GPURenderPassEncoder.draw`,
        warmUpCount: 10,
        run() {
          const commandEncoder = device.createCommandEncoder();
          renderPassDescriptor.colorAttachments[0].view = context.getCurrentTexture().createView();
          const passEncoder = commandEncoder.beginRenderPass(renderPassDescriptor);
          passEncoder.setPipeline(pipeline);
          for (let i = 0; i < iterations; ++i) {
            passEncoder.draw(3, 1, 0, 0);
          }
          passEncoder.end();
          device.queue.submit([commandEncoder.finish()]);
        }
      });
    })();
  </script>
</body>
</html>

@@ -0,0 +1,110 @@
<!DOCTYPE html>
<html>
<head>
  <title>
    Test CPU performance of the GPURenderPassEncoder.setBindGroup binding
  </title>
  <script src="../resources/runner.js"></script>
  <script src="./resources/webgpu-perf-utils.js"></script>
</head>
<body>
  <canvas id="canvas" width=400 height=400></canvas>
  <script>
    (async () => {
      const adapter = navigator.gpu && await navigator.gpu.requestAdapter();
      if (!adapter) {
        return skipTest('WebGPU not supported');
      }
      const device = await adapter.requestDevice();

      const canvas = document.getElementById('canvas');
      const context = canvas.getContext('webgpu');
      const contextFormat = navigator.gpu.getPreferredCanvasFormat();
      context.configure({
        device,
        format: contextFormat,
      });

      const pipeline = await device.createRenderPipelineAsync({
        layout: 'auto',
        vertex: {
          module: device.createShaderModule({
            code: `
              @group(0) @binding(0) var<storage, read> pos : array<vec2f, 3>;

              @vertex
              fn main(@builtin(vertex_index) VertexIndex : u32) -> @builtin(position) vec4<f32> {
                return vec4f(pos[VertexIndex], 0.0, 1.0);
              }`
          }),
        },
        fragment: {
          module: device.createShaderModule({
            code: `
              @fragment
              fn main() -> @location(0) vec4f {
                return vec4f(0.0, 1.0, 0.0, 1.0);
              }`
          }),
          targets: [{
            format: contextFormat,
          }],
        },
      });

      const renderPassDescriptor = {
        colorAttachments: [{
          view: undefined,
          loadOp: 'clear',
          clearValue: { r: 0.0, g: 0.0, b: 0.0, a: 1.0 },
          storeOp: 'store',
        }],
      };

      const posBuffer = device.createBuffer({
        size: Float32Array.BYTES_PER_ELEMENT * 2 * 3,
        mappedAtCreation: true,
        usage: GPUBufferUsage.STORAGE,
      });
      new Float32Array(posBuffer.getMappedRange()).set([
          0.0, 0.5,
          -0.5, -0.5,
          0.5, -0.5,
      ]);
      posBuffer.unmap();

      const bindGroup = device.createBindGroup({
        layout: pipeline.getBindGroupLayout(0),
        entries: [{
          binding: 0,
          resource: { buffer: posBuffer },
        }],
      });

      const iterations = 10000;
      PerfTestRunner.measureInnerRAFTime({
        description: `CPU time for ${iterations} calls to GPURenderPassEncoder.setBindGroup`,
        warmUpCount: 100,
        run() {
          const commandEncoder = device.createCommandEncoder();
          renderPassDescriptor.colorAttachments[0].view = context.getCurrentTexture().createView();
          const passEncoder = commandEncoder.beginRenderPass(renderPassDescriptor);
          passEncoder.setPipeline(pipeline);
          for (let i = 0; i < iterations; ++i) {
            passEncoder.setBindGroup(0, bindGroup);
          }
          passEncoder.draw(3, 1, 0, 0);
          passEncoder.end();
          device.queue.submit([commandEncoder.finish()]);
        }
      });
    })();
  </script>
</body>
</html>

@@ -0,0 +1,16 @@
function skipTest(message) {
  PerfTestRunner.log(message);
  const skip = () => {
    if (window.testRunner) {
      testRunner.notifyDone();
    }
  };
  if (window.testRunner && window.testRunner.telemetryIsRunning) {
    testRunner.waitForTelemetry([], skip);
  } else {
    skip();
  }
}