Add linux-perf job (#33261)

The job runs performance benchmarks (Dromaeo, Speedometer), measures binary size, and reports the results to bencher.dev in Bencher's BMF JSON (sketched below).

Signed-off-by: sagudev <16504129+sagudev@users.noreply.github.com>
Co-authored-by: DK Liao <dklassic@gmail.com>
Samson 2024-12-06 08:32:26 +01:00 committed by GitHub
parent 61ca2dde29
commit faefed9869
8 changed files with 180 additions and 0 deletions

@@ -17,6 +17,9 @@ on:
unit-tests:
required: true
type: boolean
bencher:
required: true
type: boolean
jobs:
win:
@@ -49,6 +52,7 @@ jobs:
wpt-layout: ${{ inputs.wpt-layout }}
unit-tests: ${{ inputs.unit-tests }}
wpt-args: ${{ inputs.wpt-args }}
bencher: ${{ inputs.bencher }}
lint:
if: ${{ inputs.workflow == 'lint' }}

.github/workflows/linux-bencher.yml (new file, 85 lines)
@@ -0,0 +1,85 @@
name: Linux Bencher
on:
workflow_call:
permissions:
checks: write
pull-requests: write
env:
RUST_BACKTRACE: 1
SHELL: /bin/bash
# allows overriding bencher project for pushes
BENCHER_PROJECT: ${{ vars.BENCHER_PROJECT || 'servo' }}
jobs:
linux-bencher:
name: Bencher
# Keep this in sync with the --testbed value passed to bencher run below
runs-on: ubuntu-22.04
steps:
- uses: actions/checkout@v4
if: github.event_name != 'pull_request_target'
with:
fetch-depth: 0
# This is necessary to checkout the pull request if this run was triggered via a
# `pull_request_target` event.
- uses: actions/checkout@v4
if: github.event_name == 'pull_request_target'
with:
ref: refs/pull/${{ github.event.number }}/head
fetch-depth: 0
- uses: actions/download-artifact@v4
with:
name: release-binary-linux
path: release-binary-linux
- name: Unpack binary
run: tar -xzf release-binary-linux/target.tar.gz
- name: Bootstrap dependencies
run: |
python3 -m pip install --upgrade pip
sudo apt update
sudo apt install -qy --no-install-recommends mesa-vulkan-drivers
python3 ./mach bootstrap --skip-lints
- uses: bencherdev/bencher@main
- name: File size
run: ./etc/ci/bencher.py filesize target/release/servo --bmf-output size.json
- name: Speedometer
run: |
python3 ./mach test-speedometer -r --bmf-output speedometer.json
- name: Dromaeo
run: |
python3 ./mach test-dromaeo -r dom --bmf-output dromaeo.json
# Select the bencher options for the event that triggered this run
- name: Set bencher opts for PRs (label try run)
if: github.event_name == 'pull_request_target'
run: |
echo "RUN_BENCHER_OPTIONS=--branch ${{ github.event.number }}/PR \
--branch-start-point ${{ github.base_ref }} \
--branch-start-point-hash ${{ github.event.pull_request.base.sha }} \
--branch-reset \
--github-actions ${{ secrets.GITHUB_TOKEN }}" >> "$GITHUB_ENV"
- name: Set bencher opts for main
if: ${{ github.event_name == 'push' && github.ref_name == 'main' }}
run: |
echo "RUN_BENCHER_OPTIONS=--branch main \
--github-actions ${{ secrets.GITHUB_TOKEN }}" >> "$GITHUB_ENV"
- name: Set bencher opts for try branch
if: ${{ github.event_name == 'push' && github.ref_name == 'try' }}
run: |
git remote add upstream https://github.com/servo/servo
git fetch upstream main
echo "RUN_BENCHER_OPTIONS=--branch try \
--github-actions ${{ secrets.GITHUB_TOKEN }} \
--hash $(git rev-parse HEAD~1) \
--branch-start-point main \
--branch-start-point-hash $(git merge-base upstream/main HEAD) \
--branch-reset" >> "$GITHUB_ENV"
# Merge all results and send them in one upload so everything lands in a single report (see the sketch below)
- name: Send results
continue-on-error: true
run: |
./etc/ci/bencher.py merge size.json speedometer.json dromaeo.json --bmf-output b.json
bencher run --adapter json --file b.json \
--project ${{ env.BENCHER_PROJECT }} --token ${{ secrets.BENCHER_API_TOKEN }} --testbed ubuntu-22.04 \
$RUN_BENCHER_OPTIONS

@@ -32,6 +32,10 @@ on:
required: false
type: boolean
default: false
bencher:
required: false
default: false
type: boolean
workflow_dispatch:
inputs:
profile:
@@ -63,6 +67,10 @@ on:
required: false
type: boolean
default: false
bencher:
required: false
default: false
type: boolean
env:
RUST_BACKTRACE: 1
@@ -219,3 +227,10 @@ jobs:
wpt-sync-from-upstream: ${{ inputs.wpt-sync-from-upstream }}
wpt-layout: "layout-2013"
secrets: inherit
bencher:
needs: ["build"]
# Benchmarks must use the release profile; production-profile benchmarks will run in servo/servo-nightly-builds
if: ${{ inputs.bencher && inputs.profile == 'release' && github.event_name != 'workflow_dispatch' && github.event_name != 'merge_group' }}
uses: ./.github/workflows/linux-bencher.yml
secrets: inherit

@@ -54,6 +54,7 @@ jobs:
profile: ${{ matrix.profile }}
unit-tests: ${{ matrix.unit_tests }}
wpt-args: ${{ matrix.wpt_args }}
bencher: ${{ matrix.bencher }}
build-result:
name: Result

@@ -137,6 +137,7 @@ jobs:
profile: ${{ matrix.profile }}
unit-tests: ${{ matrix.unit_tests }}
wpt-args: ${{ matrix.wpt_args }}
bencher: ${{ matrix.bencher }}
results:
name: Results

@@ -21,6 +21,9 @@ on:
unit-tests:
required: false
type: boolean
bencher:
required: false
type: boolean
jobs:
decision:
@@ -104,6 +107,7 @@ jobs:
profile: ${{ matrix.profile }}
unit-tests: ${{ matrix.unit_tests }}
wpt-args: ${{ matrix.wpt_args }}
bencher: ${{ matrix.bencher }}
build-result:
name: Result

etc/ci/bencher.py (new executable file, 58 lines)
@@ -0,0 +1,58 @@
#!/usr/bin/env python3
# Copyright 2024 The Servo Project Developers. See the COPYRIGHT
# file at the top-level directory of this distribution.
#
# Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
# http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
# <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
# option. This file may not be copied, modified, or distributed
# except according to those terms.
import argparse
import json
import os


def size(args):
    # Report the binary size in bytes as a single BMF metric.
    file_size = os.path.getsize(args.binary)
    print(file_size)
    if args.bmf_output:
        with open(args.bmf_output, 'w', encoding='utf-8') as f:
            json.dump({
                'servo': {
                    'file-size': {
                        'value': float(file_size),
                    }
                }
            }, f, indent=4)


def merge(args):
    output: dict[str, object] = dict()
    for input_file in args.inputs:
        with open(input_file, 'r', encoding='utf-8') as f:
            data = json.load(f)
        # Warn about benchmark names that appear in more than one input;
        # with `data | output`, entries merged earlier win on collision.
        duplicates = set(data) & set(output)
        if duplicates:
            print("Duplicated keys:", duplicates)
        output = data | output
    with open(args.bmf_output, 'w', encoding='utf-8') as f:
        json.dump(output, f, indent=4)


parser = argparse.ArgumentParser("Helper commands for bencher")
subparser = parser.add_subparsers()

size_parser = subparser.add_parser("filesize", help="Returns BMF for filesize")
size_parser.add_argument("binary", help="Servo binary file")
size_parser.add_argument("--bmf-output", help="BMF JSON output file", default=None)
size_parser.set_defaults(func=size)

merge_parser = subparser.add_parser("merge", help="Merges BMF JSONs")
merge_parser.add_argument("--bmf-output", help="BMF JSON output file")
merge_parser.add_argument("inputs", help="BMF JSON files to merge", nargs="+")
merge_parser.set_defaults(func=merge)

args = parser.parse_args()
args.func(args)

@@ -57,6 +57,7 @@ class JobConfig(object):
wpt_layout: Layout = Layout.none
profile: str = "release"
unit_tests: bool = False
bencher: bool = False
wpt_args: str = ""
# These are the fields that must match between two JobConfigs for them to be
# merged. If you modify any of the fields above, make sure to update this line as well.
@@ -74,6 +75,7 @@ class JobConfig(object):
# to join "Linux" and "Linux WPT" into "Linux WPT"
if len(other.name) > len(self.name):
self.name = other.name
self.bencher |= other.bencher
return True
@@ -82,6 +84,8 @@ def handle_preset(s: str) -> Optional[JobConfig]:
if s == "linux":
return JobConfig("Linux", Workflow.LINUX, unit_tests=True)
elif s in ["perf", "linux-perf", "bencher"]:
return JobConfig("Linux perf", Workflow.LINUX, bencher=True)
elif s in ["mac", "macos"]:
return JobConfig("MacOS", Workflow.MACOS, unit_tests=True)
elif s in ["win", "windows"]:
@@ -175,6 +179,7 @@ class TestParser(unittest.TestCase):
self.assertDictEqual(json.loads(Config("linux fail-fast").to_json()),
{'fail_fast': True,
'matrix': [{
'bencher': False,
'name': 'Linux',
'profile': 'release',
'unit_tests': True,
@@ -188,6 +193,7 @@ class TestParser(unittest.TestCase):
self.assertDictEqual(json.loads(Config("").to_json()),
{"fail_fast": False, "matrix": [
{
'bencher': False,
"name": "Linux WPT",
"workflow": "linux",
"wpt_layout": "2020",
@@ -196,6 +202,7 @@ class TestParser(unittest.TestCase):
"wpt_args": ""
},
{
'bencher': False,
"name": "MacOS",
"workflow": "macos",
"wpt_layout": "none",
@@ -204,6 +211,7 @@ class TestParser(unittest.TestCase):
"wpt_args": ""
},
{
'bencher': False,
"name": "Windows",
"workflow": "windows",
"wpt_layout": "none",
@@ -212,6 +220,7 @@ class TestParser(unittest.TestCase):
"wpt_args": ""
},
{
'bencher': False,
"name": "Android",
"workflow": "android",
"wpt_layout": "none",
@@ -220,6 +229,7 @@ class TestParser(unittest.TestCase):
"wpt_args": ""
},
{
'bencher': False,
"name": "OpenHarmony",
"workflow": "ohos",
"wpt_layout": "none",
@@ -228,6 +238,7 @@ class TestParser(unittest.TestCase):
"wpt_args": ""
},
{
'bencher': False,
"name": "Lint",
"workflow": "lint",
"wpt_layout": "none",
@@ -240,6 +251,7 @@ class TestParser(unittest.TestCase):
self.assertDictEqual(json.loads(Config("wpt-2020 wpt-2013").to_json()),
{'fail_fast': False,
'matrix': [{
'bencher': False,
'name': 'Linux WPT',
'profile': 'release',
'unit_tests': False,