Mirror of https://github.com/servo/servo.git
Auto merge of #9607 - mbrubeck:chaos, r=jgraham
Add a "mach test-wpt --chaos" mode for reproducing intermittent failures using rr

This adds a new `--chaos` flag to the `test-wpt` and `test-css` commands. It runs a test repeatedly until it fails, recording the run with the rr debugger in chaos mode. Requires a recent master build of rr.

```
./mach test-wpt --chaos test.html
```

is shorthand for:

```
./mach test-wpt --repeat-until-unexpected \
    --debugger rr \
    --debugger-args "record --chaos" \
    --include test.html
```

This PR currently includes changes to wptrunner which were also submitted upstream as w3c/wptrunner#170. We should not merge this until that upstream PR is merged.

CC @jgraham
Commit: faa349fb87
3 changed files with 28 additions and 15 deletions
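Once rr has captured a failing run, the usual next step (outside the scope of this diff) is to replay the recorded trace deterministically:

```
rr replay    # replay the most recent rr recording under a debugger
```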
python/servo/testing_commands.py:

```diff
@@ -41,6 +41,8 @@ def create_parser_wpt():
     parser = wptcommandline.create_parser()
     parser.add_argument('--release', default=False, action="store_true",
                         help="Run with a release build of servo")
+    parser.add_argument('--chaos', default=False, action="store_true",
+                        help="Run under chaos mode in rr until a failure is captured")
     return parser
 
 
@@ -332,19 +334,24 @@ class MachCommands(CommandBase):
     @Command('test-wpt',
              description='Run the web platform tests',
              category='testing',
-             parser=wptcommandline.create_parser)
-    @CommandArgument('--release', default=False, action="store_true",
-                     help="Run with a release build of servo")
+             parser=create_parser_wpt)
     def test_wpt(self, **kwargs):
         self.ensure_bootstrapped()
         hosts_file_path = path.join(self.context.topdir, 'tests', 'wpt', 'hosts')
-
         os.environ["hosts_file_path"] = hosts_file_path
-        os.environ["RUST_BACKTRACE"] = "1"
-
-        kwargs["debug"] = not kwargs["release"]
-
         run_file = path.abspath(path.join(self.context.topdir, "tests", "wpt", "run_wpt.py"))
+        return self.wptrunner(run_file, **kwargs)
+
+    # Helper for test_css and test_wpt:
+    def wptrunner(self, run_file, **kwargs):
+        os.environ["RUST_BACKTRACE"] = "1"
+        kwargs["debug"] = not kwargs["release"]
+        if kwargs.pop("chaos"):
+            kwargs["debugger"] = "rr"
+            kwargs["debugger_args"] = "record --chaos"
+            kwargs["repeat_until_unexpected"] = True
+            # TODO: Delete rr traces from green test runs?
+
         run_globals = {"__file__": run_file}
         execfile(run_file, run_globals)
         return run_globals["run_tests"](**kwargs)
@@ -398,11 +405,8 @@ class MachCommands(CommandBase):
              parser=create_parser_wpt)
     def test_css(self, **kwargs):
         self.ensure_bootstrapped()
-
         run_file = path.abspath(path.join("tests", "wpt", "run_css.py"))
-        run_globals = {"__file__": run_file}
-        execfile(run_file, run_globals)
-        return run_globals["run_tests"](**kwargs)
+        return self.wptrunner(run_file, **kwargs)
 
     @Command('update-css',
              description='Update the web platform tests',
```
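The heart of the change is the new `wptrunner` helper: it pops `chaos` out of the keyword arguments so the underlying harness never sees it, and expands it into the three options the commit message describes. A minimal standalone sketch of that expansion (the function name and the `print` call are illustrative, not from the patch):

```python
def expand_chaos(**kwargs):
    # Pop "chaos" so it is not forwarded to wptrunner's own run_tests();
    # when it was set, substitute the options it abbreviates.
    if kwargs.pop("chaos"):
        kwargs["debugger"] = "rr"
        kwargs["debugger_args"] = "record --chaos"
        kwargs["repeat_until_unexpected"] = True
    return kwargs

# "./mach test-wpt --chaos test.html" effectively becomes:
print(expand_chaos(chaos=True, include=["test.html"]))
# {'include': ['test.html'], 'debugger': 'rr',
#  'debugger_args': 'record --chaos', 'repeat_until_unexpected': True}
```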
tests/wpt/harness/wptrunner/wptcommandline.py:

```diff
@@ -72,6 +72,8 @@ def create_parser(product_choices=None):
                         help="Multiplier relative to standard test timeout to use")
     parser.add_argument("--repeat", action="store", type=int, default=1,
                         help="Number of times to run the tests")
+    parser.add_argument("--repeat-until-unexpected", action="store_true", default=None,
+                        help="Run tests in a loop until one returns an unexpected result")
 
     parser.add_argument("--no-capture-stdio", action="store_true", default=False,
                         help="Don't capture stdio and write to logging")
```
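The new flag is a plain argparse boolean whose default is `None` rather than `False`. A minimal, self-contained sketch of how it parses (the bare `ArgumentParser` here is illustrative, not the harness's real parser):

```python
import argparse

parser = argparse.ArgumentParser()
parser.add_argument("--repeat-until-unexpected", action="store_true", default=None,
                    help="Run tests in a loop until one returns an unexpected result")

# argparse maps the dashes to underscores; when the flag is absent the value
# stays None (falsy, so the loop falls back to the --repeat count), and when
# present it becomes True.
print(parser.parse_args([]).repeat_until_unexpected)                             # None
print(parser.parse_args(["--repeat-until-unexpected"]).repeat_until_unexpected)  # True
```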
tests/wpt/harness/wptrunner/wptrunner.py:

```diff
@@ -160,10 +160,15 @@ def run_tests(config, test_paths, product, **kwargs):
     browser_kwargs = get_browser_kwargs(ssl_env=ssl_env, **kwargs)
 
     repeat = kwargs["repeat"]
-    for repeat_count in xrange(repeat):
-        if repeat > 1:
-            logger.info("Repetition %i / %i" % (repeat_count + 1, repeat))
+    repeat_count = 0
+    repeat_until_unexpected = kwargs["repeat_until_unexpected"]
 
+    while repeat_count < repeat or repeat_until_unexpected:
+        repeat_count += 1
+        if repeat_until_unexpected:
+            logger.info("Repetition %i" % (repeat_count))
+        elif repeat > 1:
+            logger.info("Repetition %i / %i" % (repeat_count, repeat))
 
         unexpected_count = 0
         logger.suite_start(test_loader.test_ids, run_info)
@@ -208,6 +213,8 @@ def run_tests(config, test_paths, product, **kwargs):
 
         unexpected_total += unexpected_count
         logger.info("Got %i unexpected results" % unexpected_count)
+        if repeat_until_unexpected and unexpected_total > 0:
+            break
         logger.suite_end()
 
     return unexpected_total == 0
```
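Read in isolation, the reworked loop runs the suite at least `repeat` times, or indefinitely when `repeat_until_unexpected` is set, stopping after the first pass that yields an unexpected result. A minimal sketch of that control flow, with `run_suite` as a hypothetical stand-in for one full wptrunner pass:

```python
def run_until_unexpected(run_suite, repeat=1, repeat_until_unexpected=False):
    unexpected_total = 0
    repeat_count = 0
    while repeat_count < repeat or repeat_until_unexpected:
        repeat_count += 1
        # run_suite stands in for one full test run; it returns the
        # number of unexpected results from that run.
        unexpected_total += run_suite()
        if repeat_until_unexpected and unexpected_total > 0:
            break
    return unexpected_total == 0

# Example: a flaky suite that first fails on its third run.
results = iter([0, 0, 1])
print(run_until_unexpected(lambda: next(results), repeat_until_unexpected=True))  # False
```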