mirror of
https://github.com/servo/servo.git
synced 2025-08-20 12:55:33 +01:00
Update web-platform-tests to revision 8a2ceb5f18911302b7a5c1cd2791f4ab50ad4326
This commit is contained in:
parent
462c272380
commit
1f531f66ea
5377 changed files with 174916 additions and 84369 deletions
|
@ -1,23 +0,0 @@
|
|||
"""Command-line helper that installs a browser or its webdriver component."""
import argparse
import browser
import sys

# Built at module level so the parser can be imported by other tooling.
parser = argparse.ArgumentParser()
parser.add_argument('browser', choices=['firefox', 'chrome'],
                    help='name of web browser product')
parser.add_argument('component', choices=['browser', 'webdriver'],
                    help='name of component')
parser.add_argument('-d', '--destination',
                    help='filesystem directory to place the component')

if __name__ == '__main__':
    args = parser.parse_args()

    # The browser module exposes one class per product name (title-cased).
    product_cls = getattr(browser, args.browser.title())
    method = 'install_webdriver' if args.component == 'webdriver' else 'install'

    sys.stdout.write('Now installing %s %s...\n' % (args.browser, args.component))
    getattr(product_cls(), method)(dest=args.destination)
|
83
tests/wpt/web-platform-tests/tools/certs/cacert.pem
Normal file
83
tests/wpt/web-platform-tests/tools/certs/cacert.pem
Normal file
|
@ -0,0 +1,83 @@
|
|||
Certificate:
|
||||
Data:
|
||||
Version: 3 (0x2)
|
||||
Serial Number: 1 (0x1)
|
||||
Signature Algorithm: sha256WithRSAEncryption
|
||||
Issuer: CN=web-platform-tests
|
||||
Validity
|
||||
Not Before: Dec 22 12:09:15 2014 GMT
|
||||
Not After : Dec 21 12:09:15 2024 GMT
|
||||
Subject: CN=web-platform-tests
|
||||
Subject Public Key Info:
|
||||
Public Key Algorithm: rsaEncryption
|
||||
Public-Key: (2048 bit)
|
||||
Modulus:
|
||||
00:c0:3e:c3:bd:b7:9e:2e:3e:ec:1f:a5:ae:9f:85:
|
||||
8d:c0:59:a4:40:a8:fe:66:cf:ef:7d:1f:4a:91:52:
|
||||
e6:55:0f:e5:69:b8:44:da:62:17:c7:88:28:ad:ec:
|
||||
6d:b0:00:cd:a1:69:0e:e8:19:84:58:ad:e9:ce:31:
|
||||
50:6c:a4:82:14:91:08:5a:a3:ae:c8:49:13:19:18:
|
||||
7b:5b:2b:44:30:eb:bf:c7:7c:bb:d4:32:17:6a:4d:
|
||||
eb:84:f1:65:9b:9d:21:3e:91:ae:74:75:b3:7c:b4:
|
||||
cd:fa:98:73:d4:7c:d1:5d:1b:89:72:f7:d4:52:bd:
|
||||
05:a3:c1:cf:b6:58:e7:51:ec:c2:71:c7:f6:0b:00:
|
||||
97:58:f9:cb:c3:18:46:7b:55:0f:90:bf:da:a3:7d:
|
||||
d6:c5:48:ea:b3:b5:a8:12:96:ac:38:65:10:b9:b1:
|
||||
69:cb:4e:3b:4c:c3:83:74:33:63:b4:cc:fe:65:c1:
|
||||
ad:12:51:f1:02:72:50:49:03:ab:a6:28:20:41:15:
|
||||
ca:77:15:d9:85:55:69:9d:31:c1:db:12:be:46:db:
|
||||
e6:d3:8f:2f:66:2d:9b:61:08:30:57:6c:d9:4f:b1:
|
||||
6a:a8:e5:90:e3:e2:68:96:45:6e:3f:de:5e:13:fe:
|
||||
8a:bd:f1:3f:7f:26:ec:3c:1a:80:b0:a8:ec:52:c5:
|
||||
11:03
|
||||
Exponent: 65537 (0x10001)
|
||||
X509v3 extensions:
|
||||
X509v3 Basic Constraints:
|
||||
CA:TRUE
|
||||
X509v3 Subject Key Identifier:
|
||||
6A:AB:53:64:92:36:87:23:34:B3:1D:6F:85:4B:F5:DF:5A:5C:74:8F
|
||||
X509v3 Authority Key Identifier:
|
||||
keyid:6A:AB:53:64:92:36:87:23:34:B3:1D:6F:85:4B:F5:DF:5A:5C:74:8F
|
||||
DirName:/CN=web-platform-tests
|
||||
serial:01
|
||||
|
||||
X509v3 Key Usage:
|
||||
Certificate Sign
|
||||
X509v3 Extended Key Usage:
|
||||
TLS Web Server Authentication
|
||||
Signature Algorithm: sha256WithRSAEncryption
|
||||
46:af:02:04:97:2f:5b:00:11:c5:8f:c4:e1:2b:23:62:d1:61:
|
||||
4a:28:ed:82:82:57:d7:28:65:88:a5:61:16:20:37:02:90:16:
|
||||
0b:05:43:46:db:bd:3d:b4:4d:1c:6e:85:ff:5d:dc:0f:a4:a4:
|
||||
29:98:24:ae:39:ab:e4:97:a9:10:bc:a5:b9:4b:c1:2e:5a:ce:
|
||||
89:32:00:f1:fc:94:35:a6:71:c8:9c:d9:05:43:44:6c:68:62:
|
||||
ae:b1:71:20:17:5f:c4:fb:ae:05:e6:26:e5:41:88:cc:db:51:
|
||||
55:ed:85:a0:c9:e5:68:65:a7:fa:7a:db:8f:81:61:60:50:0b:
|
||||
16:b0:10:49:19:bb:70:0e:37:09:03:20:e8:a2:b9:e5:eb:c2:
|
||||
6a:7b:4f:60:cd:fb:22:0b:27:c6:0d:2d:e2:32:cc:28:de:c6:
|
||||
e2:14:6a:ad:3f:c4:6e:78:9d:71:24:9b:56:c4:54:28:e9:ec:
|
||||
09:6e:34:cc:6d:5c:bc:e6:68:96:44:ff:62:d0:54:a0:a4:37:
|
||||
d8:f7:9f:bc:bb:dc:ad:2c:49:e2:64:b9:4d:aa:e4:22:e6:df:
|
||||
3a:17:23:13:c1:aa:0e:94:27:46:5d:11:b9:0b:dc:3d:cf:93:
|
||||
20:eb:18:56:c5:ac:e3:92:eb:55:d3:cb:ce:e4:9c:21:85:d6:
|
||||
21:93:92:4f
|
||||
-----BEGIN CERTIFICATE-----
|
||||
MIIDTzCCAjegAwIBAgIBATANBgkqhkiG9w0BAQsFADAdMRswGQYDVQQDDBJ3ZWIt
|
||||
cGxhdGZvcm0tdGVzdHMwHhcNMTQxMjIyMTIwOTE1WhcNMjQxMjIxMTIwOTE1WjAd
|
||||
MRswGQYDVQQDDBJ3ZWItcGxhdGZvcm0tdGVzdHMwggEiMA0GCSqGSIb3DQEBAQUA
|
||||
A4IBDwAwggEKAoIBAQDAPsO9t54uPuwfpa6fhY3AWaRAqP5mz+99H0qRUuZVD+Vp
|
||||
uETaYhfHiCit7G2wAM2haQ7oGYRYrenOMVBspIIUkQhao67ISRMZGHtbK0Qw67/H
|
||||
fLvUMhdqTeuE8WWbnSE+ka50dbN8tM36mHPUfNFdG4ly99RSvQWjwc+2WOdR7MJx
|
||||
x/YLAJdY+cvDGEZ7VQ+Qv9qjfdbFSOqztagSlqw4ZRC5sWnLTjtMw4N0M2O0zP5l
|
||||
wa0SUfECclBJA6umKCBBFcp3FdmFVWmdMcHbEr5G2+bTjy9mLZthCDBXbNlPsWqo
|
||||
5ZDj4miWRW4/3l4T/oq98T9/Juw8GoCwqOxSxREDAgMBAAGjgZkwgZYwDAYDVR0T
|
||||
BAUwAwEB/zAdBgNVHQ4EFgQUaqtTZJI2hyM0sx1vhUv131pcdI8wRQYDVR0jBD4w
|
||||
PIAUaqtTZJI2hyM0sx1vhUv131pcdI+hIaQfMB0xGzAZBgNVBAMMEndlYi1wbGF0
|
||||
Zm9ybS10ZXN0c4IBATALBgNVHQ8EBAMCAgQwEwYDVR0lBAwwCgYIKwYBBQUHAwEw
|
||||
DQYJKoZIhvcNAQELBQADggEBAEavAgSXL1sAEcWPxOErI2LRYUoo7YKCV9coZYil
|
||||
YRYgNwKQFgsFQ0bbvT20TRxuhf9d3A+kpCmYJK45q+SXqRC8pblLwS5azokyAPH8
|
||||
lDWmccic2QVDRGxoYq6xcSAXX8T7rgXmJuVBiMzbUVXthaDJ5Whlp/p624+BYWBQ
|
||||
CxawEEkZu3AONwkDIOiiueXrwmp7T2DN+yILJ8YNLeIyzCjexuIUaq0/xG54nXEk
|
||||
m1bEVCjp7AluNMxtXLzmaJZE/2LQVKCkN9j3n7y73K0sSeJkuU2q5CLm3zoXIxPB
|
||||
qg6UJ0ZdEbkL3D3PkyDrGFbFrOOS61XTy87knCGF1iGTkk8=
|
||||
-----END CERTIFICATE-----
|
30
tests/wpt/web-platform-tests/tools/certs/cakey.pem
Normal file
30
tests/wpt/web-platform-tests/tools/certs/cakey.pem
Normal file
|
@ -0,0 +1,30 @@
|
|||
-----BEGIN ENCRYPTED PRIVATE KEY-----
|
||||
MIIFDjBABgkqhkiG9w0BBQ0wMzAbBgkqhkiG9w0BBQwwDgQIb9ES7h6YGBcCAggA
|
||||
MBQGCCqGSIb3DQMHBAi2ZVLgq1XvagSCBMjAmGEVvjwXvF00veuQmJsmcjVHB/qP
|
||||
VqSXjCQ94orZb89UfFnPO9zXdvLWxrwb5WP6bbQv+Sh4htExXCD5XZi1AzWNCybe
|
||||
da0vvQGgjdzUh2fCrG4K7J0w20lrYgw3HVSj/WtmdbdZFdoX+BgXrxcqkE3M5opZ
|
||||
3UD3yIQeXSxUkh3iv6zzZaWujxjDI2JpwxRmMVbrr8OeBrKJsqB2DnKmq+emmvEF
|
||||
iXTN3Ww/Aj6GIqfPZ8jpVdwcVN5QpeHAh7b2lszt7GEOGcBhutPq4Aqy8PIiDR80
|
||||
sUYI7V8OXm+Y45DnfkvsogZEifOiUrQ2U+aGDu+Zt88661wVzjq+voJlz8EaIPCE
|
||||
B/NS2SgNqI2/DrjEEecn6hjgHWIUBwOfeNoSi1Tri6KZFyxG26LE/V8Cd50yodx9
|
||||
pBgFxdCbmYLeRcVeXW2bu0ZMjPddRlR5MHfrkM5ZAze7nRxoiyWnB/U8pPf+bQvx
|
||||
K4P9KcwCOeHigkaCYZKq7nmZyEy4km89zIugT/YWhMWyVwylTpagaiiJwYLjug8n
|
||||
CbFZWAkORBIl2g/YCuTBUJtC2IWX8kw+nYVwqBszpZyC6nbha2UmhQDfMAowQA5v
|
||||
n1LnV8I6f7u6HidbB8WX2UZoh03A4beCBz+dq2VaUquLTL4KQTIz+6rw7nEysrnH
|
||||
TIb8SlwsYAlzzwyyM9dSWt7iQeNjmH7zL0MozMs3LKHIrsWi7ZZh8BUYnT2vKdNV
|
||||
2ZLOMcR0tYVmVZ8uYkR9kny/fbZcKN54xScohA2UX261W+sWiEgN+RaBsQ79pFgi
|
||||
vYldfjaGNSvftXa590xn2tlS6/suB5MxiW5g3PuBg5XtVZ95l0f1n376Xh41sJv8
|
||||
YHrCtFHOlSpDJULGiXVh/wXBmS7qJ8DhnUUG699EdlsFf6Qg22WB3AZRvEJdYC4z
|
||||
P8W+jZ15NTDbHg3Hv7/CFYVzbXv2w0jkiqQgDF/wc6t/EdLD+2hzcN+nJGjtxZbn
|
||||
xjbXcg98CUMU+dc/aD4N45K9e9rPg3+iZLwvsRvwx+MszmgxxPv05pNyRO7RVk8r
|
||||
gkyyp9/CJFme+4nFKUc0dUy2yNXZtklTX0XKm/YNKin6uUMlIArIa54Cfvt9QslV
|
||||
iD+SxU1ZHmzwKT82+5ZeIRLNWvFV/9E4nD+BTagK2Fdwnsu1S2k7ItD9lK/cBPGS
|
||||
0tz1HWv4Auj3wMPZklp3SQluOl6eAIVqqI9GaX/d42DctBQWLTa27YibWyNIcw7o
|
||||
3N8GDREMawTBdDRwlZ3oT+yiGLX1c8ds2o0/4IcJlOkDoxXErmdlZo9oVe6z4R7g
|
||||
62yR53atVTLoUnAjxHXx0bJiyayv9Y3wjOEvuhuqdd9F+HOhTtAHr/BJQNhEk+z8
|
||||
531CZTJjb1p11PbOtHGV2IeB0S82mxkkXRykEXOb89ZpDHNRiMinThRkoCmuRI9r
|
||||
dTiES9B02yMPxJ3sLQyDxCoS5mwfcAqKTeK+yCvTvBy+t5tw63DbWlMp/7Ahy65K
|
||||
rWMHdwqwfoB+ZYw5sYZdPvuBVAT01I2JbOqX36RacQultFns2OinxOJHa1HjtXyS
|
||||
cPVEkMa7ci3Ym9j5RQNLVsgJe7YK9HixX5HjQFAowAH2pXZ5pKJIJYxPIUKtZlsz
|
||||
qbM=
|
||||
-----END ENCRYPTED PRIVATE KEY-----
|
|
@ -0,0 +1,28 @@
|
|||
-----BEGIN PRIVATE KEY-----
|
||||
MIIEvgIBADANBgkqhkiG9w0BAQEFAASCBKgwggSkAgEAAoIBAQCzhNaLAVkYhdHc
|
||||
Mt8495CFGz6lXoE+L/w6X3937yO7OognD74lRs1jfcuV2KVQENKi0reX0Q1s+/kF
|
||||
6G+oS72VZ557lFipbZP94BLFzbSKZFIxXw7jiYRx2pjdS+wCJaV9Nf5j2rOs7KVG
|
||||
Dw1kI1xt8+zMKGMjwEua7I/B7rGiPnJNcLUJweu0EFU8i+oblH5LdOb0n0+mRTC1
|
||||
8Li00VlQZQqGU+pMn570WGwx9Rc6b1eLy1/wKAtFko0wIEn/UuYsyxia1+buPk80
|
||||
NRUTxQLaxV+++1vOjb+1NXY8fOacOyaHTY2A5hbGJ/JQSbZydENJSUQ4u3hDI+4W
|
||||
Ptli5qXXAgMBAAECggEBAIcwDQSnIjo2ZECHytQykpG6X6XXEksLhc1Lp0lhPC49
|
||||
uNR5pX6a4AcBb3PLr0opMQZO2tUoKA0ff3t0e8loKD+/xXhY0Z/dlioEOP7elwv0
|
||||
2nS1mhe9spCuxpk4GGXRhdtR8t2tj8s0do3YvgPgITXoEDX6YBZHNGhZpzSrFPgQ
|
||||
/c3eGCVmzWYuLFfdj5OPQ9bwTaY4JSvDLZT0/WTgiica7VySwfz3HP1fFqNykTiK
|
||||
ACQREvtxfk5Ym2nT6oni7CM2zOEJL9SXicXI5HO4bERH0ZYh//F3g6mwGiFXUJPd
|
||||
NKgaTM1oT9kRGkUaEYsRWrddwR8d5mXLvBuTJbgIsSECgYEA1+2uJSYRW1OqbhYP
|
||||
ms59YQHSs3VjpJpnCV2zNa2Wixs57KS2cOH7B6KrQCogJFLtgCDVLtyoErfVkD7E
|
||||
FivTgYr1pVCRppJddQzXik31uOINOBVffr7/09g3GcRN+ubHPZPq3K+dD6gHa3Aj
|
||||
0nH1EjEEV0QpSTQFn87OF2mc9wcCgYEA1NVqMbbzd+9Xft5FXuSbX6E+S02dOGat
|
||||
SgpnkTM80rjqa6eHdQzqk3JqyteHPgdi1vdYRlSPOj/X+6tySY0Ej9sRnYOfddA2
|
||||
kpiDiVkmiqVolyJPY69Utj+E3TzJ1vhCQuYknJmB7zP9tDcTxMeq0l/NaWvGshEK
|
||||
yC4UTQog1rECgYASOFILfGzWgfbNlzr12xqlRtwanHst9oFfPvLSQrWDQ2bd2wAy
|
||||
Aj+GY2mD3oobxouX1i1m6OOdwLlalJFDNauBMNKNgoDnx03vhIfjebSURy7KXrNS
|
||||
JJe9rm7n07KoyzRgs8yLlp3wJkOKA0pihY8iW9R78JpzPNqEo5SsURMXnQKBgBlV
|
||||
gfuC9H4tPjP6zzUZbyk1701VYsaI6k2q6WMOP0ox+q1v1p7nN7DvaKjWeOG4TVqb
|
||||
PKW6gQYE/XeWk9cPcyCQigs+1KdYbnaKsvWRaBYO1GFREzQhdarv6qfPCZOOH40J
|
||||
Cgid+Sp4/NULzU2aGspJ3xCSZKdjge4MFhyJfRkxAoGBAJlwqY4nue0MBLGNpqcs
|
||||
WwDtSasHvegKAcxGBKL5oWPbLBk7hk+hdqc8f6YqCkCNqv/ooBspL15ESItL+6yT
|
||||
zt0YkK4oH9tmLDb+rvqZ7ZdXbWSwKITMoCyyHUtT6OKt/RtA0Vdy9LPnP27oSO/C
|
||||
dk8Qf7KgKZLWo0ZNkvw38tEC
|
||||
-----END PRIVATE KEY-----
|
|
@ -0,0 +1,86 @@
|
|||
Certificate:
|
||||
Data:
|
||||
Version: 3 (0x2)
|
||||
Serial Number: 2 (0x2)
|
||||
Signature Algorithm: sha256WithRSAEncryption
|
||||
Issuer: CN=web-platform-tests
|
||||
Validity
|
||||
Not Before: Dec 22 12:09:16 2014 GMT
|
||||
Not After : Dec 21 12:09:16 2024 GMT
|
||||
Subject: CN=web-platform.test
|
||||
Subject Public Key Info:
|
||||
Public Key Algorithm: rsaEncryption
|
||||
Public-Key: (2048 bit)
|
||||
Modulus:
|
||||
00:b3:84:d6:8b:01:59:18:85:d1:dc:32:df:38:f7:
|
||||
90:85:1b:3e:a5:5e:81:3e:2f:fc:3a:5f:7f:77:ef:
|
||||
23:bb:3a:88:27:0f:be:25:46:cd:63:7d:cb:95:d8:
|
||||
a5:50:10:d2:a2:d2:b7:97:d1:0d:6c:fb:f9:05:e8:
|
||||
6f:a8:4b:bd:95:67:9e:7b:94:58:a9:6d:93:fd:e0:
|
||||
12:c5:cd:b4:8a:64:52:31:5f:0e:e3:89:84:71:da:
|
||||
98:dd:4b:ec:02:25:a5:7d:35:fe:63:da:b3:ac:ec:
|
||||
a5:46:0f:0d:64:23:5c:6d:f3:ec:cc:28:63:23:c0:
|
||||
4b:9a:ec:8f:c1:ee:b1:a2:3e:72:4d:70:b5:09:c1:
|
||||
eb:b4:10:55:3c:8b:ea:1b:94:7e:4b:74:e6:f4:9f:
|
||||
4f:a6:45:30:b5:f0:b8:b4:d1:59:50:65:0a:86:53:
|
||||
ea:4c:9f:9e:f4:58:6c:31:f5:17:3a:6f:57:8b:cb:
|
||||
5f:f0:28:0b:45:92:8d:30:20:49:ff:52:e6:2c:cb:
|
||||
18:9a:d7:e6:ee:3e:4f:34:35:15:13:c5:02:da:c5:
|
||||
5f:be:fb:5b:ce:8d:bf:b5:35:76:3c:7c:e6:9c:3b:
|
||||
26:87:4d:8d:80:e6:16:c6:27:f2:50:49:b6:72:74:
|
||||
43:49:49:44:38:bb:78:43:23:ee:16:3e:d9:62:e6:
|
||||
a5:d7
|
||||
Exponent: 65537 (0x10001)
|
||||
X509v3 extensions:
|
||||
X509v3 Basic Constraints:
|
||||
CA:FALSE
|
||||
X509v3 Subject Key Identifier:
|
||||
2D:98:A3:99:39:1C:FE:E9:9A:6D:17:94:D2:3A:96:EE:C8:9E:04:22
|
||||
X509v3 Authority Key Identifier:
|
||||
keyid:6A:AB:53:64:92:36:87:23:34:B3:1D:6F:85:4B:F5:DF:5A:5C:74:8F
|
||||
|
||||
X509v3 Key Usage:
|
||||
Digital Signature, Non Repudiation, Key Encipherment
|
||||
X509v3 Extended Key Usage:
|
||||
TLS Web Server Authentication
|
||||
X509v3 Subject Alternative Name:
|
||||
DNS:web-platform.test, DNS:www.web-platform.test, DNS:xn--n8j6ds53lwwkrqhv28a.web-platform.test, DNS:xn--lve-6lad.web-platform.test, DNS:www2.web-platform.test, DNS:www1.web-platform.test
|
||||
Signature Algorithm: sha256WithRSAEncryption
|
||||
33:db:f7:f0:f6:92:16:4f:2d:42:bc:b8:aa:e6:ab:5e:f9:b9:
|
||||
b0:48:ae:b5:8d:cc:02:7b:e9:6f:4e:75:f7:17:a0:5e:7b:87:
|
||||
06:49:48:83:c5:bb:ca:95:07:37:0e:5d:e3:97:de:9e:0c:a4:
|
||||
82:30:11:81:49:5d:50:29:72:92:a5:ca:17:b1:7c:f1:32:11:
|
||||
17:57:e6:59:c1:ac:e3:3b:26:d2:94:97:50:6a:b9:54:88:84:
|
||||
9b:6f:b1:06:f5:80:04:22:10:14:b1:f5:97:25:fc:66:d6:69:
|
||||
a3:36:08:85:23:ff:8e:3c:2b:e0:6d:e7:61:f1:00:8f:61:3d:
|
||||
b0:87:ad:72:21:f6:f0:cc:4f:c9:20:bf:83:11:0f:21:f4:b8:
|
||||
c0:dd:9c:51:d7:bb:27:32:ec:ab:a4:62:14:28:32:da:f2:87:
|
||||
80:68:9c:ea:ac:eb:f5:7f:f5:de:f4:c0:39:91:c8:76:a4:ee:
|
||||
d0:a8:50:db:c1:4b:f9:c4:3d:d9:e8:8e:b6:3f:c0:96:79:12:
|
||||
d8:fa:4d:0a:b3:36:76:aa:4e:b2:82:2f:a2:d4:0d:db:fd:64:
|
||||
77:6f:6e:e9:94:7f:0f:c8:3a:3c:96:3d:cd:4d:6c:ba:66:95:
|
||||
f7:b4:9d:a4:94:9f:97:b3:9a:0d:dc:18:8c:11:0b:56:65:8e:
|
||||
46:4c:e6:5e
|
||||
-----BEGIN CERTIFICATE-----
|
||||
MIID2jCCAsKgAwIBAgIBAjANBgkqhkiG9w0BAQsFADAdMRswGQYDVQQDDBJ3ZWIt
|
||||
cGxhdGZvcm0tdGVzdHMwHhcNMTQxMjIyMTIwOTE2WhcNMjQxMjIxMTIwOTE2WjAc
|
||||
MRowGAYDVQQDExF3ZWItcGxhdGZvcm0udGVzdDCCASIwDQYJKoZIhvcNAQEBBQAD
|
||||
ggEPADCCAQoCggEBALOE1osBWRiF0dwy3zj3kIUbPqVegT4v/Dpff3fvI7s6iCcP
|
||||
viVGzWN9y5XYpVAQ0qLSt5fRDWz7+QXob6hLvZVnnnuUWKltk/3gEsXNtIpkUjFf
|
||||
DuOJhHHamN1L7AIlpX01/mPas6zspUYPDWQjXG3z7MwoYyPAS5rsj8HusaI+ck1w
|
||||
tQnB67QQVTyL6huUfkt05vSfT6ZFMLXwuLTRWVBlCoZT6kyfnvRYbDH1FzpvV4vL
|
||||
X/AoC0WSjTAgSf9S5izLGJrX5u4+TzQ1FRPFAtrFX777W86Nv7U1djx85pw7JodN
|
||||
jYDmFsYn8lBJtnJ0Q0lJRDi7eEMj7hY+2WLmpdcCAwEAAaOCASQwggEgMAkGA1Ud
|
||||
EwQCMAAwHQYDVR0OBBYEFC2Yo5k5HP7pmm0XlNI6lu7IngQiMB8GA1UdIwQYMBaA
|
||||
FGqrU2SSNocjNLMdb4VL9d9aXHSPMAsGA1UdDwQEAwIF4DATBgNVHSUEDDAKBggr
|
||||
BgEFBQcDATCBsAYDVR0RBIGoMIGlghF3ZWItcGxhdGZvcm0udGVzdIIVd3d3Lndl
|
||||
Yi1wbGF0Zm9ybS50ZXN0gil4bi0tbjhqNmRzNTNsd3drcnFodjI4YS53ZWItcGxh
|
||||
dGZvcm0udGVzdIIeeG4tLWx2ZS02bGFkLndlYi1wbGF0Zm9ybS50ZXN0ghZ3d3cy
|
||||
LndlYi1wbGF0Zm9ybS50ZXN0ghZ3d3cxLndlYi1wbGF0Zm9ybS50ZXN0MA0GCSqG
|
||||
SIb3DQEBCwUAA4IBAQAz2/fw9pIWTy1CvLiq5qte+bmwSK61jcwCe+lvTnX3F6Be
|
||||
e4cGSUiDxbvKlQc3Dl3jl96eDKSCMBGBSV1QKXKSpcoXsXzxMhEXV+ZZwazjOybS
|
||||
lJdQarlUiISbb7EG9YAEIhAUsfWXJfxm1mmjNgiFI/+OPCvgbedh8QCPYT2wh61y
|
||||
IfbwzE/JIL+DEQ8h9LjA3ZxR17snMuyrpGIUKDLa8oeAaJzqrOv1f/Xe9MA5kch2
|
||||
pO7QqFDbwUv5xD3Z6I62P8CWeRLY+k0KszZ2qk6ygi+i1A3b/WR3b27plH8PyDo8
|
||||
lj3NTWy6ZpX3tJ2klJ+Xs5oN3BiMEQtWZY5GTOZe
|
||||
-----END CERTIFICATE-----
|
11
tests/wpt/web-platform-tests/tools/ci/before_install.sh
Executable file
11
tests/wpt/web-platform-tests/tools/ci/before_install.sh
Executable file
|
@ -0,0 +1,11 @@
|
|||
#!/bin/bash
set -e

# Ask `wpt test-jobs` whether the current $JOB is affected by the commits
# under test; it exits 0 when the job should run.
if [[ $(./wpt test-jobs --includes $JOB; echo $?) -eq 0 ]]; then
    export RUN_JOB=1
    # Diagnostic output goes to stderr so stdout stays clean.
    git submodule update --init --recursive 1>&2
    # Point at the virtual X display and start it for browser tests.
    export DISPLAY=:99.0
    sh -e /etc/init.d/xvfb start 1>&2
else
    export RUN_JOB=0
fi
|
385
tests/wpt/web-platform-tests/tools/ci/check_stability.py
Normal file
385
tests/wpt/web-platform-tests/tools/ci/check_stability.py
Normal file
|
@ -0,0 +1,385 @@
|
|||
from __future__ import print_function
|
||||
|
||||
import argparse
|
||||
import logging
|
||||
import os
|
||||
import subprocess
|
||||
import sys
|
||||
from ConfigParser import SafeConfigParser
|
||||
|
||||
import requests
|
||||
|
||||
wpt_root = os.path.abspath(os.path.join(os.path.dirname(__file__), os.pardir, os.pardir))
|
||||
sys.path.insert(0, wpt_root)
|
||||
|
||||
from tools.wpt import testfiles
|
||||
from tools.wpt.testfiles import get_git_cmd
|
||||
from tools.wpt.virtualenv import Virtualenv
|
||||
from tools.wpt.utils import Kwargs
|
||||
from tools.wpt.run import create_parser, setup_wptrunner
|
||||
from tools.wpt import markdown
|
||||
from tools import localpaths
|
||||
|
||||
logger = None
|
||||
run, write_inconsistent, write_results = None, None, None
|
||||
wptrunner = None
|
||||
|
||||
def setup_logging():
    """Attach a stdout handler to the module logger and enable DEBUG output."""
    stream_handler = logging.StreamHandler(sys.stdout)
    stream_handler.setFormatter(logging.Formatter(logging.BASIC_FORMAT, None))
    logger.addHandler(stream_handler)
    logger.setLevel(logging.DEBUG)
|
||||
|
||||
|
||||
def do_delayed_imports():
    """Import wptrunner-dependent names lazily.

    These imports rebind the module-level globals `run`,
    `write_inconsistent`, `write_results` and `wptrunner`.  NOTE: after this
    runs, the global name `run` refers to tools.wpt.stability.run, shadowing
    the `run` function defined later in this module — callers rely on that.
    """
    global run, write_inconsistent, write_results, wptrunner
    from tools.wpt.stability import run, write_inconsistent, write_results
    from wptrunner import wptrunner
|
||||
|
||||
|
||||
class TravisFold(object):
    """Context manager wrapping a log section in TravisCI fold markers.

    See: https://blog.travis-ci.com/2013-05-22-improving-build-visibility-log-folds/
    """

    def __init__(self, name):
        """Remember the fold section name used by both markers."""
        self.name = name

    def __enter__(self):
        """Emit the fold-start marker on stderr."""
        marker = "travis_fold:start:%s" % self.name
        print(marker, file=sys.stderr)

    def __exit__(self, type, value, traceback):
        """Emit the fold-end marker on stderr."""
        marker = "travis_fold:end:%s" % self.name
        print(marker, file=sys.stderr)
|
||||
|
||||
|
||||
class FilteredIO(object):
    """File-object proxy whose `write` is gated by a callback.

    Every message is passed to `on_write(original, text)`; the text is only
    forwarded to the wrapped stream when the callback returns True.
    All other attribute access is delegated to the wrapped object.
    """

    def __init__(self, original, on_write):
        # The wrapped stream and the gating predicate.
        self.original = original
        self.on_write = on_write

    def __getattr__(self, name):
        # Delegate everything else (flush, fileno, ...) to the wrapped stream.
        return getattr(self.original, name)

    def disable(self):
        # Shadow the class-level write with an instance-level no-op.
        self.write = lambda msg: None

    def write(self, msg):
        # Round-trip through UTF-8 so unencodable characters become
        # backslash escapes instead of raising.
        sanitized = msg.encode("utf8", "backslashreplace").decode("utf8")
        if self.on_write(self.original, sanitized) is True:
            self.original.write(sanitized)
|
||||
|
||||
|
||||
def replace_streams(capacity, warning_msg):
    """Cap combined stdout/stderr output at roughly `capacity` bytes.

    Replaces sys.stdout and sys.stderr with FilteredIO wrappers that count
    everything written; once the budget is exhausted both streams are
    disabled, the current message is truncated to fit, and `warning_msg`
    is written to the original stderr.
    """
    # Value must be boxed to support modification from inner function scope
    count = [0]
    # Reserve room for the warning message plus its surrounding newlines.
    capacity -= 2 + len(warning_msg)
    # Keep a handle on the real stderr for the final warning.
    stderr = sys.stderr

    def on_write(handle, msg):
        length = len(msg)
        count[0] += length

        if count[0] > capacity:
            wrapped_stdout.disable()
            wrapped_stderr.disable()
            # Negative slice end trims exactly the overflow from this message.
            handle.write(msg[0:capacity - count[0]])
            handle.flush()
            stderr.write("\n%s\n" % warning_msg)
            return False

        return True

    # Store local references to the replaced streams to guard against the case
    # where other code replace the global references.
    sys.stdout = wrapped_stdout = FilteredIO(sys.stdout, on_write)
    sys.stderr = wrapped_stderr = FilteredIO(sys.stderr, on_write)
|
||||
|
||||
|
||||
def call(*args):
    """Log terminal command, invoke it as a subprocess.

    Returns a bytestring of the subprocess output if no error.
    """
    logger.debug("%s" % " ".join(args))
    try:
        output = subprocess.check_output(args)
    except subprocess.CalledProcessError as e:
        # Surface the failing command and its output before re-raising.
        logger.critical("%s exited with return code %i" %
                        (e.cmd, e.returncode))
        logger.critical(e.output)
        raise
    else:
        return output
|
||||
|
||||
def fetch_wpt(user, *args):
    """Run `git fetch` against the given GitHub user's web-platform-tests fork.

    Extra positional args are passed straight through to `git fetch`.
    """
    git = get_git_cmd(wpt_root)
    git("fetch", "https://github.com/%s/web-platform-tests.git" % user, *args)
|
||||
|
||||
|
||||
def get_sha1():
    """Return the sha1 of the current git branch HEAD commit."""
    # get_git_cmd returns a callable bound to the wpt checkout.
    return get_git_cmd(wpt_root)("rev-parse", "HEAD").strip()
|
||||
|
||||
|
||||
def install_wptrunner():
    """Install wptrunner (from the in-tree checkout) via pip.

    Bug fix: the original passed the undefined name `wptrunner_root`,
    which raised NameError when called; the path is now derived from
    `wpt_root`, the only root defined in this module.
    """
    call("pip", "install", os.path.join(wpt_root, "tools", "wptrunner"))
|
||||
|
||||
|
||||
def deepen_checkout(user):
    """Convert from a shallow checkout to a full one"""
    refspec = "+refs/heads/*:refs/remotes/origin/*"
    # A shallow clone is marked by the presence of .git/shallow.
    if os.path.exists(os.path.join(wpt_root, ".git", "shallow")):
        fetch_wpt(user, "--unshallow", refspec)
    else:
        fetch_wpt(user, refspec)
|
||||
|
||||
|
||||
def get_parser():
    """Create and return script-specific argument parser."""
    description = """Detect instabilities in new tests by executing tests
    repeatedly and comparing results between executions."""
    parser = argparse.ArgumentParser(description=description)
    parser.add_argument("--user",
                        action="store",
                        # Travis docs say do not depend on USER env variable.
                        # This is a workaround to get what should be the same value
                        default=os.environ.get("TRAVIS_REPO_SLUG", "w3c").split('/')[0],
                        help="Travis user name")
    parser.add_argument("--output-bytes",
                        action="store",
                        type=int,
                        # Consumed by replace_streams() to cap log volume.
                        help="Maximum number of bytes to write to standard output/error")
    parser.add_argument("--metadata",
                        dest="metadata_root",
                        action="store",
                        default=wpt_root,
                        help="Directory that will contain MANIFEST.json")
    parser.add_argument("--config-file",
                        action="store",
                        type=str,
                        help="Location of ini-formatted configuration file",
                        default="check_stability.ini")
    parser.add_argument("--rev",
                        action="store",
                        default=None,
                        help="Commit range to use")
    return parser
|
||||
|
||||
|
||||
def set_default_args(kwargs):
    """Default Sauce Labs-related kwargs from Travis environment variables.

    Each value is only applied when the corresponding kwarg is currently
    unset (via Kwargs.set_if_none).
    """
    kwargs.set_if_none("sauce_platform",
                       os.environ.get("PLATFORM"))
    kwargs.set_if_none("sauce_build",
                       os.environ.get("TRAVIS_BUILD_NUMBER"))
    python_version = os.environ.get("TRAVIS_PYTHON_VERSION")
    # Tag runs with the Python version when Travis provides one.
    kwargs.set_if_none("sauce_tags",
                       [python_version] if python_version else [])
    kwargs.set_if_none("sauce_tunnel_id",
                       os.environ.get("TRAVIS_JOB_NUMBER"))
    kwargs.set_if_none("sauce_user",
                       os.environ.get("SAUCE_USERNAME"))
    kwargs.set_if_none("sauce_key",
                       os.environ.get("SAUCE_ACCESS_KEY"))
|
||||
|
||||
|
||||
def pr():
    """Return the Travis pull request number as a string, or None."""
    number = os.environ.get("TRAVIS_PULL_REQUEST", "false")
    if number == "false":
        # Travis sets the variable to the literal string "false"
        # for non-PR builds.
        return None
    return number
|
||||
|
||||
|
||||
def post_results(results, pr_number, iterations, product, url, status):
    """Post stability results to a given URL."""
    payload_results = []

    # Flatten the nested results mapping into JSON-serializable dicts,
    # one entry per test with its per-subtest statuses and messages.
    for test_name, test in results.iteritems():
        subtests = []
        for subtest_name, subtest in test['subtests'].items():
            subtests.append({
                'test': subtest_name,
                'result': {
                    'messages': list(subtest['messages']),
                    'status': subtest['status']
                },
            })
        payload_results.append({
            'test': test_name,
            'result': {
                'status': test['status'],
                'subtests': subtests
            }
        })

    # Travis metadata identifying the PR, job, and build that produced
    # these results, plus the flattened results themselves.
    payload = {
        "pull": {
            "number": int(pr_number),
            "sha": os.environ.get("TRAVIS_PULL_REQUEST_SHA"),
        },
        "job": {
            "id": int(os.environ.get("TRAVIS_JOB_ID")),
            "number": os.environ.get("TRAVIS_JOB_NUMBER"),
            "allow_failure": os.environ.get("TRAVIS_ALLOW_FAILURE") == 'true',
            "status": status,
        },
        "build": {
            "id": int(os.environ.get("TRAVIS_BUILD_ID")),
            "number": os.environ.get("TRAVIS_BUILD_NUMBER"),
        },
        "product": product,
        "iterations": iterations,
        "message": "All results were stable." if status == "passed" else "Unstable results.",
        "results": payload_results,
    }

    requests.post(url, json=payload)
|
||||
|
||||
|
||||
def main():
    """Perform check_stability functionality and return exit code."""
    # Bootstrap a virtualenv (reusing $VIRTUAL_ENV when set) with
    # wptrunner's requirements plus requests for posting results.
    venv = Virtualenv(os.environ.get("VIRTUAL_ENV", os.path.join(wpt_root, "_venv")))
    venv.install_requirements(os.path.join(wpt_root, "tools", "wptrunner", "requirements.txt"))
    venv.install("requests")

    # Unrecognised arguments are forwarded to the wpt runner's own parser.
    args, wpt_args = get_parser().parse_known_args()
    return run(venv, wpt_args, **vars(args))
|
||||
|
||||
|
||||
def run(venv, wpt_args, **kwargs):
    """Run the stability check against the tests affected by recent commits.

    Returns 0 when results are stable (or there is nothing to test) and
    2 when inconsistent results were observed.

    Bug fixes relative to the original:
    * the PR check tested `pr is not None` — `pr` is the function object,
      which is always truthy — instead of the fetched `pr_number`;
    * an unused `parser = get_parser()` local and a redundant second
      `do_delayed_imports()` call were removed.
    """
    global logger

    # Rebinds the global `run` to tools.wpt.stability.run (see
    # do_delayed_imports); the later `run(...)` call relies on that.
    do_delayed_imports()

    retcode = 0

    wpt_args = create_parser().parse_args(wpt_args)

    # Repo-specific settings: tests to skip, changes to ignore, and the
    # endpoint where results are posted.
    with open(kwargs["config_file"], 'r') as config_fp:
        config = SafeConfigParser()
        config.readfp(config_fp)
        skip_tests = config.get("file detection", "skip_tests").split()
        ignore_changes = set(config.get("file detection", "ignore_changes").split())
        results_url = config.get("file detection", "results_url")

    if kwargs["output_bytes"] is not None:
        replace_streams(kwargs["output_bytes"],
                        "Log reached capacity (%s bytes); output disabled." % kwargs["output_bytes"])

    wpt_args.metadata_root = kwargs["metadata_root"]
    try:
        os.makedirs(wpt_args.metadata_root)
    except OSError:
        # Directory already exists.
        pass

    logger = logging.getLogger(os.path.splitext(__file__)[0])

    setup_logging()

    browser_name = wpt_args.product.split(":")[0]

    if browser_name == "sauce" and not wpt_args.sauce_key:
        logger.warning("Cannot run tests on Sauce Labs. No access key.")
        return retcode

    pr_number = pr()

    with TravisFold("browser_setup"):
        logger.info(markdown.format_comment_title(wpt_args.product))

        # Fix: test the fetched PR number, not the `pr` function object
        # (which is always non-None).
        if pr_number is not None:
            deepen_checkout(kwargs["user"])

        # Ensure we have a branch called "master"
        fetch_wpt(kwargs["user"], "master:master")

        head_sha1 = get_sha1()
        logger.info("Testing web-platform-tests at revision %s" % head_sha1)

        # Default to everything since the branch point unless --rev given.
        if not kwargs["rev"]:
            branch_point = testfiles.branch_point()
            revish = "%s..HEAD" % branch_point
        else:
            revish = kwargs["rev"]

        files_changed, files_ignored = testfiles.files_changed(revish, ignore_changes)

        if files_ignored:
            logger.info("Ignoring %s changed files:\n%s" % (len(files_ignored),
                                                            "".join(" * %s\n" % item for item in files_ignored)))

        tests_changed, files_affected = testfiles.affected_testfiles(files_changed, skip_tests,
                                                                     manifest_path=os.path.join(
                                                                         wpt_args.metadata_root,
                                                                         "MANIFEST.json"))

        if not (tests_changed or files_affected):
            logger.info("No tests changed")
            return 0

        wpt_kwargs = Kwargs(vars(wpt_args))
        wpt_kwargs["test_list"] = list(tests_changed | files_affected)
        set_default_args(wpt_kwargs)

        wpt_kwargs["stability"] = True
        wpt_kwargs["prompt"] = False
        wpt_kwargs["install_browser"] = True
        wpt_kwargs["install"] = wpt_kwargs["product"].split(":")[0] == "firefox"

        wpt_kwargs = setup_wptrunner(venv, **wpt_kwargs)

        logger.info("Using binary %s" % wpt_kwargs["binary"])

        if tests_changed:
            logger.debug("Tests changed:\n%s" % "".join(" * %s\n" % item for item in tests_changed))

        if files_affected:
            logger.debug("Affected tests:\n%s" % "".join(" * %s\n" % item for item in files_affected))

    with TravisFold("running_tests"):
        logger.info("Starting tests")

        wpt_logger = wptrunner.logger
        # `run` here is the global rebound by do_delayed_imports to
        # tools.wpt.stability.run — NOT a recursive call.
        iterations, results, inconsistent = run(venv, wpt_logger, **wpt_kwargs)

        if results:
            if inconsistent:
                write_inconsistent(logger.error, inconsistent, iterations)
                retcode = 2
            else:
                logger.info("All results were stable\n")
            with TravisFold("full_results"):
                write_results(logger.info, results, iterations,
                              pr_number=pr_number,
                              use_details=True)
                if pr_number:
                    post_results(results, iterations=iterations, url=results_url,
                                 product=wpt_args.product, pr_number=pr_number,
                                 status="failed" if inconsistent else "passed")
        else:
            logger.info("No tests run.")

    return retcode
|
||||
|
||||
|
||||
if __name__ == "__main__":
    try:
        retcode = main()
    except Exception:
        # Fix: a bare `except:` would also swallow SystemExit and
        # KeyboardInterrupt, masking intended exit codes; catch only
        # ordinary errors, report them, and exit non-zero.
        import traceback
        traceback.print_exc()
        sys.exit(1)
    else:
        sys.exit(retcode)
|
25
tests/wpt/web-platform-tests/tools/ci/ci_built_diff.sh
Executable file
25
tests/wpt/web-platform-tests/tools/ci/ci_built_diff.sh
Executable file
|
@ -0,0 +1,25 @@
|
|||
#!/bin/bash
# Fix: added a shebang — this script uses bash-only features (arrays,
# `echo -e`) and is marked executable, and every sibling CI script
# already declares #!/bin/bash.
set -ex

SCRIPT_DIR=$(dirname $(readlink -f "$0"))
WPT_ROOT=$(readlink -f $SCRIPT_DIR/../..)
cd $WPT_ROOT

main() {
    # Diff PNGs based on pixel-for-pixel identity
    echo -e '[diff "img"]\n textconv = identify -quiet -format "%#"' >> .git/config
    echo -e '*.png diff=img' >> .git/info/attributes

    # Exclude tests that rely on font rendering
    excluded=(
        '2dcontext/drawing-text-to-the-canvas/2d.text.draw.fill.basic.png'
        '2dcontext/drawing-text-to-the-canvas/2d.text.draw.fill.maxWidth.large.png'
        '2dcontext/drawing-text-to-the-canvas/2d.text.draw.fill.rtl.png'
        '2dcontext/drawing-text-to-the-canvas/2d.text.draw.stroke.basic.png'
    )

    # Regenerate built tests and fail if anything (except the excluded
    # font-dependent files) differs from what is checked in.
    ./update-built-tests.sh
    git update-index --assume-unchanged ${excluded[*]}
    git diff --exit-code
}

main
|
9
tests/wpt/web-platform-tests/tools/ci/ci_lint.sh
Normal file
9
tests/wpt/web-platform-tests/tools/ci/ci_lint.sh
Normal file
|
@ -0,0 +1,9 @@
|
|||
#!/bin/bash
# Fix: added a shebang for consistency with the other tools/ci scripts.
set -ex

SCRIPT_DIR=$(dirname $(readlink -f "$0"))
WPT_ROOT=$(readlink -f $SCRIPT_DIR/../..)
cd $WPT_ROOT

# Build the manifest outside the checkout, then run the lint.
mkdir -p ~/meta
./wpt manifest -p ~/meta/MANIFEST.json
./wpt lint
|
20
tests/wpt/web-platform-tests/tools/ci/ci_resources_unittest.sh
Executable file
20
tests/wpt/web-platform-tests/tools/ci/ci_resources_unittest.sh
Executable file
|
@ -0,0 +1,20 @@
|
|||
#!/bin/bash
set -ex

SCRIPT_DIR=$(dirname $(readlink -f "$0"))
WPT_ROOT=$(readlink -f $SCRIPT_DIR/../..)
cd $WPT_ROOT

main() {
    cd $WPT_ROOT
    pip install -U tox
    pip install --requirement tools/wpt/requirements.txt
    # Install Firefox and its webdriver binary, and put the driver
    # directory on PATH so the tests can find it.
    ./wpt install firefox browser --destination $HOME
    ./wpt install firefox webdriver --destination $HOME/firefox
    export PATH=$HOME/firefox:$PATH

    # Run the resources/ test suite via tox.
    cd $WPT_ROOT/resources/test
    tox
}

main
|
22
tests/wpt/web-platform-tests/tools/ci/ci_stability.sh
Normal file
22
tests/wpt/web-platform-tests/tools/ci/ci_stability.sh
Normal file
|
@ -0,0 +1,22 @@
|
|||
#!/bin/bash
set -ex

SCRIPT_DIR=$(dirname $(readlink -f "$0"))
WPT_ROOT=$(readlink -f $SCRIPT_DIR/../..)
cd $WPT_ROOT

# Shared helpers: hosts_fixup, install_chrome.
source tools/ci/lib.sh

test_stability() {
    # Cap combined output at 3 MiB via --output-bytes.
    ./wpt check-stability $PRODUCT --output-bytes $((1024 * 1024 * 3)) --metadata ~/meta/ --install-fonts
}

main() {
    hosts_fixup
    # PRODUCT may be e.g. "chrome:unstable"; extract the channel suffix
    # and install that Chrome channel before running.
    if [ $(echo $PRODUCT | grep '^chrome:') ]; then
        install_chrome $(echo $PRODUCT | grep --only-matching '\w\+$')
    fi
    test_stability
}

main
|
25
tests/wpt/web-platform-tests/tools/ci/ci_tools_unittest.sh
Executable file
25
tests/wpt/web-platform-tests/tools/ci/ci_tools_unittest.sh
Executable file
|
@ -0,0 +1,25 @@
|
|||
#!/bin/bash
set -ex

SCRIPT_DIR=$(dirname $(readlink -f "$0"))
WPT_ROOT=$(readlink -f $SCRIPT_DIR/../..)
cd $WPT_ROOT

# Run the tools/ unit tests when the tools_unittest job is affected.
if [[ $(./wpt test-jobs --includes tools_unittest; echo $?) -eq 0 ]]; then
    pip install -U tox codecov
    cd tools
    tox
    cd $WPT_ROOT
else
    echo "Skipping tools unittest"
fi

if [[ $(./wpt test-jobs --includes wptrunner_unittest; echo $?) -eq 0 ]]; then
    # Fix: quote $TOXENV — unquoted, an unset/empty variable makes `[`
    # fail with a syntax error ("[ == py27 ]") instead of comparing.
    if [ "$TOXENV" == "py27" ] || [ "$TOXENV" == "pypy" ]; then
        cd tools/wptrunner
        tox
    fi
else
    echo "Skipping wptrunner unittest"
fi
|
||||
|
19
tests/wpt/web-platform-tests/tools/ci/ci_wpt.sh
Normal file
19
tests/wpt/web-platform-tests/tools/ci/ci_wpt.sh
Normal file
|
@ -0,0 +1,19 @@
|
|||
#!/bin/bash
set -e

SCRIPT_DIR=$(dirname $(readlink -f "$0"))
WPT_ROOT=$(readlink -f $SCRIPT_DIR/../..)
cd $WPT_ROOT

# Shared helpers: hosts_fixup, install_chrome.
source tools/ci/lib.sh

main() {
    # Convert the CI's shallow clone into a full one with all branches.
    # NOTE(review): presumably required by tests that inspect git history.
    git fetch --unshallow https://github.com/w3c/web-platform-tests.git +refs/heads/*:refs/remotes/origin/*
    hosts_fixup
    install_chrome unstable
    pip install -U tox codecov
    cd tools/wpt
    tox
}

main
|
6
tests/wpt/web-platform-tests/tools/ci/commands.json
Normal file
6
tests/wpt/web-platform-tests/tools/ci/commands.json
Normal file
|
@ -0,0 +1,6 @@
|
|||
{
|
||||
"test-jobs": {"path": "jobs.py", "script": "run", "parser": "create_parser", "help": "List test jobs that should run for a set of commits",
|
||||
"virtualenv": false},
|
||||
"check-stability": {"path": "check_stability.py", "script": "run", "parser": "get_parser", "parse_known": true, "help": "Check test stability",
|
||||
"virtualenv": true, "install": ["requests"], "requirements": ["../wptrunner/requirements.txt"]}
|
||||
}
|
11
tests/wpt/web-platform-tests/tools/ci/install.sh
Executable file
11
tests/wpt/web-platform-tests/tools/ci/install.sh
Executable file
|
@ -0,0 +1,11 @@
|
|||
#!/bin/bash
set -ex

# Install the Python dependencies the CI jobs need.  RUN_JOB is set by
# the job-selection logic; skip the work when this job is not scheduled.

SCRIPT_DIR=$(dirname $(readlink -f "$0"))
WPT_ROOT=$(readlink -f $SCRIPT_DIR/../..)
cd $WPT_ROOT

if [[ $RUN_JOB -eq 1 ]]; then
    pip install -U setuptools
    pip install -U requests
fi
|
119
tests/wpt/web-platform-tests/tools/ci/jobs.py
Normal file
119
tests/wpt/web-platform-tests/tools/ci/jobs.py
Normal file
|
@ -0,0 +1,119 @@
|
|||
import argparse
import os
import re

from ..wpt.testfiles import branch_point, files_changed, affected_testfiles

# NOTE(review): localpaths is imported for its side effect of putting the
# vendored packages (e.g. six) on sys.path — presumably why it must come
# before the six import below; confirm before reordering these imports.
from tools import localpaths
from six import iteritems

# Absolute path of the web-platform-tests checkout root (two levels up).
wpt_root = os.path.abspath(os.path.join(os.path.dirname(__file__), os.pardir, os.pardir))

# Rules are just regex on the path, with a leading ! indicating a regex that must not
# match for the job
job_path_map = {
    "stability": [".*/.*",
                  "!tools/",
                  "!docs/",
                  "!resources/*",
                  "!conformance-checkers/",
                  "!.*/OWNERS",
                  "!.*/tools/",
                  "!.*/README",
                  "!css/[^/]*$"],
    "lint": [".*"],
    "resources_unittest": ["resources/"],
    "tools_unittest": ["tools/"],
    "wptrunner_unittest": ["tools/wptrunner/*"],
    "build_css": ["css/"],
    "update_built": ["2dcontext/",
                     "assumptions/",
                     "html/",
                     "offscreen-canvas/"],
    "wpt_integration": ["tools/"],
}
|
||||
|
||||
|
||||
class Ruleset(object):
    """Matcher deciding whether a changed path selects a job.

    Each rule is a regular expression anchored at the start of the path;
    a leading "!" marks a pattern that must *not* match for the path to
    be accepted.
    """

    def __init__(self, rules):
        self.include = []
        self.exclude = []
        for pattern in rules:
            self.add_rule(pattern)

    def add_rule(self, rule):
        """Compile one rule string into the include or exclude list."""
        negated = rule.startswith("!")
        if negated:
            rule = rule[1:]
        target = self.exclude if negated else self.include
        target.append(re.compile("^%s" % rule))

    def __call__(self, path):
        """Return True iff *path* hits an include rule and no exclude rule."""
        # Normalise to forward slashes so rules are platform-independent.
        if os.path.sep != "/":
            path = path.replace(os.path.sep, "/")
        path = os.path.normcase(path)
        if any(regexp.match(path) for regexp in self.exclude):
            return False
        return any(regexp.match(path) for regexp in self.include)

    def __repr__(self):
        include_str = ",".join(item.pattern for item in self.include)
        exclude_str = ",".join(item.pattern for item in self.exclude)
        return "Rules<include:[%s] exclude:[%s]>" % (include_str, exclude_str)
|
||||
|
||||
|
||||
def get_paths(**kwargs):
    """Return the set of repo-relative paths changed in the revision range.

    When kwargs["revish"] is None the range defaults to the commits
    between the branch point and HEAD.
    """
    revish = kwargs["revish"]
    if revish is None:
        revish = "%s..HEAD" % branch_point()

    changed, _ = files_changed(revish)
    return set(os.path.relpath(changed_path, wpt_root)
               for changed_path in set(changed))
|
||||
|
||||
|
||||
def get_jobs(paths, **kwargs):
    """Map a collection of changed paths onto the set of CI jobs to run.

    kwargs["includes"], when provided, restricts the candidate jobs to
    that subset.
    """
    includes = kwargs.get("includes")
    if includes is not None:
        includes = set(includes)

    # Build one Ruleset per candidate job.
    rules = {}
    for name, patterns in iteritems(job_path_map):
        if includes is None or name in includes:
            rules[name] = Ruleset(patterns)

    jobs = set()
    for path in paths:
        # A job is scheduled as soon as any path matches it; remove it
        # from the candidates so later paths don't re-test it.
        matched = [name for name, ruleset in list(rules.items()) if ruleset(path)]
        for name in matched:
            del rules[name]
            jobs.add(name)
        if not rules:
            break

    return jobs
|
||||
|
||||
|
||||
def create_parser():
    """Build the argument parser for the test-jobs command."""
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "revish",
        nargs="?",
        default=None,
        help="Commits to consider. Defaults to the commits on the current branch")
    parser.add_argument(
        "--includes",
        nargs="*",
        default=None,
        help="Jobs to check for. Return code is 0 if all jobs are found, otherwise 1")
    return parser
|
||||
|
||||
|
||||
def run(**kwargs):
    """Entry point: print the jobs to run, or test for specific jobs.

    With --includes, return 0 when the requested jobs exactly match the
    computed set and 1 otherwise; without it, print one job per line.
    """
    paths = get_paths(**kwargs)
    jobs = get_jobs(paths, **kwargs)
    includes = kwargs["includes"]
    if includes:
        return 0 if set(includes) == jobs else 1
    for job in sorted(jobs):
        print(job)
|
45
tests/wpt/web-platform-tests/tools/ci/lib.sh
Normal file
45
tests/wpt/web-platform-tests/tools/ci/lib.sh
Normal file
|
@ -0,0 +1,45 @@
|
|||
#!/bin/bash
|
||||
|
||||
hosts_fixup() {
    # Add the web-platform.test host aliases the wpt server expects to
    # /etc/hosts, and strip "localhost" off the ::1 line (so localhost
    # resolves via IPv4 only).  The travis_fold markers let the Travis UI
    # collapse this log section.
    echo "travis_fold:start:hosts_fixup"
    echo "Rewriting hosts file"
    echo "## /etc/hosts ##"
    cat /etc/hosts
    sudo sed -i 's/^::1\s*localhost/::1/' /etc/hosts
    sudo sh -c 'echo "
127.0.0.1 web-platform.test
127.0.0.1 www.web-platform.test
127.0.0.1 www1.web-platform.test
127.0.0.1 www2.web-platform.test
127.0.0.1 xn--n8j6ds53lwwkrqhv28a.web-platform.test
127.0.0.1 xn--lve-6lad.web-platform.test
0.0.0.0 nonexistent-origin.web-platform.test
" >> /etc/hosts'
    echo "== /etc/hosts =="
    cat /etc/hosts
    echo "----------------"
    echo "travis_fold:end:hosts_fixup"
}
|
||||
|
||||
install_chrome() {
    # Download and install the requested Google Chrome channel
    # (e.g. "stable" or "unstable") from the official .deb archive.
    channel=$1
    deb_archive=google-chrome-${channel}_current_amd64.deb
    wget https://dl.google.com/linux/direct/$deb_archive

    # If the environment provides an installation of Google Chrome, the
    # existing binary may take precedence over the one introduced in this
    # script. Remove any previously-existing "alternatives" prior to
    # installation in order to ensure that the new binary is installed as
    # intended.
    if sudo update-alternatives --list google-chrome; then
        sudo update-alternatives --remove-all google-chrome
    fi

    # Installation will fail in cases where the package has unmet dependencies.
    # When this occurs, attempt to use the system package manager to fetch the
    # required packages and retry.  Pass --yes so apt-get does not block on a
    # confirmation prompt in this non-interactive CI environment.
    if ! sudo dpkg --install $deb_archive; then
        sudo apt-get install --fix-broken --yes
        sudo dpkg --install $deb_archive
    fi
}
|
10
tests/wpt/web-platform-tests/tools/ci/run.sh
Executable file
10
tests/wpt/web-platform-tests/tools/ci/run.sh
Executable file
|
@ -0,0 +1,10 @@
|
|||
#!/bin/bash
set -ex

# Generic CI job runner: source the job script named in $SCRIPT, but only
# when the job-selection logic has marked this job to run (RUN_JOB=1).

SCRIPT_DIR=$(dirname $(readlink -f "$0"))
WPT_ROOT=$(readlink -f $SCRIPT_DIR/../..)
cd $WPT_ROOT

if [[ $RUN_JOB -eq 1 ]]; then
    . $SCRIPT
fi
|
76
tests/wpt/web-platform-tests/tools/ci/tests/test_jobs.py
Normal file
76
tests/wpt/web-platform-tests/tools/ci/tests/test_jobs.py
Normal file
|
@ -0,0 +1,76 @@
|
|||
"""Unit tests for the CI job-selection logic in tools.ci.jobs."""
from tools.ci import jobs


def test_testharness():
    # Changing testharness.js triggers the lint and the resources unit tests.
    assert jobs.get_jobs(["resources/testharness.js"]) == set(["lint", "resources_unittest"])
    assert jobs.get_jobs(["resources/testharness.js"],
                         includes=["resources_unittest"]) == set(["resources_unittest"])
    # Only the top-level resources/ directory counts.
    assert jobs.get_jobs(["foo/resources/testharness.js"],
                         includes=["resources_unittest"]) == set()


def test_stability():
    # Ordinary test files trigger the stability job...
    assert jobs.get_jobs(["dom/historical.html"],
                         includes=["stability"]) == set(["stability"])
    # ...but tooling, docs, support and metadata files do not.
    assert jobs.get_jobs(["tools/pytest.ini"],
                         includes=["stability"]) == set()
    assert jobs.get_jobs(["serve"],
                         includes=["stability"]) == set()
    assert jobs.get_jobs(["resources/testharness.js"],
                         includes=["stability"]) == set()
    assert jobs.get_jobs(["docs/.gitignore"],
                         includes=["stability"]) == set()
    assert jobs.get_jobs(["dom/tools/example.py"],
                         includes=["stability"]) == set()
    assert jobs.get_jobs(["conformance-checkers/test.html"],
                         includes=["stability"]) == set()
    assert jobs.get_jobs(["dom/README.md"],
                         includes=["stability"]) == set()
    # Files directly under css/ are excluded; tests in subdirectories count.
    assert jobs.get_jobs(["css/build-css-testsuite.sh"],
                         includes=["stability"]) == set()
    assert jobs.get_jobs(["css/CSS21/test-001.html"],
                         includes=["stability"]) == set(["stability"])
    assert jobs.get_jobs(["css/build-css-testsuite.sh",
                          "css/CSS21/test-001.html"],
                         includes=["stability"]) == set(["stability"])


def test_lint():
    # The lint runs for any change.
    assert jobs.get_jobs(["README.md"]) == set(["lint"])


def test_tools_unittest():
    assert jobs.get_jobs(["tools/ci/test/test_jobs.py"],
                         includes=["tools_unittest"]) == set(["tools_unittest"])
    # tools/ directories nested inside test suites don't count.
    assert jobs.get_jobs(["dom/tools/example.py"],
                         includes=["tools_unittest"]) == set()
    assert jobs.get_jobs(["dom/historical.html"],
                         includes=["tools_unittest"]) == set()


def test_wptrunner_unittest():
    assert jobs.get_jobs(["tools/wptrunner/wptrunner/wptrunner.py"],
                         includes=["wptrunner_unittest"]) == set(["wptrunner_unittest"])
    assert jobs.get_jobs(["tools/example.py"],
                         includes=["wptrunner_unittest"]) == set()


def test_build_css():
    assert jobs.get_jobs(["css/css-build-testsuites.sh"],
                         includes=["build_css"]) == set(["build_css"])
    assert jobs.get_jobs(["css/CSS21/test.html"],
                         includes=["build_css"]) == set(["build_css"])
    # Only the top-level css/ directory triggers the CSS build.
    assert jobs.get_jobs(["html/css/CSS21/test.html"],
                         includes=["build_css"]) == set()


def test_update_built():
    assert jobs.get_jobs(["2dcontext/foo.html"],
                         includes=["update_built"]) == set(["update_built"])
    assert jobs.get_jobs(["assumptions/foo.html"],
                         includes=["update_built"]) == set(["update_built"])
    assert jobs.get_jobs(["html/foo.html"],
                         includes=["update_built"]) == set(["update_built"])
    assert jobs.get_jobs(["offscreen-canvas/foo.html"],
                         includes=["update_built"]) == set(["update_built"])


def test_wpt_integration():
    assert jobs.get_jobs(["tools/wpt/wpt.py"],
                         includes=["wpt_integration"]) == set(["wpt_integration"])
    assert jobs.get_jobs(["tools/wptrunner/wptrunner/wptrunner.py"],
                         includes=["wpt_integration"]) == set(["wpt_integration"])
|
3
tests/wpt/web-platform-tests/tools/lint/commands.json
Normal file
3
tests/wpt/web-platform-tests/tools/lint/commands.json
Normal file
|
@ -0,0 +1,3 @@
|
|||
{"lint":
|
||||
{"path": "lint.py", "script": "main", "parser": "create_parser", "help": "Run the lint",
|
||||
"virtualenv": false}}
|
|
@ -15,8 +15,9 @@ from collections import defaultdict
|
|||
from . import fnmatch
|
||||
from .. import localpaths
|
||||
from ..gitignore.gitignore import PathFilter
|
||||
from ..wpt import testfiles
|
||||
|
||||
from manifest.sourcefile import SourceFile, js_meta_re, python_meta_re
|
||||
from manifest.sourcefile import SourceFile, js_meta_re, python_meta_re, space_chars
|
||||
from six import binary_type, iteritems, itervalues
|
||||
from six.moves import range
|
||||
from six.moves.urllib.parse import urlsplit, urljoin
|
||||
|
@ -126,6 +127,13 @@ def check_worker_collision(repo_root, path, css_mode):
|
|||
return []
|
||||
|
||||
|
||||
def check_ahem_copy(repo_root, path, css_mode):
    """Flag checked-in duplicate copies of the Ahem font.

    Tests should reference the shared /fonts/Ahem.ttf instead of adding
    their own .ttf copy.  Returns a (possibly empty) list of lint errors.
    """
    lowered = path.lower()
    if lowered.endswith(".ttf") and "ahem" in lowered:
        return [("AHEM COPY", "Don't add extra copies of Ahem, use /fonts/Ahem.ttf", path, None)]
    return []
|
||||
|
||||
|
||||
drafts_csswg_re = re.compile(r"https?\:\/\/drafts\.csswg\.org\/([^/?#]+)")
|
||||
w3c_tr_re = re.compile(r"https?\:\/\/www\.w3c?\.org\/TR\/([^/?#]+)")
|
||||
w3c_dev_re = re.compile(r"https?\:\/\/dev\.w3c?\.org\/[^/?#]+\/([^/?#]+)")
|
||||
|
@ -335,12 +343,24 @@ class ConsoleRegexp(Regexp):
|
|||
file_extensions = [".html", ".htm", ".js", ".xht", ".xhtml", ".svg"]
|
||||
description = "Console logging API used"
|
||||
|
||||
class GenerateTestsRegexp(Regexp):
|
||||
pattern = b"generate_tests\s*\("
|
||||
error = "GENERATE_TESTS"
|
||||
file_extensions = [".html", ".htm", ".js", ".xht", ".xhtml", ".svg"]
|
||||
description = "generate_tests used"
|
||||
|
||||
class PrintRegexp(Regexp):
|
||||
pattern = b"print(?:\s|\s*\()"
|
||||
error = "PRINT STATEMENT"
|
||||
file_extensions = [".py"]
|
||||
description = "Print function used"
|
||||
|
||||
class LayoutTestsRegexp(Regexp):
|
||||
pattern = b"eventSender|testRunner|window\.internals"
|
||||
error = "LAYOUTTESTS APIS"
|
||||
file_extensions = [".html", ".htm", ".js", ".xht", ".xhtml", ".svg"]
|
||||
description = "eventSender/testRunner/window.internals used; these are LayoutTests-specific APIs (WebKit/Blink)"
|
||||
|
||||
regexps = [item() for item in
|
||||
[TrailingWhitespaceRegexp,
|
||||
TabsRegexp,
|
||||
|
@ -349,7 +369,9 @@ regexps = [item() for item in
|
|||
W3CTestOrgRegexp,
|
||||
Webidl2Regexp,
|
||||
ConsoleRegexp,
|
||||
PrintRegexp]]
|
||||
GenerateTestsRegexp,
|
||||
PrintRegexp,
|
||||
LayoutTestsRegexp]]
|
||||
|
||||
def check_regexp_line(repo_root, path, f, css_mode):
|
||||
errors = []
|
||||
|
@ -395,7 +417,7 @@ def check_parsed(repo_root, path, f, css_mode):
|
|||
return [("CONTENT-VISUAL", "Visual test whose filename doesn't end in '-visual'", path, None)]
|
||||
|
||||
for reftest_node in source_file.reftest_nodes:
|
||||
href = reftest_node.attrib.get("href", "")
|
||||
href = reftest_node.attrib.get("href", "").strip(space_chars)
|
||||
parts = urlsplit(href)
|
||||
if parts.scheme or parts.netloc:
|
||||
errors.append(("ABSOLUTE-URL-REF",
|
||||
|
@ -636,6 +658,7 @@ def output_errors_text(errors):
|
|||
pos_string += ":%s" % line_number
|
||||
logger.error("%s: %s (%s)" % (pos_string, description, error_type))
|
||||
|
||||
|
||||
def output_errors_markdown(errors):
|
||||
if not errors:
|
||||
return
|
||||
|
@ -651,6 +674,7 @@ def output_errors_markdown(errors):
|
|||
pos_string += ":%s" % line_number
|
||||
logger.error("%s | %s | %s |" % (error_type, pos_string, description))
|
||||
|
||||
|
||||
def output_errors_json(errors):
|
||||
for error_type, error, path, line_number in errors:
|
||||
print(json.dumps({"path": path, "lineno": line_number,
|
||||
|
@ -669,7 +693,34 @@ def output_error_count(error_count):
|
|||
else:
|
||||
logger.info("There were %d errors (%s)" % (count, by_type))
|
||||
|
||||
def parse_args():
|
||||
|
||||
def changed_files(wpt_root):
    """Return the paths changed on the current branch, relative to wpt_root.

    Uncommitted and untracked (new) files are included.
    """
    revish = testfiles.get_revish(revish=None)
    changed, _ = testfiles.files_changed(revish, set(),
                                         include_uncommitted=True,
                                         include_new=True)
    return [os.path.relpath(changed_path, wpt_root) for changed_path in changed]
|
||||
|
||||
|
||||
def lint_paths(kwargs, wpt_root):
    """Decide which paths to lint.

    Explicit paths win; --all lints the whole tree; otherwise only the
    files changed on the current branch are linted, unless the lint
    itself changed, in which case everything is retested.
    """
    if kwargs.get("paths"):
        return kwargs["paths"]
    if kwargs["all"]:
        return list(all_filesystem_paths(wpt_root))

    changed_paths = changed_files(wpt_root)

    # If we changed the lint itself ensure that we retest everything
    force_all = False
    for changed in changed_paths:
        normalized = changed.replace(os.path.sep, "/")
        if normalized == "lint.whitelist" or normalized.startswith("tools/lint/"):
            force_all = True
            break

    if force_all:
        return list(all_filesystem_paths(wpt_root))
    return list(changed_paths)
|
||||
|
||||
|
||||
def create_parser():
|
||||
parser = argparse.ArgumentParser()
|
||||
parser.add_argument("paths", nargs="*",
|
||||
help="List of paths to lint")
|
||||
|
@ -681,7 +732,9 @@ def parse_args():
|
|||
help="Run CSS testsuite specific lints")
|
||||
parser.add_argument("--repo-root", help="The WPT directory. Use this"
|
||||
"option if the lint script exists outside the repository")
|
||||
return parser.parse_args()
|
||||
parser.add_argument("--all", action="store_true", help="If no paths are passed, try to lint the whole "
|
||||
"working directory, not just files that changed")
|
||||
return parser
|
||||
|
||||
|
||||
def main(**kwargs):
|
||||
|
@ -695,9 +748,11 @@ def main(**kwargs):
|
|||
(False, False): "normal"}[(kwargs.get("json", False),
|
||||
kwargs.get("markdown", False))]
|
||||
|
||||
paths = list(kwargs.get("paths") if kwargs.get("paths") else all_filesystem_paths(repo_root))
|
||||
if output_format == "markdown":
|
||||
setup_logging(True)
|
||||
|
||||
paths = lint_paths(kwargs, repo_root)
|
||||
|
||||
return lint(repo_root, paths, output_format, kwargs.get("css_mode", False))
|
||||
|
||||
|
||||
|
@ -760,12 +815,12 @@ def lint(repo_root, paths, output_format, css_mode):
|
|||
logger.info(line)
|
||||
return sum(itervalues(error_count))
|
||||
|
||||
path_lints = [check_path_length, check_worker_collision]
|
||||
path_lints = [check_path_length, check_worker_collision, check_ahem_copy]
|
||||
all_paths_lints = [check_css_globally_unique]
|
||||
file_lints = [check_regexp_line, check_parsed, check_python_ast, check_script_metadata]
|
||||
|
||||
if __name__ == "__main__":
|
||||
args = parse_args()
|
||||
args = create_parser().parse_args()
|
||||
error_count = main(**vars(args))
|
||||
if error_count > 0:
|
||||
sys.exit(1)
|
||||
|
|
|
@ -145,6 +145,51 @@ def test_setTimeout():
|
|||
1)]
|
||||
|
||||
|
||||
def test_eventSender():
|
||||
error_map = check_with_files(b"<script>eventSender.mouseDown()</script>")
|
||||
|
||||
for (filename, (errors, kind)) in error_map.items():
|
||||
check_errors(errors)
|
||||
|
||||
if kind == "python":
|
||||
assert errors == [("PARSE-FAILED", "Unable to parse file", filename, 1)]
|
||||
else:
|
||||
assert errors == [('LAYOUTTESTS APIS',
|
||||
'eventSender/testRunner/window.internals used; these are LayoutTests-specific APIs (WebKit/Blink)',
|
||||
filename,
|
||||
1)]
|
||||
|
||||
|
||||
def test_testRunner():
|
||||
error_map = check_with_files(b"<script>if (window.testRunner) { testRunner.waitUntilDone(); }</script>")
|
||||
|
||||
for (filename, (errors, kind)) in error_map.items():
|
||||
check_errors(errors)
|
||||
|
||||
if kind == "python":
|
||||
assert errors == [("PARSE-FAILED", "Unable to parse file", filename, 1)]
|
||||
else:
|
||||
assert errors == [('LAYOUTTESTS APIS',
|
||||
'eventSender/testRunner/window.internals used; these are LayoutTests-specific APIs (WebKit/Blink)',
|
||||
filename,
|
||||
1)]
|
||||
|
||||
|
||||
def test_windowDotInternals():
|
||||
error_map = check_with_files(b"<script>if (window.internals) { internals.doAThing(); }</script>")
|
||||
|
||||
for (filename, (errors, kind)) in error_map.items():
|
||||
check_errors(errors)
|
||||
|
||||
if kind == "python":
|
||||
assert errors == [("PARSE-FAILED", "Unable to parse file", filename, 1)]
|
||||
else:
|
||||
assert errors == [('LAYOUTTESTS APIS',
|
||||
'eventSender/testRunner/window.internals used; these are LayoutTests-specific APIs (WebKit/Blink)',
|
||||
filename,
|
||||
1)]
|
||||
|
||||
|
||||
def test_meta_timeout():
|
||||
code = b"""
|
||||
<html xmlns="http://www.w3.org/1999/xhtml">
|
||||
|
|
|
@ -9,7 +9,7 @@ import six
|
|||
|
||||
from ...localpaths import repo_root
|
||||
from .. import lint as lint_mod
|
||||
from ..lint import filter_whitelist_errors, parse_whitelist, lint, parse_args
|
||||
from ..lint import filter_whitelist_errors, parse_whitelist, lint, create_parser
|
||||
|
||||
_dummy_repo = os.path.join(os.path.dirname(__file__), "dummy")
|
||||
|
||||
|
@ -389,7 +389,7 @@ def test_main_with_args():
|
|||
try:
|
||||
sys.argv = ['./lint', 'a', 'b', 'c']
|
||||
with _mock_lint('lint', return_value=True) as m:
|
||||
lint_mod.main(**vars(parse_args()))
|
||||
lint_mod.main(**vars(create_parser().parse_args()))
|
||||
m.assert_called_once_with(repo_root, ['a', 'b', 'c'], "normal", False)
|
||||
finally:
|
||||
sys.argv = orig_argv
|
||||
|
@ -400,8 +400,20 @@ def test_main_no_args():
|
|||
try:
|
||||
sys.argv = ['./lint']
|
||||
with _mock_lint('lint', return_value=True) as m:
|
||||
with _mock_lint('all_filesystem_paths', return_value=['foo', 'bar']) as m2:
|
||||
lint_mod.main(**vars(parse_args()))
|
||||
with _mock_lint('changed_files', return_value=['foo', 'bar']) as m2:
|
||||
lint_mod.main(**vars(create_parser().parse_args()))
|
||||
m.assert_called_once_with(repo_root, ['foo', 'bar'], "normal", False)
|
||||
finally:
|
||||
sys.argv = orig_argv
|
||||
|
||||
|
||||
def test_main_all():
|
||||
orig_argv = sys.argv
|
||||
try:
|
||||
sys.argv = ['./lint', '--all']
|
||||
with _mock_lint('lint', return_value=True) as m:
|
||||
with _mock_lint('all_filesystem_paths', return_value=['foo', 'bar']) as m2:
|
||||
lint_mod.main(**vars(create_parser().parse_args()))
|
||||
m.assert_called_once_with(repo_root, ['foo', 'bar'], "normal", False)
|
||||
finally:
|
||||
sys.argv = orig_argv
|
||||
|
|
|
@ -5,7 +5,6 @@ here = os.path.abspath(os.path.split(__file__)[0])
|
|||
repo_root = os.path.abspath(os.path.join(here, os.pardir))
|
||||
|
||||
sys.path.insert(0, os.path.join(here))
|
||||
sys.path.insert(0, os.path.join(here, "browserutils"))
|
||||
sys.path.insert(0, os.path.join(here, "six"))
|
||||
sys.path.insert(0, os.path.join(here, "html5lib"))
|
||||
sys.path.insert(0, os.path.join(here, "wptserve"))
|
||||
|
|
|
@ -0,0 +1,3 @@
|
|||
{"manifest":
|
||||
{"path": "update.py", "script": "run", "parser": "create_parser", "help": "Update the MANIFEST.json file",
|
||||
"virtualenv": false}}
|
|
@ -2,11 +2,13 @@ import logging
|
|||
import sys
|
||||
|
||||
logger = logging.getLogger("manifest")
|
||||
logger.setLevel(logging.DEBUG)
|
||||
handler = logging.StreamHandler(sys.stdout)
|
||||
formatter = logging.Formatter(logging.BASIC_FORMAT, None)
|
||||
handler.setFormatter(formatter)
|
||||
logger.addHandler(handler)
|
||||
|
||||
def setup():
|
||||
logger.setLevel(logging.DEBUG)
|
||||
handler = logging.StreamHandler(sys.stdout)
|
||||
formatter = logging.Formatter(logging.BASIC_FORMAT, None)
|
||||
handler.setFormatter(formatter)
|
||||
logger.addHandler(handler)
|
||||
|
||||
def get_logger():
|
||||
return logger
|
||||
|
|
|
@ -237,6 +237,9 @@ def load(tests_root, manifest):
|
|||
|
||||
|
||||
def write(manifest, manifest_path):
|
||||
dir_name = os.path.dirname(manifest_path)
|
||||
if not os.path.exists(dir_name):
|
||||
os.makedirs(dir_name)
|
||||
with open(manifest_path, "wb") as f:
|
||||
json.dump(manifest.to_json(), f, sort_keys=True, indent=1, separators=(',', ': '))
|
||||
f.write("\n")
|
||||
|
|
|
@ -21,6 +21,8 @@ python_meta_re = re.compile(b"#\s*META:\s*(\w*)=(.*)$")
|
|||
|
||||
reference_file_re = re.compile(r'(^|[\-_])(not)?ref[0-9]*([\-_]|$)')
|
||||
|
||||
space_chars = u"".join(html5lib.constants.spaceCharacters)
|
||||
|
||||
def replace_end(s, old, new):
|
||||
"""
|
||||
Given a string `s` that ends with `old`, replace that occurrence of `old`
|
||||
|
@ -227,7 +229,7 @@ class SourceFile(object):
|
|||
rel_dir_tree = self.rel_path.split(os.path.sep)
|
||||
return (rel_dir_tree[0] == "webdriver" and
|
||||
len(rel_dir_tree) > 1 and
|
||||
self.filename != "__init__.py" and
|
||||
self.filename not in ("__init__.py", "conftest.py") and
|
||||
fnmatch(self.filename, wd_pattern))
|
||||
|
||||
@property
|
||||
|
@ -399,7 +401,7 @@ class SourceFile(object):
|
|||
rel_map = {"match": "==", "mismatch": "!="}
|
||||
for item in self.reftest_nodes:
|
||||
if "href" in item.attrib:
|
||||
ref_url = urljoin(self.url, item.attrib["href"])
|
||||
ref_url = urljoin(self.url, item.attrib["href"].strip(space_chars))
|
||||
ref_type = rel_map[item.attrib["rel"]]
|
||||
rv.append((ref_url, ref_type))
|
||||
return rv
|
||||
|
@ -451,7 +453,7 @@ class SourceFile(object):
|
|||
rv = set()
|
||||
for item in self.spec_link_nodes:
|
||||
if "href" in item.attrib:
|
||||
rv.add(item.attrib["href"])
|
||||
rv.add(item.attrib["href"].strip(space_chars))
|
||||
return rv
|
||||
|
||||
@cached_property
|
||||
|
|
|
@ -542,3 +542,23 @@ def test_no_parse():
|
|||
def test_relpath_normalized(input, expected):
|
||||
s = create(input, b"")
|
||||
assert s.rel_path == expected
|
||||
|
||||
|
||||
@pytest.mark.parametrize("url", [b"ref.html",
|
||||
b"\x20ref.html",
|
||||
b"ref.html\x20",
|
||||
b"\x09\x0a\x0c\x0d\x20ref.html\x09\x0a\x0c\x0d\x20"])
|
||||
def test_reftest_url_whitespace(url):
|
||||
content = b"<link rel=match href='%s'>" % url
|
||||
s = create("foo/test.html", content)
|
||||
assert s.references == [("/foo/ref.html", "==")]
|
||||
|
||||
|
||||
@pytest.mark.parametrize("url", [b"http://example.com/",
|
||||
b"\x20http://example.com/",
|
||||
b"http://example.com/\x20",
|
||||
b"\x09\x0a\x0c\x0d\x20http://example.com/\x09\x0a\x0c\x0d\x20"])
|
||||
def test_spec_links_whitespace(url):
|
||||
content = b"<link rel=help href='%s'>" % url
|
||||
s = create("foo/test.html", content)
|
||||
assert s.spec_links == {"http://example.com/"}
|
||||
|
|
|
@ -10,6 +10,8 @@ from .log import get_logger
|
|||
|
||||
here = os.path.dirname(__file__)
|
||||
|
||||
wpt_root = os.path.abspath(os.path.join(here, os.pardir, os.pardir))
|
||||
|
||||
|
||||
def update(tests_root, manifest, working_copy=False):
|
||||
tree = None
|
||||
|
@ -57,7 +59,7 @@ def create_parser():
|
|||
parser.add_argument(
|
||||
"-p", "--path", type=abs_path, help="Path to manifest file.")
|
||||
parser.add_argument(
|
||||
"--tests-root", type=abs_path, help="Path to root of tests.")
|
||||
"--tests-root", type=abs_path, default=wpt_root, help="Path to root of tests.")
|
||||
parser.add_argument(
|
||||
"-r", "--rebuild", action="store_true", default=False,
|
||||
help="Force a full rebuild of the manifest.")
|
||||
|
@ -81,24 +83,14 @@ def find_top_repo():
|
|||
return rv
|
||||
|
||||
|
||||
def main(default_tests_root=None):
|
||||
def run(**kwargs):
|
||||
if kwargs["path"] is None:
|
||||
kwargs["path"] = os.path.join(kwargs["tests_root"], "MANIFEST.json")
|
||||
|
||||
update_from_cli(**kwargs)
|
||||
|
||||
|
||||
def main():
|
||||
opts = create_parser().parse_args()
|
||||
|
||||
if opts.tests_root is None:
|
||||
tests_root = None
|
||||
if default_tests_root is not None:
|
||||
tests_root = default_tests_root
|
||||
else:
|
||||
tests_root = find_top_repo()
|
||||
|
||||
if tests_root is None:
|
||||
print >> sys.stderr, """No git repo found; could not determine test root.
|
||||
Run again with --test-root"""
|
||||
sys.exit(1)
|
||||
|
||||
opts.tests_root = tests_root
|
||||
|
||||
if opts.path is None:
|
||||
opts.path = os.path.join(opts.tests_root, "MANIFEST.json")
|
||||
|
||||
update_from_cli(**vars(opts))
|
||||
run(**vars(opts))
|
||||
|
|
|
@ -1,2 +1,2 @@
|
|||
[pytest]
|
||||
norecursedirs = .* {arch} *.egg html5lib py pytest pywebsocket six wptrunner
|
||||
norecursedirs = .* {arch} *.egg html5lib py pytest pywebsocket six wpt wptrunner
|
||||
|
|
2
tests/wpt/web-platform-tests/tools/serve/commands.json
Normal file
2
tests/wpt/web-platform-tests/tools/serve/commands.json
Normal file
|
@ -0,0 +1,2 @@
|
|||
{"serve": {"path": "serve.py", "script": "run", "parser": "get_parser", "help": "Run wptserve server",
|
||||
"virtualenv": false}}
|
|
@ -40,6 +40,8 @@ class WrapperHandler(object):
|
|||
|
||||
__meta__ = abc.ABCMeta
|
||||
|
||||
headers = []
|
||||
|
||||
def __init__(self, base_path=None, url_base="/"):
|
||||
self.base_path = base_path
|
||||
self.url_base = url_base
|
||||
|
@ -49,6 +51,9 @@ class WrapperHandler(object):
|
|||
self.handler(request, response)
|
||||
|
||||
def handle_request(self, request, response):
|
||||
for header_name, header_value in self.headers:
|
||||
response.headers.set(header_name, header_value)
|
||||
|
||||
path = self._get_path(request.url_parts.path, True)
|
||||
meta = "\n".join(self._get_meta(request))
|
||||
response.content = self.wrapper % {"meta": meta, "path": path}
|
||||
|
@ -169,6 +174,7 @@ self.GLOBAL = {
|
|||
|
||||
|
||||
class AnyWorkerHandler(WrapperHandler):
|
||||
headers = [('Content-Type', 'text/javascript')]
|
||||
path_replace = [(".any.worker.js", ".any.js")]
|
||||
wrapper = """%(meta)s
|
||||
self.GLOBAL = {
|
||||
|
@ -762,10 +768,9 @@ def get_parser():
|
|||
return parser
|
||||
|
||||
|
||||
def main():
|
||||
kwargs = vars(get_parser().parse_args())
|
||||
config = load_config("config.default.json",
|
||||
"config.json",
|
||||
def run(**kwargs):
|
||||
config = load_config(os.path.join(repo_root, "config.default.json"),
|
||||
os.path.join(repo_root, "config.json"),
|
||||
**kwargs)
|
||||
|
||||
setup_logger(config["log_level"])
|
||||
|
@ -784,3 +789,8 @@ def main():
|
|||
item.join(1)
|
||||
except KeyboardInterrupt:
|
||||
logger.info("Shutting down")
|
||||
|
||||
|
||||
def main():
|
||||
kwargs = vars(get_parser().parse_args())
|
||||
return run(**kwargs)
|
||||
|
|
|
@ -1,5 +1,5 @@
|
|||
[tox]
|
||||
envlist = py27,py35,py36,pypy
|
||||
envlist = py27,py36,pypy
|
||||
skipsdist=True
|
||||
|
||||
[testenv]
|
||||
|
@ -21,4 +21,4 @@ passenv =
|
|||
[flake8]
|
||||
ignore = E128,E129,E221,E226,E231,E251,E265,E302,E303,E305,E402,E901,F401,F821,F841
|
||||
max-line-length = 141
|
||||
exclude = .tox,html5lib,py,pytest,pywebsocket,six,_venv,webencodings,wptserve/docs,wptserve/tests/functional/docroot/,wptrunner
|
||||
exclude = .tox,html5lib,py,pytest,pywebsocket,six,_venv,webencodings,wptserve/docs,wptserve/tests/functional/docroot/,wpt,wptrunner
|
||||
|
|
|
@ -1,8 +1,12 @@
|
|||
import json
|
||||
import urlparse
|
||||
|
||||
import error
|
||||
import transport
|
||||
|
||||
from mozlog import get_default_logger
|
||||
|
||||
logger = get_default_logger()
|
||||
|
||||
element_key = "element-6066-11e4-a52e-4f735466cecf"
|
||||
|
||||
|
@ -237,37 +241,53 @@ class Window(object):
|
|||
def __init__(self, session):
|
||||
self.session = session
|
||||
|
||||
@property
|
||||
@command
|
||||
def rect(self):
|
||||
return self.session.send_session_command("GET", "window/rect")
|
||||
|
||||
@property
|
||||
@command
|
||||
def size(self):
|
||||
resp = self.session.send_session_command("GET", "window/rect")
|
||||
return (resp["width"], resp["height"])
|
||||
"""Gets the window size as a tuple of `(width, height)`."""
|
||||
rect = self.rect
|
||||
return (rect["width"], rect["height"])
|
||||
|
||||
@size.setter
|
||||
@command
|
||||
def size(self, data):
|
||||
width, height = data
|
||||
def size(self, new_size):
|
||||
"""Set window size by passing a tuple of `(width, height)`."""
|
||||
width, height = new_size
|
||||
body = {"width": width, "height": height}
|
||||
self.session.send_session_command("POST", "window/rect", body)
|
||||
|
||||
@property
|
||||
@command
|
||||
def position(self):
|
||||
resp = self.session.send_session_command("GET", "window/rect")
|
||||
return (resp["x"], resp["y"])
|
||||
"""Gets the window position as a tuple of `(x, y)`."""
|
||||
rect = self.rect
|
||||
return (rect["x"], rect["y"])
|
||||
|
||||
@position.setter
|
||||
@command
|
||||
def position(self, data):
|
||||
data = x, y
|
||||
def position(self, new_position):
|
||||
"""Set window position by passing a tuple of `(x, y)`."""
|
||||
x, y = new_position
|
||||
body = {"x": x, "y": y}
|
||||
self.session.send_session_command("POST", "window/rect", body)
|
||||
|
||||
@property
|
||||
@command
|
||||
def maximize(self):
|
||||
return self.session.send_session_command("POST", "window/maximize")
|
||||
|
||||
@command
|
||||
def minimize(self):
|
||||
return self.session.send_session_command("POST", "window/minimize")
|
||||
|
||||
@command
|
||||
def fullscreen(self):
|
||||
return self.session.send_session_command("POST", "window/fullscreen")
|
||||
|
||||
|
||||
class Find(object):
|
||||
def __init__(self, session):
|
||||
|
@ -376,6 +396,7 @@ class Session(object):
|
|||
|
||||
value = self.send_command("POST", "session", body=body)
|
||||
self.session_id = value["sessionId"]
|
||||
self.capabilities = value["capabilities"]
|
||||
|
||||
if self.extension_cls:
|
||||
self.extension = self.extension_cls(self)
|
||||
|
@ -390,10 +411,6 @@ class Session(object):
|
|||
self.send_command("DELETE", url)
|
||||
|
||||
self.session_id = None
|
||||
self.timeouts = None
|
||||
self.window = None
|
||||
self.find = None
|
||||
self.extension = None
|
||||
|
||||
def send_command(self, method, url, body=None):
|
||||
"""
|
||||
|
@ -411,7 +428,12 @@ class Session(object):
|
|||
an error.
|
||||
"""
|
||||
response = self.transport.send(method, url, body)
|
||||
value = response.body["value"]
|
||||
|
||||
if "value" in response.body:
|
||||
value = response.body["value"]
|
||||
else:
|
||||
raise error.UnknownErrorException("No 'value' key in response body:\n%s" %
|
||||
json.dumps(response.body))
|
||||
|
||||
if response.status != 200:
|
||||
cls = error.get(value.get("error"))
|
||||
|
@ -600,7 +622,7 @@ class Element(object):
|
|||
"value": selector}
|
||||
|
||||
elem = self.send_element_command("POST", "element", body)
|
||||
return self.session.element(elem)
|
||||
return self.session._element(elem)
|
||||
|
||||
@command
|
||||
def click(self):
|
||||
|
@ -637,10 +659,17 @@ class Element(object):
|
|||
def rect(self):
|
||||
return self.send_element_command("GET", "rect")
|
||||
|
||||
@property
|
||||
@command
|
||||
def property(self, name):
|
||||
return self.send_element_command("GET", "property/%s" % name)
|
||||
def selected(self):
|
||||
return self.send_element_command("GET", "selected")
|
||||
|
||||
@command
|
||||
def attribute(self, name):
|
||||
return self.send_element_command("GET", "attribute/%s" % name)
|
||||
|
||||
# This MUST come last because otherwise @property decorators above
|
||||
# will be overridden by this.
|
||||
@command
|
||||
def property(self, name):
|
||||
return self.send_element_command("GET", "property/%s" % name)
|
||||
|
|
|
@ -62,7 +62,7 @@ class MoveTargetOutOfBoundsException(WebDriverException):
|
|||
|
||||
|
||||
class NoSuchAlertException(WebDriverException):
|
||||
http_status = 400
|
||||
http_status = 404
|
||||
status_code = "no such alert"
|
||||
|
||||
|
||||
|
@ -72,12 +72,12 @@ class NoSuchElementException(WebDriverException):
|
|||
|
||||
|
||||
class NoSuchFrameException(WebDriverException):
|
||||
http_status = 400
|
||||
http_status = 404
|
||||
status_code = "no such frame"
|
||||
|
||||
|
||||
class NoSuchWindowException(WebDriverException):
|
||||
http_status = 400
|
||||
http_status = 404
|
||||
status_code = "no such window"
|
||||
|
||||
|
||||
|
|
|
@ -2,6 +2,8 @@ import httplib
|
|||
import json
|
||||
import urlparse
|
||||
|
||||
import error
|
||||
|
||||
class Response(object):
|
||||
"""Describes an HTTP response received from a remote en"Describes an HTTP
|
||||
response received from a remote end whose body has been read and parsed as
|
||||
|
@ -27,21 +29,21 @@ class Response(object):
|
|||
# > "application/json; charset=utf-8"
|
||||
# > "cache-control"
|
||||
# > "no-cache"
|
||||
assert http_response.getheader("Content-Type") == "application/json; charset=utf-8"
|
||||
assert http_response.getheader("Cache-Control") == "no-cache"
|
||||
|
||||
if body:
|
||||
body = json.loads(body)
|
||||
|
||||
# SpecID: dfn-send-a-response
|
||||
#
|
||||
# > 4. If data is not null, let response's body be a JSON Object
|
||||
# with a key `value` set to the JSON Serialization of data.
|
||||
assert "value" in body
|
||||
try:
|
||||
body = json.loads(body)
|
||||
except:
|
||||
raise error.UnknownErrorException("Failed to decode body as json:\n%s" % body)
|
||||
|
||||
return cls(status, body)
|
||||
|
||||
|
||||
class ToJsonEncoder(json.JSONEncoder):
|
||||
def default(self, obj):
|
||||
return getattr(obj.__class__, "json", json.JSONEncoder().default)(obj)
|
||||
|
||||
|
||||
class HTTPWireProtocol(object):
|
||||
"""Transports messages (commands and responses) over the WebDriver
|
||||
wire protocol.
|
||||
|
@ -79,7 +81,7 @@ class HTTPWireProtocol(object):
|
|||
body = {}
|
||||
|
||||
if isinstance(body, dict):
|
||||
body = json.dumps(body)
|
||||
body = json.dumps(body, cls=ToJsonEncoder)
|
||||
|
||||
if isinstance(body, unicode):
|
||||
body = body.encode("utf-8")
|
||||
|
|
0
tests/wpt/web-platform-tests/tools/wpt/__init__.py
Normal file
0
tests/wpt/web-platform-tests/tools/wpt/__init__.py
Normal file
|
@ -24,7 +24,7 @@ class Browser(object):
|
|||
__metaclass__ = ABCMeta
|
||||
|
||||
@abstractmethod
|
||||
def install(self):
|
||||
def install(self, dest=None):
|
||||
return NotImplemented
|
||||
|
||||
@abstractmethod
|
||||
|
@ -104,7 +104,6 @@ class Firefox(Browser):
|
|||
return get("https://archive.mozilla.org/pub/firefox/nightly/latest-mozilla-central/%s" %
|
||||
filename)
|
||||
|
||||
|
||||
def install(self, dest=None):
|
||||
"""Install Firefox."""
|
||||
if dest is None:
|
||||
|
@ -112,13 +111,15 @@ class Firefox(Browser):
|
|||
|
||||
resp = self.get_from_nightly("<a[^>]*>(firefox-\d+\.\d(?:\w\d)?.en-US.%s\.tar\.bz2)" % self.platform_string())
|
||||
untar(resp.raw, dest=dest)
|
||||
return os.path.join(dest, "firefox")
|
||||
return find_executable("firefox", os.path.join(dest, "firefox"))
|
||||
|
||||
def find_binary(self):
|
||||
return find_executable("firefox")
|
||||
def find_binary(self, path=None):
|
||||
return find_executable("firefox", path)
|
||||
|
||||
def find_certutil(self):
|
||||
path = find_executable("certutil")
|
||||
if path is None:
|
||||
return None
|
||||
if os.path.splitdrive(path)[1].split(os.path.sep) == ["", "Windows", "system32", "certutil.exe"]:
|
||||
return None
|
||||
return path
|
||||
|
@ -212,6 +213,9 @@ class Chrome(Browser):
|
|||
binary = "/usr/bin/google-chrome"
|
||||
requirements = "requirements_chrome.txt"
|
||||
|
||||
def install(self, dest=None):
|
||||
raise NotImplementedError
|
||||
|
||||
def platform_string(self):
|
||||
platform = {
|
||||
"Linux": "linux",
|
||||
|
@ -231,9 +235,6 @@ class Chrome(Browser):
|
|||
|
||||
return "%s%s" % (platform, bits)
|
||||
|
||||
def install(self):
|
||||
return None
|
||||
|
||||
def find_webdriver(self):
|
||||
return find_executable("chromedriver")
|
||||
|
||||
|
@ -245,7 +246,8 @@ class Chrome(Browser):
|
|||
url = "http://chromedriver.storage.googleapis.com/%s/chromedriver_%s.zip" % (latest,
|
||||
self.platform_string())
|
||||
unzip(get(url).raw, dest)
|
||||
path = find_executable(dest, "chromedriver")
|
||||
|
||||
path = find_executable("chromedriver", dest)
|
||||
st = os.stat(path)
|
||||
os.chmod(path, st.st_mode | stat.S_IEXEC)
|
||||
return path
|
||||
|
@ -285,8 +287,8 @@ class Edge(Browser):
|
|||
product = "edge"
|
||||
requirements = "requirements_edge.txt"
|
||||
|
||||
def install(self):
|
||||
return None
|
||||
def install(self, dest=None):
|
||||
raise NotImplementedError
|
||||
|
||||
def find_webdriver(self):
|
||||
return find_executable("MicrosoftWebDriver")
|
||||
|
@ -299,8 +301,31 @@ class Edge(Browser):
|
|||
raise NotImplementedError
|
||||
|
||||
|
||||
class InternetExplorer(Browser):
|
||||
"""Internet Explorer-specific interface.
|
||||
|
||||
Includes installation, webdriver installation, and wptrunner setup methods.
|
||||
"""
|
||||
|
||||
product = "ie"
|
||||
requirements = "requirements_ie.txt"
|
||||
|
||||
def install(self, dest=None):
|
||||
raise NotImplementedError
|
||||
|
||||
def find_webdriver(self):
|
||||
return find_executable("IEDriverServer.exe")
|
||||
|
||||
def install_webdriver(self, dest=None):
|
||||
"""Install latest Webdriver."""
|
||||
raise NotImplementedError
|
||||
|
||||
def version(self):
|
||||
raise NotImplementedError
|
||||
|
||||
|
||||
class Servo(Browser):
|
||||
"""Firefox-specific interface.
|
||||
"""Servo-specific interface.
|
||||
|
||||
Includes installation, webdriver installation, and wptrunner setup methods.
|
||||
"""
|
||||
|
@ -308,11 +333,10 @@ class Servo(Browser):
|
|||
product = "servo"
|
||||
requirements = "requirements_servo.txt"
|
||||
|
||||
def install(self, platform, dest=None):
|
||||
"""Install Servo."""
|
||||
def install(self, dest=None):
|
||||
raise NotImplementedError
|
||||
|
||||
def find_binary(self):
|
||||
def find_binary(self, path=None):
|
||||
return find_executable("servo")
|
||||
|
||||
def find_webdriver(self):
|
||||
|
@ -323,3 +347,28 @@ class Servo(Browser):
|
|||
|
||||
def version(self, root):
|
||||
return None
|
||||
|
||||
|
||||
class Sauce(Browser):
|
||||
"""Sauce-specific interface.
|
||||
|
||||
Includes installation, webdriver installation, and wptrunner setup methods.
|
||||
"""
|
||||
|
||||
product = "sauce"
|
||||
requirements = "requirements_sauce.txt"
|
||||
|
||||
def install(self, dest=None):
|
||||
raise NotImplementedError
|
||||
|
||||
def find_binary(self, path=None):
|
||||
return None
|
||||
|
||||
def find_webdriver(self):
|
||||
return None
|
||||
|
||||
def install_webdriver(self):
|
||||
raise NotImplementedError
|
||||
|
||||
def version(self, root):
|
||||
return None
|
9
tests/wpt/web-platform-tests/tools/wpt/commands.json
Normal file
9
tests/wpt/web-platform-tests/tools/wpt/commands.json
Normal file
|
@ -0,0 +1,9 @@
|
|||
{
|
||||
"run": {"path": "run.py", "script": "run", "parser": "create_parser", "help": "Run tests in a browser",
|
||||
"virtualenv": true, "install": ["requests"], "requirements": ["../wptrunner/requirements.txt"]},
|
||||
"files-changed": {"path": "testfiles.py", "script": "run_changed_files", "parser": "get_parser",
|
||||
"help": "Get a list of files that have changed", "virtualenv": false},
|
||||
"tests-affected": {"path": "testfiles.py", "script": "run_tests_affected", "parser": "get_parser_affected",
|
||||
"help": "Get a list of tests affected by changes", "virtualenv": false},
|
||||
"install": {"path": "install.py", "script": "run", "parser": "get_parser", "help": "Install browser components"}
|
||||
}
|
46
tests/wpt/web-platform-tests/tools/wpt/install.py
Normal file
46
tests/wpt/web-platform-tests/tools/wpt/install.py
Normal file
|
@ -0,0 +1,46 @@
|
|||
import argparse
|
||||
import browser
|
||||
import sys
|
||||
|
||||
def get_parser():
|
||||
parser = argparse.ArgumentParser()
|
||||
parser.add_argument('browser', choices=['firefox', 'chrome'],
|
||||
help='name of web browser product')
|
||||
parser.add_argument('component', choices=['browser', 'webdriver'],
|
||||
help='name of component')
|
||||
parser.add_argument('-d', '--destination',
|
||||
help='filesystem directory to place the component')
|
||||
return parser
|
||||
|
||||
|
||||
def run(venv, **kwargs):
|
||||
browser = kwargs["browser"]
|
||||
destination = kwargs["destination"]
|
||||
|
||||
if destination is None:
|
||||
if venv:
|
||||
if kwargs["component"] == "browser":
|
||||
destination = venv.path
|
||||
else:
|
||||
destination = venv.bin_path
|
||||
else:
|
||||
raise argparse.ArgumentError(None,
|
||||
"No --destination argument, and no default for the environment")
|
||||
|
||||
install(browser, kwargs["component"], destination)
|
||||
|
||||
|
||||
def install(name, component, destination):
|
||||
if component == 'webdriver':
|
||||
method = 'install_webdriver'
|
||||
else:
|
||||
method = 'install'
|
||||
|
||||
subclass = getattr(browser, name.title())
|
||||
sys.stdout.write('Now installing %s %s...\n' % (name, component))
|
||||
getattr(subclass(), method)(dest=destination)
|
||||
|
||||
|
||||
if __name__ == '__main__':
|
||||
args = parser.parse_args()
|
||||
run(None, **vars(args))
|
55
tests/wpt/web-platform-tests/tools/wpt/markdown.py
Normal file
55
tests/wpt/web-platform-tests/tools/wpt/markdown.py
Normal file
|
@ -0,0 +1,55 @@
|
|||
def format_comment_title(product):
|
||||
"""Produce a Markdown-formatted string based on a given "product"--a string
|
||||
containing a browser identifier optionally followed by a colon and a
|
||||
release channel. (For example: "firefox" or "chrome:dev".) The generated
|
||||
title string is used both to create new comments and to locate (and
|
||||
subsequently update) previously-submitted comments."""
|
||||
parts = product.split(":")
|
||||
title = parts[0].title()
|
||||
|
||||
if len(parts) > 1:
|
||||
title += " (%s)" % parts[1]
|
||||
|
||||
return "# %s #" % title
|
||||
|
||||
|
||||
def markdown_adjust(s):
|
||||
"""Escape problematic markdown sequences."""
|
||||
s = s.replace('\t', u'\\t')
|
||||
s = s.replace('\n', u'\\n')
|
||||
s = s.replace('\r', u'\\r')
|
||||
s = s.replace('`', u'')
|
||||
s = s.replace('|', u'\\|')
|
||||
return s
|
||||
|
||||
|
||||
def table(headings, data, log):
|
||||
"""Create and log data to specified logger in tabular format."""
|
||||
cols = range(len(headings))
|
||||
assert all(len(item) == len(cols) for item in data)
|
||||
max_widths = reduce(lambda prev, cur: [(len(cur[i]) + 2)
|
||||
if (len(cur[i]) + 2) > prev[i]
|
||||
else prev[i]
|
||||
for i in cols],
|
||||
data,
|
||||
[len(item) + 2 for item in headings])
|
||||
log("|%s|" % "|".join(item.center(max_widths[i]) for i, item in enumerate(headings)))
|
||||
log("|%s|" % "|".join("-" * max_widths[i] for i in cols))
|
||||
for row in data:
|
||||
log("|%s|" % "|".join(" %s" % row[i].ljust(max_widths[i] - 1) for i in cols))
|
||||
log("")
|
||||
|
||||
|
||||
def err_string(results_dict, iterations):
|
||||
"""Create and return string with errors from test run."""
|
||||
rv = []
|
||||
total_results = sum(results_dict.values())
|
||||
for key, value in sorted(results_dict.items()):
|
||||
rv.append("%s%s" %
|
||||
(key, ": %s/%s" % (value, iterations) if value != iterations else ""))
|
||||
if total_results < iterations:
|
||||
rv.append("MISSING: %s/%s" % (iterations - total_results, iterations))
|
||||
rv = ", ".join(rv)
|
||||
if is_inconsistent(results_dict, iterations):
|
||||
rv = "**%s**" % rv
|
||||
return rv
|
5
tests/wpt/web-platform-tests/tools/wpt/paths
Normal file
5
tests/wpt/web-platform-tests/tools/wpt/paths
Normal file
|
@ -0,0 +1,5 @@
|
|||
tools/ci/
|
||||
tools/lint/
|
||||
tools/manifest/
|
||||
tools/serve/
|
||||
tools/wpt/
|
419
tests/wpt/web-platform-tests/tools/wpt/run.py
Normal file
419
tests/wpt/web-platform-tests/tools/wpt/run.py
Normal file
|
@ -0,0 +1,419 @@
|
|||
import argparse
|
||||
import os
|
||||
import platform
|
||||
import shutil
|
||||
import subprocess
|
||||
import sys
|
||||
import tarfile
|
||||
from distutils.spawn import find_executable
|
||||
|
||||
wpt_root = os.path.abspath(os.path.join(os.path.dirname(__file__), os.pardir, os.pardir))
|
||||
sys.path.insert(0, os.path.abspath(os.path.join(wpt_root, "tools")))
|
||||
|
||||
from . import browser, utils, virtualenv
|
||||
logger = None
|
||||
|
||||
|
||||
class WptrunError(Exception):
|
||||
pass
|
||||
|
||||
|
||||
class WptrunnerHelpAction(argparse.Action):
|
||||
def __init__(self,
|
||||
option_strings,
|
||||
dest=argparse.SUPPRESS,
|
||||
default=argparse.SUPPRESS,
|
||||
help=None):
|
||||
super(WptrunnerHelpAction, self).__init__(
|
||||
option_strings=option_strings,
|
||||
dest=dest,
|
||||
default=default,
|
||||
nargs=0,
|
||||
help=help)
|
||||
|
||||
def __call__(self, parser, namespace, values, option_string=None):
|
||||
from wptrunner import wptcommandline
|
||||
wptparser = wptcommandline.create_parser()
|
||||
wptparser.usage = parser.usage
|
||||
wptparser.print_help()
|
||||
parser.exit()
|
||||
|
||||
|
||||
def create_parser():
|
||||
from wptrunner import wptcommandline
|
||||
|
||||
parser = argparse.ArgumentParser(add_help=False)
|
||||
parser.add_argument("product", action="store",
|
||||
help="Browser to run tests in")
|
||||
parser.add_argument("--yes", "-y", dest="prompt", action="store_false", default=True,
|
||||
help="Don't prompt before installing components")
|
||||
parser.add_argument("--stability", action="store_true",
|
||||
help="Stability check tests")
|
||||
parser.add_argument("--install-browser", action="store_true",
|
||||
help="Install the latest development version of the browser")
|
||||
parser._add_container_actions(wptcommandline.create_parser())
|
||||
return parser
|
||||
|
||||
|
||||
def exit(msg):
|
||||
logger.error(msg)
|
||||
sys.exit(1)
|
||||
|
||||
|
||||
def args_general(kwargs):
|
||||
kwargs.set_if_none("tests_root", wpt_root)
|
||||
kwargs.set_if_none("metadata_root", wpt_root)
|
||||
kwargs.set_if_none("manifest_update", True)
|
||||
|
||||
if kwargs["ssl_type"] in (None, "pregenerated"):
|
||||
cert_root = os.path.join(wpt_root, "tools", "certs")
|
||||
if kwargs["ca_cert_path"] is None:
|
||||
kwargs["ca_cert_path"] = os.path.join(cert_root, "cacert.pem")
|
||||
|
||||
if kwargs["host_key_path"] is None:
|
||||
kwargs["host_key_path"] = os.path.join(cert_root, "web-platform.test.key")
|
||||
|
||||
if kwargs["host_cert_path"] is None:
|
||||
kwargs["host_cert_path"] = os.path.join(cert_root, "web-platform.test.pem")
|
||||
elif kwargs["ssl_type"] == "openssl":
|
||||
if not find_executable(kwargs["openssl_binary"]):
|
||||
if os.uname()[0] == "Windows":
|
||||
raise WptrunError("""OpenSSL binary not found. If you need HTTPS tests, install OpenSSL from
|
||||
|
||||
https://slproweb.com/products/Win32OpenSSL.html
|
||||
|
||||
Ensuring that libraries are added to /bin and add the resulting bin directory to
|
||||
your PATH.
|
||||
|
||||
Otherwise run with --ssl-type=none""")
|
||||
else:
|
||||
raise WptrunError("""OpenSSL not found. If you don't need HTTPS support run with --ssl-type=none,
|
||||
otherwise install OpenSSL and ensure that it's on your $PATH.""")
|
||||
|
||||
|
||||
def check_environ(product):
|
||||
if product not in ("firefox", "servo"):
|
||||
expected_hosts = ["web-platform.test",
|
||||
"www.web-platform.test",
|
||||
"www1.web-platform.test",
|
||||
"www2.web-platform.test",
|
||||
"xn--n8j6ds53lwwkrqhv28a.web-platform.test",
|
||||
"xn--lve-6lad.web-platform.test",
|
||||
"nonexistent-origin.web-platform.test"]
|
||||
missing_hosts = set(expected_hosts)
|
||||
if platform.uname()[0] != "Windows":
|
||||
hosts_path = "/etc/hosts"
|
||||
else:
|
||||
hosts_path = "C:\Windows\System32\drivers\etc\hosts"
|
||||
with open(hosts_path, "r") as f:
|
||||
for line in f:
|
||||
line = line.split("#", 1)[0].strip()
|
||||
parts = line.split()
|
||||
if len(parts) == 2:
|
||||
host = parts[1]
|
||||
missing_hosts.discard(host)
|
||||
if missing_hosts:
|
||||
raise WptrunError("""Missing hosts file configuration. Expected entries like:
|
||||
|
||||
%s
|
||||
|
||||
See README.md for more details.""" % "\n".join("%s\t%s" %
|
||||
("127.0.0.1" if "nonexistent" not in host else "0.0.0.0", host)
|
||||
for host in expected_hosts))
|
||||
|
||||
|
||||
class BrowserSetup(object):
|
||||
name = None
|
||||
browser_cls = None
|
||||
|
||||
def __init__(self, venv, prompt=True, sub_product=None):
|
||||
self.browser = self.browser_cls()
|
||||
self.venv = venv
|
||||
self.prompt = prompt
|
||||
self.sub_product = sub_product
|
||||
|
||||
def prompt_install(self, component):
|
||||
if not self.prompt:
|
||||
return True
|
||||
while True:
|
||||
resp = raw_input("Download and install %s [Y/n]? " % component).strip().lower()
|
||||
if not resp or resp == "y":
|
||||
return True
|
||||
elif resp == "n":
|
||||
return False
|
||||
|
||||
def install(self, venv):
|
||||
if self.prompt_install(self.name):
|
||||
return self.browser.install(venv.path)
|
||||
|
||||
def setup(self, kwargs):
|
||||
self.venv.install_requirements(os.path.join(wpt_root, "tools", "wptrunner", self.browser.requirements))
|
||||
self.setup_kwargs(kwargs)
|
||||
|
||||
|
||||
class Firefox(BrowserSetup):
|
||||
name = "firefox"
|
||||
browser_cls = browser.Firefox
|
||||
|
||||
def setup_kwargs(self, kwargs):
|
||||
if kwargs["binary"] is None:
|
||||
binary = self.browser.find_binary()
|
||||
if binary is None:
|
||||
raise WptrunError("""Firefox binary not found on $PATH.
|
||||
|
||||
Install Firefox or use --binary to set the binary path""")
|
||||
kwargs["binary"] = binary
|
||||
|
||||
if kwargs["certutil_binary"] is None and kwargs["ssl_type"] != "none":
|
||||
certutil = self.browser.find_certutil()
|
||||
|
||||
if certutil is None:
|
||||
# Can't download this for now because it's missing the libnss3 library
|
||||
raise WptrunError("""Can't find certutil.
|
||||
|
||||
This must be installed using your OS package manager or directly e.g.
|
||||
|
||||
Debian/Ubuntu:
|
||||
sudo apt install libnss3-tools
|
||||
|
||||
macOS/Homebrew:
|
||||
brew install nss
|
||||
|
||||
Others:
|
||||
Download the firefox archive and common.tests.zip archive for your platform
|
||||
from https://archive.mozilla.org/pub/firefox/nightly/latest-mozilla-central/
|
||||
|
||||
Then extract certutil[.exe] from the tests.zip package and
|
||||
libnss3[.so|.dll|.dynlib] and but the former on your path and the latter on
|
||||
your library path.
|
||||
""")
|
||||
else:
|
||||
print("Using certutil %s" % certutil)
|
||||
|
||||
if certutil is not None:
|
||||
kwargs["certutil_binary"] = certutil
|
||||
else:
|
||||
print("Unable to find or install certutil, setting ssl-type to none")
|
||||
kwargs["ssl_type"] = "none"
|
||||
|
||||
if kwargs["webdriver_binary"] is None and "wdspec" in kwargs["test_types"]:
|
||||
webdriver_binary = self.browser.find_webdriver()
|
||||
|
||||
if webdriver_binary is None:
|
||||
install = self.prompt_install("geckodriver")
|
||||
|
||||
if install:
|
||||
print("Downloading geckodriver")
|
||||
webdriver_binary = self.browser.install_webdriver(dest=self.venv.bin_path)
|
||||
else:
|
||||
print("Using webdriver binary %s" % webdriver_binary)
|
||||
|
||||
if webdriver_binary:
|
||||
kwargs["webdriver_binary"] = webdriver_binary
|
||||
else:
|
||||
print("Unable to find or install geckodriver, skipping wdspec tests")
|
||||
kwargs["test_types"].remove("wdspec")
|
||||
|
||||
if kwargs["prefs_root"] is None:
|
||||
print("Downloading gecko prefs")
|
||||
prefs_root = self.browser.install_prefs(self.venv.path)
|
||||
kwargs["prefs_root"] = prefs_root
|
||||
|
||||
|
||||
class Chrome(BrowserSetup):
|
||||
name = "chrome"
|
||||
browser_cls = browser.Chrome
|
||||
|
||||
def setup_kwargs(self, kwargs):
|
||||
if kwargs["webdriver_binary"] is None:
|
||||
webdriver_binary = self.browser.find_webdriver()
|
||||
|
||||
if webdriver_binary is None:
|
||||
install = self.prompt_install("chromedriver")
|
||||
|
||||
if install:
|
||||
print("Downloading chromedriver")
|
||||
webdriver_binary = self.browser.install_webdriver(dest=self.venv.bin_path)
|
||||
else:
|
||||
print("Using webdriver binary %s" % webdriver_binary)
|
||||
|
||||
if webdriver_binary:
|
||||
kwargs["webdriver_binary"] = webdriver_binary
|
||||
else:
|
||||
raise WptrunError("Unable to locate or install chromedriver binary")
|
||||
|
||||
|
||||
class Edge(BrowserSetup):
|
||||
name = "edge"
|
||||
browser_cls = browser.Edge
|
||||
|
||||
def install(self, venv):
|
||||
raise NotImplementedError
|
||||
|
||||
def setup_kwargs(self, kwargs):
|
||||
if kwargs["webdriver_binary"] is None:
|
||||
webdriver_binary = self.browser.find_webdriver()
|
||||
|
||||
if webdriver_binary is None:
|
||||
raise WptrunError("""Unable to find WebDriver and we aren't yet clever enough to work out which
|
||||
version to download. Please go to the following URL and install the correct
|
||||
version for your Edge/Windows release somewhere on the %PATH%:
|
||||
|
||||
https://developer.microsoft.com/en-us/microsoft-edge/tools/webdriver/
|
||||
""")
|
||||
kwargs["webdriver_binary"] = webdriver_binary
|
||||
|
||||
|
||||
class InternetExplorer(BrowserSetup):
|
||||
name = "ie"
|
||||
browser_cls = browser.InternetExplorer
|
||||
|
||||
def install(self, venv):
|
||||
raise NotImplementedError
|
||||
|
||||
def setup_kwargs(self, kwargs):
|
||||
if kwargs["webdriver_binary"] is None:
|
||||
webdriver_binary = self.browser.find_webdriver()
|
||||
|
||||
if webdriver_binary is None:
|
||||
raise WptrunError("""Unable to find WebDriver and we aren't yet clever enough to work out which
|
||||
version to download. Please go to the following URL and install the driver for Internet Explorer
|
||||
somewhere on the %PATH%:
|
||||
|
||||
https://selenium-release.storage.googleapis.com/index.html
|
||||
""")
|
||||
kwargs["webdriver_binary"] = webdriver_binary
|
||||
|
||||
|
||||
class Sauce(BrowserSetup):
|
||||
name = "sauce"
|
||||
browser_cls = browser.Sauce
|
||||
|
||||
def install(self, venv):
|
||||
raise NotImplementedError
|
||||
|
||||
def setup_kwargs(self, kwargs):
|
||||
kwargs.set_if_none("sauce_browser", self.sub_product[0])
|
||||
kwargs.set_if_none("sauce_version", self.sub_product[1])
|
||||
kwargs["test_types"] = ["testharness", "reftest"]
|
||||
|
||||
|
||||
class Servo(BrowserSetup):
|
||||
name = "servo"
|
||||
browser_cls = browser.Servo
|
||||
|
||||
def install(self, venv):
|
||||
raise NotImplementedError
|
||||
|
||||
def setup_kwargs(self, kwargs):
|
||||
if kwargs["binary"] is None:
|
||||
binary = self.browser.find_binary()
|
||||
|
||||
if binary is None:
|
||||
raise WptrunError("Unable to find servo binary on the PATH")
|
||||
kwargs["binary"] = binary
|
||||
|
||||
|
||||
product_setup = {
|
||||
"firefox": Firefox,
|
||||
"chrome": Chrome,
|
||||
"edge": Edge,
|
||||
"ie": InternetExplorer,
|
||||
"servo": Servo,
|
||||
"sauce": Sauce,
|
||||
}
|
||||
|
||||
|
||||
def setup_wptrunner(venv, prompt=True, install=False, **kwargs):
|
||||
from wptrunner import wptrunner, wptcommandline
|
||||
|
||||
global logger
|
||||
|
||||
kwargs = utils.Kwargs(kwargs.iteritems())
|
||||
|
||||
product_parts = kwargs["product"].split(":")
|
||||
kwargs["product"] = product_parts[0]
|
||||
sub_product = product_parts[1:]
|
||||
|
||||
wptrunner.setup_logging(kwargs, {"mach": sys.stdout})
|
||||
logger = wptrunner.logger
|
||||
|
||||
check_environ(kwargs["product"])
|
||||
args_general(kwargs)
|
||||
|
||||
if kwargs["product"] not in product_setup:
|
||||
raise WptrunError("Unsupported product %s" % kwargs["product"])
|
||||
|
||||
setup_cls = product_setup[kwargs["product"]](venv, prompt, sub_product)
|
||||
|
||||
if install:
|
||||
logger.info("Installing browser")
|
||||
kwargs["binary"] = setup_cls.install(venv)
|
||||
|
||||
setup_cls.setup(kwargs)
|
||||
|
||||
wptcommandline.check_args(kwargs)
|
||||
|
||||
wptrunner_path = os.path.join(wpt_root, "tools", "wptrunner")
|
||||
|
||||
venv.install_requirements(os.path.join(wptrunner_path, "requirements.txt"))
|
||||
|
||||
return kwargs
|
||||
|
||||
|
||||
def run(venv, **kwargs):
|
||||
#Remove arguments that aren't passed to wptrunner
|
||||
prompt = kwargs.pop("prompt", True)
|
||||
stability = kwargs.pop("stability", True)
|
||||
install_browser = kwargs.pop("install_browser", False)
|
||||
|
||||
kwargs = setup_wptrunner(venv,
|
||||
prompt=prompt,
|
||||
install=install_browser,
|
||||
**kwargs)
|
||||
|
||||
if stability:
|
||||
import stability
|
||||
iterations, results, inconsistent = stability.run(venv, logger, **kwargs)
|
||||
|
||||
def log(x):
|
||||
print(x)
|
||||
|
||||
if inconsistent:
|
||||
stability.write_inconsistent(log, inconsistent, iterations)
|
||||
else:
|
||||
log("All tests stable")
|
||||
rv = len(inconsistent) > 0
|
||||
else:
|
||||
rv = run_single(venv, **kwargs) > 0
|
||||
|
||||
return rv
|
||||
|
||||
|
||||
def run_single(venv, **kwargs):
|
||||
from wptrunner import wptrunner
|
||||
return wptrunner.start(**kwargs)
|
||||
|
||||
|
||||
def main():
|
||||
try:
|
||||
parser = create_parser()
|
||||
args = parser.parse_args()
|
||||
|
||||
venv = virtualenv.Virtualenv(os.path.join(wpt_root, "_venv_%s") % platform.uname()[0])
|
||||
venv.start()
|
||||
venv.install_requirements(os.path.join(wpt_root, "tools", "wptrunner", "requirements.txt"))
|
||||
venv.install("requests")
|
||||
|
||||
return run(venv, vars(args))
|
||||
except WptrunError as e:
|
||||
exit(e.message)
|
||||
|
||||
|
||||
if __name__ == "__main__":
|
||||
import pdb
|
||||
from tools import localpaths
|
||||
try:
|
||||
main()
|
||||
except:
|
||||
pdb.post_mortem()
|
195
tests/wpt/web-platform-tests/tools/wpt/stability.py
Normal file
195
tests/wpt/web-platform-tests/tools/wpt/stability.py
Normal file
|
@ -0,0 +1,195 @@
|
|||
import os
|
||||
import sys
|
||||
from collections import OrderedDict, defaultdict
|
||||
|
||||
from mozlog import reader
|
||||
from mozlog.formatters import JSONFormatter, TbplFormatter
|
||||
from mozlog.handlers import BaseHandler, LogLevelFilter, StreamHandler
|
||||
|
||||
from markdown import markdown_adjust, table
|
||||
from wptrunner import wptrunner
|
||||
|
||||
|
||||
class LogActionFilter(BaseHandler):
|
||||
|
||||
"""Handler that filters out messages not of a given set of actions.
|
||||
|
||||
Subclasses BaseHandler.
|
||||
|
||||
:param inner: Handler to use for messages that pass this filter
|
||||
:param actions: List of actions for which to fire the handler
|
||||
"""
|
||||
|
||||
def __init__(self, inner, actions):
|
||||
"""Extend BaseHandler and set inner and actions props on self."""
|
||||
BaseHandler.__init__(self, inner)
|
||||
self.inner = inner
|
||||
self.actions = actions
|
||||
|
||||
def __call__(self, item):
|
||||
"""Invoke handler if action is in list passed as constructor param."""
|
||||
if item["action"] in self.actions:
|
||||
return self.inner(item)
|
||||
|
||||
|
||||
class LogHandler(reader.LogHandler):
|
||||
|
||||
"""Handle updating test and subtest status in log.
|
||||
|
||||
Subclasses reader.LogHandler.
|
||||
"""
|
||||
def __init__(self):
|
||||
self.results = OrderedDict()
|
||||
|
||||
def find_or_create_test(self, data):
|
||||
test_name = data["test"]
|
||||
if self.results.get(test_name):
|
||||
return self.results[test_name]
|
||||
|
||||
test = {
|
||||
"subtests": OrderedDict(),
|
||||
"status": defaultdict(int)
|
||||
}
|
||||
self.results[test_name] = test
|
||||
return test
|
||||
|
||||
def find_or_create_subtest(self, data):
|
||||
test = self.find_or_create_test(data)
|
||||
subtest_name = data["subtest"]
|
||||
|
||||
if test["subtests"].get(subtest_name):
|
||||
return test["subtests"][subtest_name]
|
||||
|
||||
subtest = {
|
||||
"status": defaultdict(int),
|
||||
"messages": set()
|
||||
}
|
||||
test["subtests"][subtest_name] = subtest
|
||||
|
||||
return subtest
|
||||
|
||||
def test_status(self, data):
|
||||
subtest = self.find_or_create_subtest(data)
|
||||
subtest["status"][data["status"]] += 1
|
||||
if data.get("message"):
|
||||
subtest["messages"].add(data["message"])
|
||||
|
||||
def test_end(self, data):
|
||||
test = self.find_or_create_test(data)
|
||||
test["status"][data["status"]] += 1
|
||||
|
||||
|
||||
def is_inconsistent(results_dict, iterations):
|
||||
"""Return whether or not a single test is inconsistent."""
|
||||
return len(results_dict) > 1 or sum(results_dict.values()) != iterations
|
||||
|
||||
|
||||
def process_results(log, iterations):
|
||||
"""Process test log and return overall results and list of inconsistent tests."""
|
||||
inconsistent = []
|
||||
handler = LogHandler()
|
||||
reader.handle_log(reader.read(log), handler)
|
||||
results = handler.results
|
||||
for test_name, test in results.iteritems():
|
||||
if is_inconsistent(test["status"], iterations):
|
||||
inconsistent.append((test_name, None, test["status"], []))
|
||||
for subtest_name, subtest in test["subtests"].iteritems():
|
||||
if is_inconsistent(subtest["status"], iterations):
|
||||
inconsistent.append((test_name, subtest_name, subtest["status"], subtest["messages"]))
|
||||
return results, inconsistent
|
||||
|
||||
|
||||
def err_string(results_dict, iterations):
|
||||
"""Create and return string with errors from test run."""
|
||||
rv = []
|
||||
total_results = sum(results_dict.values())
|
||||
for key, value in sorted(results_dict.items()):
|
||||
rv.append("%s%s" %
|
||||
(key, ": %s/%s" % (value, iterations) if value != iterations else ""))
|
||||
if total_results < iterations:
|
||||
rv.append("MISSING: %s/%s" % (iterations - total_results, iterations))
|
||||
rv = ", ".join(rv)
|
||||
if is_inconsistent(results_dict, iterations):
|
||||
rv = "**%s**" % rv
|
||||
return rv
|
||||
|
||||
|
||||
def write_inconsistent(log, inconsistent, iterations):
    """Emit a markdown table of unstable tests via the ``log`` callable."""
    log("## Unstable results ##\n")
    rows = []
    for test, subtest, results, messages in inconsistent:
        rows.append((
            "`%s`" % markdown_adjust(test),
            ("`%s`" % markdown_adjust(subtest)) if subtest else "",
            err_string(results, iterations),
            ("`%s`" % markdown_adjust(";".join(messages))) if len(messages) else ""))
    table(["Test", "Subtest", "Results", "Messages"], rows, log)
|
||||
|
||||
|
||||
def write_results(log, results, iterations, pr_number=None, use_details=False):
    """Emit a markdown report of all test results via the ``log`` callable.

    :param log: callable taking one string to output.
    :param results: mapping of test name -> result dict (from process_results).
    :param iterations: number of runs performed.
    :param pr_number: if given, test titles link to the w3c-test.org
        submission for that pull request.
    :param use_details: wrap sections in HTML <details> so the report
        collapses when rendered (e.g. in a GitHub comment).
    """
    log("## All results ##\n")
    if use_details:
        log("<details>\n")
        log("<summary>%i %s ran</summary>\n\n" % (len(results),
                                                  "tests" if len(results) > 1
                                                  else "test"))

    # .items() instead of Python-2-only .iteritems().
    for test_name, test in results.items():
        baseurl = "http://w3c-test.org/submissions"
        if "https" in os.path.splitext(test_name)[0].split(".")[1:]:
            # https tests must be linked through an https URL.
            baseurl = "https://w3c-test.org/submissions"
        title = test_name
        if use_details:
            log("<details>\n")
            if pr_number:
                title = "<a href=\"%s/%s%s\">%s</a>" % (baseurl, pr_number, test_name, title)
            log('<summary>%s</summary>\n\n' % title)
        else:
            log("### %s ###" % title)
        # First row summarises the parent test itself.
        strings = [("", err_string(test["status"], iterations), "")]

        # Guard on subtest_name (not the always-truthy subtest dict) so a
        # None/empty name does not reach markdown_adjust, matching the
        # handling in write_inconsistent.
        strings.extend(((
            ("`%s`" % markdown_adjust(subtest_name)) if subtest_name else "",
            err_string(subtest["status"], iterations),
            ("`%s`" % markdown_adjust(';'.join(subtest["messages"]))) if len(subtest["messages"]) else "")
            for subtest_name, subtest in test["subtests"].items()))
        table(["Subtest", "Results", "Messages"], strings, log)
        if use_details:
            log("</details>\n")

    if use_details:
        log("</details>\n")
|
||||
|
||||
|
||||
def run(venv, logger, **kwargs):
    """Run the selected tests repeatedly and collect stability data.

    Returns (iterations, results, inconsistent).
    """
    kwargs["pause_after_test"] = False
    if kwargs["repeat"] == 1:
        kwargs["repeat"] = 10

    # Console handler: warnings and above, plus raw process output.
    console_handler = LogActionFilter(
        LogLevelFilter(
            StreamHandler(sys.stdout, TbplFormatter()),
            "WARNING"),
        ["log", "process_output"])

    # There is a public API for this in the next mozlog
    initial_handlers = logger._state.handlers
    logger._state.handlers = []

    with open("raw.log", "wb") as log:
        # Setup logging for wptrunner that keeps process output and
        # warning+ level logs only
        logger.add_handler(console_handler)
        logger.add_handler(StreamHandler(log, JSONFormatter()))

        wptrunner.run_tests(**kwargs)

    logger._state.handlers = initial_handlers

    with open("raw.log", "rb") as log:
        results, inconsistent = process_results(log, kwargs["repeat"])

    return kwargs["repeat"], results, inconsistent
|
286
tests/wpt/web-platform-tests/tools/wpt/testfiles.py
Normal file
286
tests/wpt/web-platform-tests/tools/wpt/testfiles.py
Normal file
|
@ -0,0 +1,286 @@
|
|||
import argparse
|
||||
import itertools
|
||||
import logging
|
||||
import os
|
||||
import subprocess
|
||||
import sys
|
||||
|
||||
from ..manifest import manifest, update
|
||||
|
||||
here = os.path.dirname(__file__)
|
||||
wpt_root = os.path.abspath(os.path.join(here, os.pardir, os.pardir))
|
||||
|
||||
logger = logging.getLogger()
|
||||
|
||||
|
||||
def get_git_cmd(repo_path):
    """Return a helper that runs git subcommands in *repo_path*.

    The helper returns the command's stripped utf-8 stdout, and exits
    the process when git reports a failure.
    """
    def git(cmd, *args):
        decoded_args = [item.decode("utf8") if isinstance(item, bytes) else item
                        for item in args]
        full_cmd = ["git", cmd] + decoded_args
        try:
            logger.debug(" ".join(full_cmd))
            output = subprocess.check_output(full_cmd, cwd=repo_path,
                                             stderr=subprocess.STDOUT)
            return output.decode("utf8").strip()
        except subprocess.CalledProcessError as e:
            logger.error("Git command exited with status %i" % e.returncode)
            logger.error(e.output)
            sys.exit(1)
    return git
|
||||
|
||||
|
||||
def branch_point():
    """Work out the commit at which the current branch diverged from master."""
    git = get_git_cmd(wpt_root)
    if os.environ.get("TRAVIS_PULL_REQUEST", "false") != "false":
        # This is a PR, so the base branch is in TRAVIS_BRANCH
        travis_branch = os.environ.get("TRAVIS_BRANCH")
        assert travis_branch, "TRAVIS_BRANCH environment variable is defined"
        branch_point = git("rev-parse", travis_branch)
    else:
        # Not on a PR: look for commits that exist only on the current
        # branch, c.f.
        # http://stackoverflow.com/questions/13460152/find-first-ancestor-commit-in-another-branch
        head = git("rev-parse", "HEAD")
        not_heads = [item for item in git("rev-parse", "--not", "--all").split("\n")
                     if item.strip() and head not in item]
        commits = git("rev-list", "HEAD", *not_heads).split("\n")
        branch_point = None
        if commits:
            first_commit = commits[-1]
            if first_commit:
                branch_point = git("rev-parse", first_commit + "^")

        # The heuristic above fails when:
        # - the current branch has fallen behind the fetched master, or
        # - changes on the current branch were rebased and so exist on no
        #   other branch, selecting an earlier commit than desired (as
        #   determined by the later of branch point and merge base).
        # In either case fall back to the merge base as the branch point.
        merge_base = git("merge-base", "HEAD", "origin/master")
        if (branch_point is None or
            (branch_point != merge_base and
             not git("log", "--oneline", "%s..%s" % (merge_base, branch_point)).strip())):
            logger.debug("Using merge-base as the branch point")
            branch_point = merge_base
        else:
            logger.debug("Using first commit on another branch as the branch point")

    logger.debug("Branch point from master: %s" % branch_point)
    return branch_point
|
||||
|
||||
|
||||
def files_changed(revish, ignore_dirs=None, include_uncommitted=False, include_new=False):
    """Get and return files changed since the current branch diverged from
    master, separating out files located under any directory named in
    `ignore_dirs`.

    :param revish: git revision range to diff (e.g. "base..HEAD").
    :param ignore_dirs: top-level directory names whose files are returned
        in the second element rather than the first.
    :param include_uncommitted: also include modified/staged working-tree files.
    :param include_new: also include untracked files (only meaningful with
        include_uncommitted).
    :return: tuple (changed, ignored) of absolute paths.
    """
    if ignore_dirs is None:
        ignore_dirs = []

    git = get_git_cmd(wpt_root)
    files = git("diff", "--name-only", "-z", revish).split("\0")
    assert not files[-1]
    files = set(files[:-1])

    if include_uncommitted:
        entries = git("status", "-z").split("\0")
        assert not entries[-1]
        entries = entries[:-1]
        for item in entries:
            # Porcelain -z entries are "XY PATH" with a fixed two-character
            # status field; slicing (rather than item.split()) keeps paths
            # that contain spaces intact.
            status, path = item[:2], item[3:]
            if status == "??" and not include_new:
                continue
            else:
                if not os.path.isdir(path):
                    files.add(path)
                else:
                    for dirpath, dirnames, filenames in os.walk(path):
                        for filename in filenames:
                            files.add(os.path.join(dirpath, filename))

    if not files:
        return [], []

    changed = []
    ignored = []
    for item in sorted(files):
        fullpath = os.path.join(wpt_root, item)
        topmost_dir = item.split(os.sep, 1)[0]
        if topmost_dir in ignore_dirs:
            ignored.append(fullpath)
        else:
            changed.append(fullpath)

    return changed, ignored
|
||||
|
||||
|
||||
def _in_repo_root(full_path):
    """Return True when *full_path* sits directly in the repository root."""
    components = os.path.relpath(full_path, wpt_root).split(os.sep)
    return len(components) < 2
|
||||
|
||||
def _init_manifest_cache():
    """Create a memoised loader for the wpt manifest.

    At most one (path, manifest) pair is cached at a time; requesting a
    different path evicts the previous entry.
    """
    cache = {}

    def load(manifest_path=None):
        if manifest_path is None:
            manifest_path = os.path.join(wpt_root, "MANIFEST.json")
        cached = cache.get(manifest_path)
        if cached:
            return cached
        # cache at most one path:manifest
        cache.clear()
        wpt_manifest = manifest.load(wpt_root, manifest_path)
        if wpt_manifest is None:
            wpt_manifest = manifest.Manifest()
        update.update(wpt_root, wpt_manifest)
        cache[manifest_path] = wpt_manifest
        return cache[manifest_path]
    return load
|
||||
|
||||
load_manifest = _init_manifest_cache()
|
||||
|
||||
|
||||
def affected_testfiles(files_changed, skip_tests, manifest_path=None):
    """Determine and return the test files affected by a set of changes.

    :param files_changed: iterable of absolute paths that were modified.
    :param skip_tests: top-level directory names to exclude from the scan.
    :param manifest_path: optional path to MANIFEST.json; defaults to the
        manifest in the repository root.
    :return: tuple (tests_changed, affected_testfiles) of sets of absolute
        test file paths.
    """
    affected_testfiles = set()
    # Exclude files that are in the repo root, because
    # they are not part of any test.
    files_changed = [f for f in files_changed if not _in_repo_root(f)]
    nontests_changed = set(files_changed)
    wpt_manifest = load_manifest(manifest_path)

    test_types = ["testharness", "reftest", "wdspec"]
    support_files = {os.path.join(wpt_root, path)
                     for _, path, _ in wpt_manifest.itertypes("support")}
    wdspec_test_files = {os.path.join(wpt_root, path)
                         for _, path, _ in wpt_manifest.itertypes("wdspec")}
    test_files = {os.path.join(wpt_root, path)
                  for _, path, _ in wpt_manifest.itertypes(*test_types)}

    # Only support files can make other tests "affected".
    nontests_changed = nontests_changed.intersection(support_files)

    tests_changed = set(item for item in files_changed if item in test_files)

    nontest_changed_paths = set()
    for full_path in nontests_changed:
        rel_path = os.path.relpath(full_path, wpt_root)
        path_components = rel_path.split(os.sep)
        top_level_subdir = path_components[0]
        if top_level_subdir in skip_tests:
            continue
        repo_path = "/" + os.path.relpath(full_path, wpt_root).replace(os.path.sep, "/")
        nontest_changed_paths.add((full_path, repo_path))

    def affected_by_wdspec(test):
        # wdspec tests implicitly depend on every support file in their
        # subtree (or in a sibling "support" directory).
        affected = False
        if test in wdspec_test_files:
            for support_full_path, _ in nontest_changed_paths:
                # parent of support file or of "support" directory
                parent = os.path.dirname(support_full_path)
                if os.path.basename(parent) == "support":
                    parent = os.path.dirname(parent)
                relpath = os.path.relpath(test, parent)
                if not relpath.startswith(os.pardir):
                    # testfile is in subtree of support file
                    affected = True
                    break
        return affected

    for root, dirs, fnames in os.walk(wpt_root):
        # Walk the tree looking for test files containing either the
        # relative filepath or absolute filepath to the changed files.
        if root == wpt_root:
            for dir_name in skip_tests:
                # Guard: a skip entry may not exist in this checkout.
                if dir_name in dirs:
                    dirs.remove(dir_name)
        for fname in fnames:
            test_full_path = os.path.join(root, fname)
            # Skip any file that's not a test file.
            if test_full_path not in test_files:
                continue
            if affected_by_wdspec(test_full_path):
                affected_testfiles.add(test_full_path)
                continue

            with open(test_full_path, "rb") as fh:
                file_contents = fh.read()
                # The file is read in binary mode, so the BOM prefixes must
                # be bytes literals: comparing against str would raise
                # TypeError on Python 3 (and never match).
                if file_contents.startswith(b"\xfe\xff"):
                    file_contents = file_contents.decode("utf-16be", "replace")
                elif file_contents.startswith(b"\xff\xfe"):
                    file_contents = file_contents.decode("utf-16le", "replace")
                else:
                    file_contents = file_contents.decode("utf8", "replace")
                for full_path, repo_path in nontest_changed_paths:
                    rel_path = os.path.relpath(full_path, root).replace(os.path.sep, "/")
                    if rel_path in file_contents or repo_path in file_contents:
                        affected_testfiles.add(test_full_path)
                        # Already known to be affected; no need to check
                        # the remaining changed paths (was a no-op
                        # `continue` before).
                        break

    return tests_changed, affected_testfiles
|
||||
|
||||
|
||||
def get_parser():
    """Build the argument parser shared by the files-changed and
    tests-affected commands."""
    parser = argparse.ArgumentParser()
    parser.add_argument("revish", default=None, help="Commits to consider. Defaults to the commits on the current branch", nargs="?")
    # No type=set here: with nargs="*" argparse applies the type converter
    # to each individual argument, which turned every directory name into
    # a set of its characters. A plain list of strings supports the same
    # `dir in ignore_dirs` membership tests.
    parser.add_argument("--ignore-dirs", nargs="*", default=["resources"],
                        help="Directories to exclude from the list of changes")
    parser.add_argument("--modified", action="store_true",
                        help="Include files under version control that have been modified or staged")
    parser.add_argument("--new", action="store_true",
                        help="Include files in the worktree that are not in version control")
    parser.add_argument("--show-type", action="store_true",
                        help="Print the test type along with each affected test")
    return parser
|
||||
|
||||
|
||||
def get_parser_affected():
    """Extend the common parser with the --metadata option used by the
    tests-affected command."""
    parser = get_parser()
    parser.add_argument("--metadata",
                        action="store",
                        dest="metadata_root",
                        default=wpt_root,
                        help="Directory that will contain MANIFEST.json")
    return parser
|
||||
|
||||
def get_revish(**kwargs):
    """Return the revision range to operate on, defaulting to the commits
    between the branch point and HEAD."""
    revish = kwargs["revish"]
    if revish is None:
        revish = "%s..HEAD" % branch_point()
    return revish
|
||||
|
||||
|
||||
def run_changed_files(**kwargs):
    """Print the repo-relative path of every changed file."""
    revish = get_revish(**kwargs)
    changed, _ = files_changed(revish, kwargs["ignore_dirs"],
                               include_uncommitted=kwargs["modified"],
                               include_new=kwargs["new"])
    for changed_path in sorted(changed):
        print(os.path.relpath(changed_path, wpt_root))
|
||||
|
||||
|
||||
def run_tests_affected(**kwargs):
    """Print each test affected by the changes in the selected revisions,
    optionally annotated with its test type."""
    revish = get_revish(**kwargs)
    changed, _ = files_changed(revish, kwargs["ignore_dirs"],
                               include_uncommitted=kwargs["modified"],
                               include_new=kwargs["new"])
    manifest_path = os.path.join(kwargs["metadata_root"], "MANIFEST.json")
    tests_changed, dependents = affected_testfiles(
        changed,
        set(["conformance-checkers", "docs", "tools"]),
        manifest_path=manifest_path
    )

    show_type = kwargs["show_type"]
    if show_type:
        wpt_manifest = load_manifest(manifest_path)
        template = "{path}\t{item_type}"
    else:
        template = "{path}"
    for item in sorted(tests_changed | dependents):
        fields = {"path": os.path.relpath(item, wpt_root)}
        if show_type:
            item_types = {i.item_type for i in wpt_manifest.iterpath(fields["path"])}
            if len(item_types) != 1:
                item_types = [" ".join(item_types)]
            fields["item_type"] = item_types.pop()
        print(template.format(**fields))
|
138
tests/wpt/web-platform-tests/tools/wpt/tests/test_wpt.py
Normal file
138
tests/wpt/web-platform-tests/tools/wpt/tests/test_wpt.py
Normal file
|
@ -0,0 +1,138 @@
|
|||
import os
|
||||
import shutil
|
||||
import socket
|
||||
import subprocess
|
||||
import time
|
||||
import urllib2
|
||||
|
||||
import pytest
|
||||
|
||||
from tools.wpt import wpt
|
||||
|
||||
|
||||
# Tests currently don't work on Windows for path reasons
|
||||
|
||||
def test_missing():
    """An unknown subcommand must make wpt exit with an error."""
    with pytest.raises(SystemExit):
        wpt.main(argv=["#missing-command"])
|
||||
|
||||
|
||||
def test_help():
    """`wpt --help` should print usage and exit successfully."""
    # TODO: argparse appears to require this argument order; try to work
    # around that.
    with pytest.raises(SystemExit) as excinfo:
        wpt.main(argv=["--help"])
    assert excinfo.value.code == 0
|
||||
|
||||
|
||||
def test_run_firefox():
    """Run one test in headless Firefox, installing the browser on demand."""
    # TODO: argparse appears to require this argument order; try to work
    # around that.
    os.environ["MOZ_HEADLESS"] = "1"
    try:
        install_dir = os.path.join(wpt.localpaths.repo_root, "_venv", "firefox")
        if os.path.exists(install_dir):
            shutil.rmtree(install_dir)
        with pytest.raises(SystemExit) as excinfo:
            wpt.main(argv=["run", "--no-pause", "--install-browser", "--yes",
                           "--metadata", "~/meta/",
                           "firefox", "/dom/nodes/Element-tagName.html"])
        assert os.path.exists(install_dir)
        shutil.rmtree(install_dir)
        assert excinfo.value.code == 0
    finally:
        del os.environ["MOZ_HEADLESS"]
|
||||
|
||||
|
||||
def test_run_chrome():
    """Run one test in headless Chrome."""
    with pytest.raises(SystemExit) as excinfo:
        wpt.main(argv=["run", "--yes", "--no-pause", "--binary-arg", "headless",
                       "--metadata", "~/meta/",
                       "chrome", "/dom/nodes/Element-tagName.html"])
    assert excinfo.value.code == 0
|
||||
|
||||
|
||||
def test_install_chromedriver():
    """`wpt install chrome webdriver` should download a chromedriver binary."""
    chromedriver_path = os.path.join(wpt.localpaths.repo_root, "_venv", "bin", "chromedriver")
    if os.path.exists(chromedriver_path):
        os.unlink(chromedriver_path)
    with pytest.raises(SystemExit) as excinfo:
        wpt.main(argv=["install", "chrome", "webdriver"])
    assert excinfo.value.code == 0
    assert os.path.exists(chromedriver_path)
    os.unlink(chromedriver_path)
|
||||
|
||||
|
||||
def test_install_firefox():
    """`wpt install firefox browser` should download a Firefox install."""
    install_dir = os.path.join(wpt.localpaths.repo_root, "_venv", "firefox")
    if os.path.exists(install_dir):
        shutil.rmtree(install_dir)
    with pytest.raises(SystemExit) as excinfo:
        wpt.main(argv=["install", "firefox", "browser"])
    assert excinfo.value.code == 0
    assert os.path.exists(install_dir)
    shutil.rmtree(install_dir)
|
||||
|
||||
|
||||
def test_files_changed(capsys):
    """files-changed should print exactly the paths touched by the commit."""
    commit = "9047ac1d9f51b1e9faa4f9fad9c47d109609ab09"
    with pytest.raises(SystemExit) as excinfo:
        wpt.main(argv=["files-changed", "%s~..%s" % (commit, commit)])
    assert excinfo.value.code == 0
    out, err = capsys.readouterr()
    expected = """html/browsers/offline/appcache/workers/appcache-worker.html
html/browsers/offline/appcache/workers/resources/appcache-dedicated-worker-not-in-cache.js
html/browsers/offline/appcache/workers/resources/appcache-shared-worker-not-in-cache.js
html/browsers/offline/appcache/workers/resources/appcache-worker-data.py
html/browsers/offline/appcache/workers/resources/appcache-worker-import.py
html/browsers/offline/appcache/workers/resources/appcache-worker.manifest
html/browsers/offline/appcache/workers/resources/appcache-worker.py
"""
    assert out == expected
    assert err == ""
|
||||
|
||||
|
||||
def test_tests_affected(capsys):
    """tests-affected should report tests referencing the changed files."""
    # This doesn't really work properly for random commits because we test
    # the files in the current working directory for references to the
    # changed files, not the ones at that specific commit. But we can at
    # least test it returns something sensible.
    commit = "9047ac1d9f51b1e9faa4f9fad9c47d109609ab09"
    with pytest.raises(SystemExit) as excinfo:
        wpt.main(argv=["tests-affected", "--metadata", "~/meta/", "%s~..%s" % (commit, commit)])
    assert excinfo.value.code == 0
    out, err = capsys.readouterr()
    assert "html/browsers/offline/appcache/workers/appcache-worker.html" in out
    assert err == ""
|
||||
|
||||
|
||||
def test_serve():
    """`wpt serve` should bring up an HTTP server on web-platform.test:8000."""
    def connect():
        # Raises socket.error when nothing is listening on port 8000.
        s = socket.socket()
        try:
            s.connect(("127.0.0.1", 8000))
        finally:
            # Close the socket even when connect() raises (the original
            # leaked it).
            s.close()

    # Sanity check: the port must be free before the server is started.
    with pytest.raises(socket.error):
        connect()

    p = subprocess.Popen([os.path.join(wpt.localpaths.repo_root, "wpt"), "serve"],
                         preexec_fn=os.setsid)

    start = time.time()
    try:
        while True:
            assert time.time() - start < 60, "server did not start within 60s"
            try:
                resp = urllib2.urlopen("http://web-platform.test:8000")
                # print() calls work on both Python 2 and 3; the original
                # used Python-2-only print statements.
                print(resp)
            except urllib2.URLError:
                print("URLError")
                time.sleep(1)
            else:
                assert resp.code == 200
                break
    finally:
        # Kill the whole process group created via os.setsid.
        os.killpg(p.pid, 15)
|
||||
|
||||
# The following commands are slow running and used implicitly in other CI
|
||||
# jobs, so we skip them here:
|
||||
# wpt check-stability
|
||||
# wpt manifest
|
||||
# wpt lint
|
21
tests/wpt/web-platform-tests/tools/wpt/tox.ini
Normal file
21
tests/wpt/web-platform-tests/tools/wpt/tox.ini
Normal file
|
@ -0,0 +1,21 @@
|
|||
[tox]
|
||||
envlist = py27
|
||||
skipsdist=True
|
||||
|
||||
[testenv]
|
||||
deps =
|
||||
flake8
|
||||
pytest
|
||||
pytest-cov
|
||||
hypothesis
|
||||
-r{toxinidir}/../wptrunner/requirements.txt
|
||||
-r{toxinidir}/../wptrunner/requirements_chrome.txt
|
||||
-r{toxinidir}/../wptrunner/requirements_firefox.txt
|
||||
|
||||
commands =
|
||||
pytest --cov
|
||||
flake8
|
||||
|
||||
[flake8]
|
||||
ignore = E128,E129,E221,E226,E231,E251,E265,E302,E303,E305,E402,E901,F401,F821,F841
|
||||
max-line-length = 141
|
|
@ -3,7 +3,7 @@ import sys
|
|||
import logging
|
||||
from distutils.spawn import find_executable
|
||||
|
||||
from utils import call
|
||||
from tools.wpt.utils import call
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
142
tests/wpt/web-platform-tests/tools/wpt/wpt.py
Normal file
142
tests/wpt/web-platform-tests/tools/wpt/wpt.py
Normal file
|
@ -0,0 +1,142 @@
|
|||
import argparse
|
||||
import os
|
||||
import json
|
||||
import sys
|
||||
|
||||
from tools import localpaths
|
||||
|
||||
from six import iteritems
|
||||
from . import virtualenv
|
||||
|
||||
|
||||
here = os.path.dirname(__file__)
|
||||
wpt_root = os.path.abspath(os.path.join(here, os.pardir, os.pardir))
|
||||
|
||||
|
||||
def load_commands():
    """Read commands.json from every directory listed in the "paths" file
    and return a mapping of command name -> command properties."""
    commands = {}
    with open(os.path.join(here, "paths"), "r") as f:
        paths = [line.strip().replace("/", os.path.sep) for line in f if line.strip()]
    for path in paths:
        abs_path = os.path.join(wpt_root, path, "commands.json")
        base_dir = os.path.dirname(abs_path)
        with open(abs_path, "r") as f:
            data = json.load(f)
        for command, props in iteritems(data):
            assert "path" in props
            assert "script" in props
            commands[command] = {
                "path": os.path.join(base_dir, props["path"]),
                "script": props["script"],
                "parser": props.get("parser"),
                "parse_known": props.get("parse_known", False),
                "help": props.get("help"),
                "virtualenv": props.get("virtualenv", True),
                "install": props.get("install", []),
                "requirements": [os.path.join(base_dir, item)
                                 for item in props.get("requirements", [])]
            }
    return commands
|
||||
|
||||
|
||||
def parse_args(argv, commands):
    """Parse the top-level wpt arguments, deferring per-command arguments.

    Each known command is registered as a bare subparser (add_help=False)
    so argparse recognises the command name; the command's own parser, if
    any, handles the remaining arguments later.

    :param argv: argument list (without the program name).
    :param commands: mapping of command name -> properties (needs "help").
    :return: (namespace, extra) from parse_known_args.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument("--venv", action="store", help="Path to an existing virtualenv to use")
    parser.add_argument("--debug", action="store_true", help="Run the debugger in case of an exception")
    subparsers = parser.add_subparsers(dest="command")
    # Plain .items() (works on Python 2 and 3) replaces six.iteritems; the
    # unused `sub_parser` binding is dropped.
    for command, props in commands.items():
        subparsers.add_parser(command, help=props["help"], add_help=False)

    args, extra = parser.parse_known_args(argv)

    return args, extra
|
||||
|
||||
|
||||
def import_command(prog, command, props):
    """Import the module implementing *command* and return its entry point
    and (optionally) its argument parser.

    The script path is imported as a module rather than exec'd so that
    relative imports inside the script keep working.
    """
    rel_path = os.path.relpath(props["path"], wpt_root)
    parts = os.path.splitext(rel_path)[0].split(os.path.sep)
    mod_name = ".".join(parts)

    mod = __import__(mod_name)
    for part in parts[1:]:
        mod = getattr(mod, part)

    script = getattr(mod, props["script"])
    if props["parser"] is None:
        parser = None
    else:
        parser = getattr(mod, props["parser"])()
        parser.prog = "%s %s" % (os.path.basename(prog), command)

    return script, parser
|
||||
|
||||
|
||||
def setup_virtualenv(path, props):
    """Create/activate the virtualenv for a command and install its
    declared packages and requirements files.

    :param path: virtualenv location, or None for the default "_venv"
        directory in the repository root.
    :param props: command properties containing "install" (package names)
        and "requirements" (requirements file paths).
    :return: the started Virtualenv instance.
    """
    if path is None:
        path = os.path.join(wpt_root, "_venv")
    venv = virtualenv.Virtualenv(path)
    venv.start()
    for name in props["install"]:
        venv.install(name)
    # Distinct loop variable: the original reused `path`, shadowing the
    # parameter (and the venv location) mid-function.
    for requirements_path in props["requirements"]:
        venv.install_requirements(requirements_path)
    return venv
|
||||
|
||||
|
||||
def main(prog=None, argv=None):
    """Entry point: resolve and run the requested wpt subcommand."""
    if prog is None:
        prog = sys.argv[0]
    if argv is None:
        argv = sys.argv[1:]

    commands = load_commands()

    main_args, command_args = parse_args(argv, commands)

    if not (len(argv) and argv[0] in commands):
        sys.exit(1)

    command = main_args.command
    props = commands[command]
    venv = None
    if props["virtualenv"]:
        venv = setup_virtualenv(main_args.venv, props)
    script, parser = import_command(prog, command, props)
    if parser:
        if props["parse_known"]:
            kwargs, extras = parser.parse_known_args(command_args)
            extras = (extras,)
            kwargs = vars(kwargs)
        else:
            extras = ()
            kwargs = vars(parser.parse_args(command_args))
    else:
        extras = ()
        kwargs = {}

    # Commands run in a virtualenv receive it as their first argument.
    args = (venv,) + extras if venv is not None else extras

    if script:
        try:
            rv = script(*args, **kwargs)
            if rv is not None:
                sys.exit(int(rv))
        except Exception:
            if main_args.debug:
                import pdb
                pdb.post_mortem()
            else:
                raise
    sys.exit(0)


if __name__ == "__main__":
    main()
|
|
@ -1,308 +0,0 @@
|
|||
import argparse
|
||||
import os
|
||||
import platform
|
||||
import shutil
|
||||
import subprocess
|
||||
import sys
|
||||
import tarfile
|
||||
from distutils.spawn import find_executable
|
||||
|
||||
import localpaths
|
||||
from browserutils import browser, utils, virtualenv
|
||||
logger = None
|
||||
|
||||
wpt_root = os.path.abspath(os.path.join(os.path.dirname(__file__), ".."))
|
||||
|
||||
|
||||
class WptrunnerHelpAction(argparse.Action):
    """argparse action that prints wptrunner's own help text and exits."""

    def __init__(self,
                 option_strings,
                 dest=argparse.SUPPRESS,
                 default=argparse.SUPPRESS,
                 help=None):
        # nargs=0: the option consumes no arguments, it only triggers help.
        super(WptrunnerHelpAction, self).__init__(
            option_strings=option_strings,
            dest=dest,
            default=default,
            nargs=0,
            help=help)

    def __call__(self, parser, namespace, values, option_string=None):
        # Import lazily: wptrunner is only needed when this help text is
        # actually requested.
        from wptrunner import wptcommandline
        wptparser = wptcommandline.create_parser()
        wptparser.usage = parser.usage
        wptparser.print_help()
        parser.exit()
|
||||
|
||||
|
||||
def create_parser():
    """Build the command line parser for the run command."""
    parser = argparse.ArgumentParser()
    parser.add_argument("product", action="store",
                        help="Browser to run tests in")
    parser.add_argument("tests", action="store", nargs="*",
                        help="Path to tests to run")
    parser.add_argument("wptrunner_args", nargs=argparse.REMAINDER,
                        help="Arguments to pass through to wptrunner")
    parser.add_argument("--yes", "-y", dest="prompt", action="store_false", default=True,
                        help="Don't prompt before installing components")
    parser.add_argument("--wptrunner-help",
                        action=WptrunnerHelpAction, default=argparse.SUPPRESS,
                        help="Print wptrunner help")
    return parser
|
||||
|
||||
|
||||
def exit(msg):
    """Log *msg* as an error and terminate with a non-zero status.

    NOTE: deliberately shadows the builtin ``exit`` within this module.
    """
    logger.error(msg)
    sys.exit(1)
|
||||
|
||||
|
||||
def args_general(kwargs):
    """Fill in the product-independent wptrunner arguments and verify the
    OpenSSL prerequisites."""
    kwargs.set_if_none("tests_root", wpt_root)
    kwargs.set_if_none("metadata_root", wpt_root)
    kwargs.set_if_none("manifest_update", True)

    if kwargs["ssl_type"] == "openssl":
        if not find_executable(kwargs["openssl_binary"]):
            # platform.uname() instead of os.uname(): os.uname() does not
            # exist on Windows, so the original check raised
            # AttributeError on the very platform it tried to detect.
            if platform.uname()[0] == "Windows":
                exit("""OpenSSL binary not found. If you need HTTPS tests, install OpenSSL from

https://slproweb.com/products/Win32OpenSSL.html

Ensuring that libraries are added to /bin and add the resulting bin directory to
your PATH.

Otherwise run with --ssl-type=none""")
            else:
                exit("""OpenSSL not found. If you don't need HTTPS support run with --ssl-type=none,
otherwise install OpenSSL and ensure that it's on your $PATH.""")
|
||||
|
||||
|
||||
def check_environ(product):
    """Verify that the system hosts file contains the web-platform.test
    entries.

    Firefox sets up its own host resolution, so the check is skipped for
    it; every other product relies on the hosts file.
    """
    if product != "firefox":
        expected_hosts = set(["web-platform.test",
                              "www.web-platform.test",
                              "www1.web-platform.test",
                              "www2.web-platform.test",
                              "xn--n8j6ds53lwwkrqhv28a.web-platform.test",
                              "xn--lve-6lad.web-platform.test",
                              "nonexistent-origin.web-platform.test"])
        if platform.uname()[0] != "Windows":
            hosts_path = "/etc/hosts"
        else:
            # Raw string: the plain literal contains invalid escape
            # sequences (\W, \S, \d, \e, \h) that only work by accident
            # and trigger deprecation warnings on Python 3.6+.
            hosts_path = r"C:\Windows\System32\drivers\etc\hosts"
        with open(hosts_path, "r") as f:
            for line in f:
                # Strip comments before parsing "IP hostname" pairs.
                line = line.split("#", 1)[0].strip()
                parts = line.split()
                if len(parts) == 2:
                    host = parts[1]
                    expected_hosts.discard(host)
        if expected_hosts:
            exit("""Missing hosts file configuration for %s.
See README.md for more details.""" % ",".join(expected_hosts))
|
||||
|
||||
def prompt_install(component, prompt):
    """Ask the user whether *component* should be downloaded and installed.

    Returns True immediately when prompting is disabled; otherwise loops
    until the user answers yes (the default) or no.
    """
    if not prompt:
        return True
    while True:
        answer = raw_input("Download and install %s [Y/n]? " % component).strip().lower()
        if answer in ("", "y"):
            return True
        if answer == "n":
            return False
|
||||
|
||||
|
||||
def args_firefox(venv, kwargs, firefox, prompt=True):
    """Fill in the Firefox-specific wptrunner arguments.

    Locates (or installs) the Firefox binary, certutil, geckodriver and
    the gecko prefs, updating *kwargs* in place.
    """
    if kwargs["binary"] is None:
        binary = firefox.find_binary()
        if binary is None:
            exit("""Firefox binary not found on $PATH.

Install Firefox or use --binary to set the binary path""")
        kwargs["binary"] = binary

    if kwargs["certutil_binary"] is None and kwargs["ssl_type"] != "none":
        certutil = firefox.find_certutil()

        if certutil is None:
            # Can't download this for now because it's missing the libnss3 library
            exit("""Can't find certutil.

This must be installed using your OS package manager or directly e.g.

Debian/Ubuntu:
sudo apt install libnss3-tools

macOS/Homebrew:
brew install nss

Others:
Download the firefox archive and common.tests.zip archive for your platform
from
https://archive.mozilla.org/pub/firefox/nightly/latest-mozilla-central/
Then extract certutil[.exe] from the tests.zip package and
libnss3[.so|.dll|.dynlib] and but the former on your path and the latter on
your library path.
""")
        else:
            print("Using certutil %s" % certutil)

        if certutil is not None:
            kwargs["certutil_binary"] = certutil
        else:
            print("Unable to find or install certutil, setting ssl-type to none")
            kwargs["ssl_type"] = "none"

    if kwargs["webdriver_binary"] is None and "wdspec" in kwargs["test_types"]:
        webdriver_binary = firefox.find_webdriver()

        if webdriver_binary is None:
            if prompt_install("geckodriver", prompt):
                print("Downloading geckodriver")
                webdriver_binary = firefox.install_webdriver(dest=venv.bin_path)
        else:
            print("Using webdriver binary %s" % webdriver_binary)

        if webdriver_binary:
            kwargs["webdriver_binary"] = webdriver_binary
        else:
            print("Unable to find or install geckodriver, skipping wdspec tests")
            kwargs["test_types"].remove("wdspec")

    if kwargs["prefs_root"] is None:
        print("Downloading gecko prefs")
        kwargs["prefs_root"] = firefox.install_prefs(venv.path)
|
||||
|
||||
|
||||
def setup_firefox(venv, kwargs, prompt=True):
    """Prepare everything needed to run wptrunner against Firefox."""
    firefox_browser = browser.Firefox()
    args_firefox(venv, kwargs, firefox_browser, prompt)
    venv.install_requirements(os.path.join(wpt_root, "tools", "wptrunner", firefox_browser.requirements))
|
||||
|
||||
|
||||
def args_chrome(venv, kwargs, chrome, prompt=True):
    """Fill in the Chrome-specific wptrunner arguments (chromedriver path)."""
    if kwargs["webdriver_binary"] is not None:
        return

    webdriver_binary = chrome.find_webdriver()
    if webdriver_binary is None:
        if prompt_install("chromedriver", prompt):
            print("Downloading chromedriver")
            webdriver_binary = chrome.install_webdriver(dest=venv.bin_path)
    else:
        print("Using webdriver binary %s" % webdriver_binary)

    if webdriver_binary:
        kwargs["webdriver_binary"] = webdriver_binary
    else:
        exit("Unable to locate or install chromedriver binary")
|
||||
|
||||
|
||||
def setup_chrome(venv, kwargs, prompt=True):
|
||||
chrome = browser.Chrome()
|
||||
args_chrome(venv, kwargs, chrome, prompt)
|
||||
venv.install_requirements(os.path.join(wpt_root, "tools", "wptrunner", chrome.requirements))
|
||||
|
||||
|
||||
def args_edge(venv, kwargs, edge, prompt=True):
|
||||
if kwargs["webdriver_binary"] is None:
|
||||
webdriver_binary = edge.find_webdriver()
|
||||
|
||||
if webdriver_binary is None:
|
||||
exit("""Unable to find WebDriver and we aren't yet clever enough to work out which
|
||||
version to download. Please go to the following URL and install the correct
|
||||
version for your Edge/Windows release somewhere on the %PATH%:
|
||||
|
||||
https://developer.microsoft.com/en-us/microsoft-edge/tools/webdriver/
|
||||
""")
|
||||
kwargs["webdriver_binary"] = webdriver_binary
|
||||
|
||||
|
||||
def setup_edge(venv, kwargs, prompt=True):
|
||||
edge = browser.Edge()
|
||||
args_edge(venv, kwargs, edge, prompt)
|
||||
venv.install_requirements(os.path.join(wpt_root, "tools", "wptrunner", edge.requirements))
|
||||
|
||||
|
||||
def setup_sauce(kwargs):
|
||||
raise NotImplementedError
|
||||
|
||||
|
||||
def args_servo(venv, kwargs, servo, prompt=True):
|
||||
if kwargs["binary"] is None:
|
||||
binary = servo.find_binary()
|
||||
|
||||
if binary is None:
|
||||
exit("Unable to find servo binary on the PATH")
|
||||
kwargs["binary"] = binary
|
||||
|
||||
|
||||
def setup_servo(venv, kwargs, prompt=True):
|
||||
servo = browser.Servo()
|
||||
args_servo(venv, kwargs, servo, prompt)
|
||||
venv.install_requirements(os.path.join(wpt_root, "tools", "wptrunner", servo.requirements))
|
||||
|
||||
|
||||
product_setup = {
|
||||
"firefox": setup_firefox,
|
||||
"chrome": setup_chrome,
|
||||
"edge": setup_edge,
|
||||
"servo": setup_servo
|
||||
}
|
||||
|
||||
|
||||
def setup_wptrunner(venv, product, tests, wptrunner_args, prompt=True,):
|
||||
from wptrunner import wptrunner, wptcommandline
|
||||
|
||||
global logger
|
||||
|
||||
wptparser = wptcommandline.create_parser()
|
||||
kwargs = utils.Kwargs(vars(wptparser.parse_args(wptrunner_args)).iteritems())
|
||||
|
||||
wptrunner.setup_logging(kwargs, {"mach": sys.stdout})
|
||||
logger = wptrunner.logger
|
||||
|
||||
kwargs["product"] = product
|
||||
kwargs["test_list"] = tests
|
||||
|
||||
check_environ(product)
|
||||
args_general(kwargs)
|
||||
|
||||
if product not in product_setup:
|
||||
exit("Unsupported product %s" % product)
|
||||
|
||||
product_setup[product](venv, kwargs, prompt)
|
||||
|
||||
wptcommandline.check_args(kwargs)
|
||||
|
||||
wptrunner_path = os.path.join(wpt_root, "tools", "wptrunner")
|
||||
|
||||
venv.install_requirements(os.path.join(wptrunner_path, "requirements.txt"))
|
||||
|
||||
return kwargs
|
||||
|
||||
|
||||
def main():
|
||||
parser = create_parser()
|
||||
args = parser.parse_args()
|
||||
|
||||
venv = virtualenv.Virtualenv(os.path.join(wpt_root, "_venv_%s") % platform.uname()[0])
|
||||
venv.start()
|
||||
venv.install_requirements(os.path.join(wpt_root, "tools", "wptrunner", "requirements.txt"))
|
||||
venv.install("requests")
|
||||
|
||||
kwargs = setup_wptrunner(venv, args.product, args.tests, args.wptrunner_args, prompt=args.prompt)
|
||||
from wptrunner import wptrunner
|
||||
wptrunner.start(**kwargs)
|
||||
|
||||
|
||||
if __name__ == "__main__":
|
||||
import pdb
|
||||
try:
|
||||
main()
|
||||
except:
|
||||
pdb.post_mortem()
|
|
@ -1,4 +1,5 @@
|
|||
html5lib >= 0.99
|
||||
mozinfo >= 0.7
|
||||
mozlog >= 3.3
|
||||
mozlog >= 3.5
|
||||
mozdebug >= 0.1
|
||||
urllib3[secure]
|
||||
|
|
|
@ -0,0 +1,2 @@
|
|||
mozprocess >= 0.19
|
||||
selenium >= 2.41.0
|
|
@ -25,6 +25,7 @@ module global scope.
|
|||
product_list = ["chrome",
|
||||
"edge",
|
||||
"firefox",
|
||||
"ie",
|
||||
"sauce",
|
||||
"servo",
|
||||
"servodriver"]
|
||||
|
|
|
@ -119,6 +119,10 @@ class Browser(object):
|
|||
with which it should be instantiated"""
|
||||
return ExecutorBrowser, {}
|
||||
|
||||
def check_for_crashes(self):
|
||||
"""Check for crashes that didn't cause the browser process to terminate"""
|
||||
return False
|
||||
|
||||
def log_crash(self, process, test):
|
||||
"""Return a list of dictionaries containing information about crashes that happend
|
||||
in the browser, or an empty list if no crashes occurred"""
|
||||
|
|
|
@ -3,13 +3,15 @@ from ..webdriver_server import ChromeDriverServer
|
|||
from ..executors import executor_kwargs as base_executor_kwargs
|
||||
from ..executors.executorselenium import (SeleniumTestharnessExecutor,
|
||||
SeleniumRefTestExecutor)
|
||||
from ..executors.executorchrome import ChromeDriverWdspecExecutor
|
||||
|
||||
|
||||
__wptrunner__ = {"product": "chrome",
|
||||
"check_args": "check_args",
|
||||
"browser": "ChromeBrowser",
|
||||
"executor": {"testharness": "SeleniumTestharnessExecutor",
|
||||
"reftest": "SeleniumRefTestExecutor"},
|
||||
"reftest": "SeleniumRefTestExecutor",
|
||||
"wdspec": "ChromeDriverWdspecExecutor"},
|
||||
"browser_kwargs": "browser_kwargs",
|
||||
"executor_kwargs": "executor_kwargs",
|
||||
"env_extras": "env_extras",
|
||||
|
@ -44,6 +46,11 @@ def executor_kwargs(test_type, server_config, cache_manager, run_info_data,
|
|||
for (kwarg, capability) in [("binary", "binary"), ("binary_args", "args")]:
|
||||
if kwargs[kwarg] is not None:
|
||||
capabilities["chromeOptions"][capability] = kwargs[kwarg]
|
||||
if test_type == "testharness":
|
||||
capabilities["chromeOptions"]["useAutomationExtension"] = False
|
||||
capabilities["chromeOptions"]["excludeSwitches"] = ["enable-automation"]
|
||||
if test_type == "wdspec":
|
||||
capabilities["chromeOptions"]["w3c"] = True
|
||||
executor_kwargs["capabilities"] = capabilities
|
||||
return executor_kwargs
|
||||
|
||||
|
|
|
@ -55,8 +55,8 @@ class EdgeBrowser(Browser):
|
|||
print self.server.url
|
||||
self.server.start()
|
||||
|
||||
def stop(self):
|
||||
self.server.stop()
|
||||
def stop(self, force=False):
|
||||
self.server.stop(force=force)
|
||||
|
||||
def pid(self):
|
||||
return self.server.pid
|
||||
|
|
|
@ -65,6 +65,7 @@ def browser_kwargs(test_type, run_info_data, **kwargs):
|
|||
return {"binary": kwargs["binary"],
|
||||
"prefs_root": kwargs["prefs_root"],
|
||||
"extra_prefs": kwargs["extra_prefs"],
|
||||
"test_type": test_type,
|
||||
"debug_info": kwargs["debug_info"],
|
||||
"symbols_path": kwargs["symbols_path"],
|
||||
"stackwalk_binary": kwargs["stackwalk_binary"],
|
||||
|
@ -76,7 +77,8 @@ def browser_kwargs(test_type, run_info_data, **kwargs):
|
|||
"timeout_multiplier": get_timeout_multiplier(test_type,
|
||||
run_info_data,
|
||||
**kwargs),
|
||||
"leak_check": kwargs["leak_check"]}
|
||||
"leak_check": kwargs["leak_check"],
|
||||
"stylo_threads": kwargs["stylo_threads"]}
|
||||
|
||||
|
||||
def executor_kwargs(test_type, server_config, cache_manager, run_info_data,
|
||||
|
@ -87,10 +89,10 @@ def executor_kwargs(test_type, server_config, cache_manager, run_info_data,
|
|||
executor_kwargs["timeout_multiplier"] = get_timeout_multiplier(test_type,
|
||||
run_info_data,
|
||||
**kwargs)
|
||||
if test_type == "reftest":
|
||||
executor_kwargs["reftest_internal"] = kwargs["reftest_internal"]
|
||||
executor_kwargs["reftest_screenshot"] = kwargs["reftest_screenshot"]
|
||||
if test_type == "wdspec":
|
||||
executor_kwargs["binary"] = kwargs["binary"]
|
||||
executor_kwargs["webdriver_binary"] = kwargs.get("webdriver_binary")
|
||||
executor_kwargs["webdriver_args"] = kwargs.get("webdriver_args")
|
||||
fxOptions = {}
|
||||
if kwargs["binary"]:
|
||||
fxOptions["binary"] = kwargs["binary"]
|
||||
|
@ -117,11 +119,13 @@ def env_options():
|
|||
|
||||
|
||||
def run_info_extras(**kwargs):
|
||||
return {"e10s": kwargs["gecko_e10s"]}
|
||||
return {"e10s": kwargs["gecko_e10s"],
|
||||
"headless": "MOZ_HEADLESS" in os.environ}
|
||||
|
||||
|
||||
def update_properties():
|
||||
return ["debug", "e10s", "os", "version", "processor", "bits"], {"debug", "e10s"}
|
||||
return (["debug", "stylo", "e10s", "os", "version", "processor", "bits"],
|
||||
{"debug", "e10s", "stylo"})
|
||||
|
||||
|
||||
class FirefoxBrowser(Browser):
|
||||
|
@ -129,13 +133,14 @@ class FirefoxBrowser(Browser):
|
|||
init_timeout = 60
|
||||
shutdown_timeout = 60
|
||||
|
||||
def __init__(self, logger, binary, prefs_root, extra_prefs=None, debug_info=None,
|
||||
def __init__(self, logger, binary, prefs_root, test_type, extra_prefs=None, debug_info=None,
|
||||
symbols_path=None, stackwalk_binary=None, certutil_binary=None,
|
||||
ca_certificate_path=None, e10s=False, stackfix_dir=None,
|
||||
binary_args=None, timeout_multiplier=None, leak_check=False):
|
||||
binary_args=None, timeout_multiplier=None, leak_check=False, stylo_threads=1):
|
||||
Browser.__init__(self, logger)
|
||||
self.binary = binary
|
||||
self.prefs_root = prefs_root
|
||||
self.test_type = test_type
|
||||
self.extra_prefs = extra_prefs
|
||||
self.marionette_port = None
|
||||
self.runner = None
|
||||
|
@ -147,7 +152,7 @@ class FirefoxBrowser(Browser):
|
|||
self.certutil_binary = certutil_binary
|
||||
self.e10s = e10s
|
||||
self.binary_args = binary_args
|
||||
if self.symbols_path and stackfix_dir:
|
||||
if stackfix_dir:
|
||||
self.stack_fixer = get_stack_fixer_function(stackfix_dir,
|
||||
self.symbols_path)
|
||||
else:
|
||||
|
@ -158,6 +163,7 @@ class FirefoxBrowser(Browser):
|
|||
|
||||
self.leak_report_file = None
|
||||
self.leak_check = leak_check
|
||||
self.stylo_threads = stylo_threads
|
||||
|
||||
def settings(self, test):
|
||||
return {"check_leaks": self.leak_check and not test.leaks}
|
||||
|
@ -168,7 +174,10 @@ class FirefoxBrowser(Browser):
|
|||
self.used_ports.add(self.marionette_port)
|
||||
|
||||
env = os.environ.copy()
|
||||
env["MOZ_CRASHREPORTER"] = "1"
|
||||
env["MOZ_CRASHREPORTER_SHUTDOWN"] = "1"
|
||||
env["MOZ_DISABLE_NONLOCAL_CONNECTIONS"] = "1"
|
||||
env["STYLO_THREADS"] = str(self.stylo_threads)
|
||||
|
||||
locations = ServerLocations(filename=os.path.join(here, "server-locations.txt"))
|
||||
|
||||
|
@ -180,10 +189,14 @@ class FirefoxBrowser(Browser):
|
|||
"dom.disable_open_during_load": False,
|
||||
"network.dns.localDomains": ",".join(hostnames),
|
||||
"network.proxy.type": 0,
|
||||
"places.history.enabled": False})
|
||||
"places.history.enabled": False,
|
||||
"dom.send_after_paint_to_content": True})
|
||||
if self.e10s:
|
||||
self.profile.set_preferences({"browser.tabs.remote.autostart": True})
|
||||
|
||||
if self.test_type == "reftest":
|
||||
self.profile.set_preferences({"layout.interruptible-reflow.enabled": False})
|
||||
|
||||
if self.leak_check and kwargs.get("check_leaks", True):
|
||||
self.leak_report_file = os.path.join(self.profile.profile, "runtests_leaks.log")
|
||||
if os.path.exists(self.leak_report_file):
|
||||
|
@ -297,6 +310,14 @@ class FirefoxBrowser(Browser):
|
|||
assert self.marionette_port is not None
|
||||
return ExecutorBrowser, {"marionette_port": self.marionette_port}
|
||||
|
||||
def check_for_crashes(self):
|
||||
dump_dir = os.path.join(self.profile.profile, "minidumps")
|
||||
|
||||
return bool(mozcrash.check_for_crashes(dump_dir,
|
||||
symbols_path=self.symbols_path,
|
||||
stackwalk_binary=self.stackwalk_binary,
|
||||
quiet=True))
|
||||
|
||||
def log_crash(self, process, test):
|
||||
dump_dir = os.path.join(self.profile.profile, "minidumps")
|
||||
|
||||
|
|
|
@ -0,0 +1,81 @@
|
|||
from .base import Browser, ExecutorBrowser, require_arg
|
||||
from ..webdriver_server import InternetExplorerDriverServer
|
||||
from ..executors import executor_kwargs as base_executor_kwargs
|
||||
from ..executors.executorselenium import (SeleniumTestharnessExecutor,
|
||||
SeleniumRefTestExecutor)
|
||||
from ..executors.executorinternetexplorer import InternetExplorerDriverWdspecExecutor
|
||||
|
||||
__wptrunner__ = {"product": "ie",
|
||||
"check_args": "check_args",
|
||||
"browser": "InternetExplorerBrowser",
|
||||
"executor": {"testharness": "SeleniumTestharnessExecutor",
|
||||
"reftest": "SeleniumRefTestExecutor",
|
||||
"wdspec": "InternetExplorerDriverWdspecExecutor"},
|
||||
"browser_kwargs": "browser_kwargs",
|
||||
"executor_kwargs": "executor_kwargs",
|
||||
"env_extras": "env_extras",
|
||||
"env_options": "env_options"}
|
||||
|
||||
|
||||
def check_args(**kwargs):
|
||||
require_arg(kwargs, "webdriver_binary")
|
||||
|
||||
def browser_kwargs(test_type, run_info_data, **kwargs):
|
||||
return {"webdriver_binary": kwargs["webdriver_binary"],
|
||||
"webdriver_args": kwargs.get("webdriver_args")}
|
||||
|
||||
def executor_kwargs(test_type, server_config, cache_manager, run_info_data,
|
||||
**kwargs):
|
||||
from selenium.webdriver import DesiredCapabilities
|
||||
|
||||
ieOptions = {}
|
||||
ieOptions["requireWindowFocus"] = True
|
||||
capabilities = {}
|
||||
capabilities["browserName"] = "internet explorer"
|
||||
capabilities["platformName"] = "windows"
|
||||
capabilities["se:ieOptions"] = ieOptions
|
||||
executor_kwargs = base_executor_kwargs(test_type, server_config,
|
||||
cache_manager, **kwargs)
|
||||
executor_kwargs["close_after_done"] = True
|
||||
executor_kwargs["capabilities"] = capabilities
|
||||
return executor_kwargs
|
||||
|
||||
def env_extras(**kwargs):
|
||||
return []
|
||||
|
||||
def env_options():
|
||||
return {"host": "web-platform.test",
|
||||
"bind_hostname": "true",
|
||||
"supports_debugger": False}
|
||||
|
||||
class InternetExplorerBrowser(Browser):
|
||||
used_ports = set()
|
||||
|
||||
def __init__(self, logger, webdriver_binary, webdriver_args=None):
|
||||
Browser.__init__(self, logger)
|
||||
self.server = InterentExplorerDriverServer(self.logger,
|
||||
binary=webdriver_binary,
|
||||
args=webdriver_args)
|
||||
self.webdriver_host = "localhost"
|
||||
self.webdriver_port = self.server.port
|
||||
|
||||
def start(self, **kwargs):
|
||||
self.server.start()
|
||||
|
||||
def stop(self, force=False):
|
||||
self.server.stop(force=force)
|
||||
|
||||
def pid(self):
|
||||
return self.server.pid
|
||||
|
||||
def is_alive(self):
|
||||
# TODO(ato): This only indicates the server is alive,
|
||||
# and doesn't say anything about whether a browser session
|
||||
# is active.
|
||||
return self.server.is_alive()
|
||||
|
||||
def cleanup(self):
|
||||
self.stop()
|
||||
|
||||
def executor_browser(self):
|
||||
return ExecutorBrowser, {"webdriver_url": self.server.url}
|
|
@ -138,7 +138,7 @@ class SauceConnect():
|
|||
def __enter__(self, options):
|
||||
if not self.sauce_connect_binary:
|
||||
self.temp_dir = tempfile.mkdtemp()
|
||||
get_tar("https://saucelabs.com/downloads/sc-latest-linux.tar.gz", self.temp_dir)
|
||||
get_tar("https://saucelabs.com/downloads/sc-4.4.9-linux.tar.gz", self.temp_dir)
|
||||
self.sauce_connect_binary = glob.glob(os.path.join(self.temp_dir, "sc-*-linux/bin/sc"))[0]
|
||||
|
||||
self.upload_prerun_exec('edge-prerun.bat')
|
||||
|
@ -161,9 +161,9 @@ class SauceConnect():
|
|||
if self.sc_process.returncode is not None and self.sc_process.returncode > 0:
|
||||
raise SauceException("Unable to start Sauce Connect Proxy. Process exited with code %s", self.sc_process.returncode)
|
||||
|
||||
def __exit__(self, *args):
|
||||
def __exit__(self, exc_type, exc_val, exc_tb):
|
||||
self.sc_process.terminate()
|
||||
if os.path.exists(self.temp_dir):
|
||||
if self.temp_dir and os.path.exists(self.temp_dir):
|
||||
try:
|
||||
shutil.rmtree(self.temp_dir)
|
||||
except OSError:
|
||||
|
|
|
@ -6,7 +6,7 @@ import socket
|
|||
import sys
|
||||
import time
|
||||
|
||||
from mozlog import get_default_logger, handlers
|
||||
from mozlog import get_default_logger, handlers, proxy
|
||||
|
||||
from wptlogging import LogLevelRewriter
|
||||
|
||||
|
@ -117,7 +117,7 @@ class TestEnvironment(object):
|
|||
for port, server in servers:
|
||||
server.kill()
|
||||
for cm in self.env_extras:
|
||||
cm.__exit__()
|
||||
cm.__exit__(exc_type, exc_val, exc_tb)
|
||||
self.cache_manager.__exit__(exc_type, exc_val, exc_tb)
|
||||
self.ssl_env.__exit__(exc_type, exc_val, exc_tb)
|
||||
self.stash.__exit__()
|
||||
|
@ -168,6 +168,8 @@ class TestEnvironment(object):
|
|||
log_filter = LogLevelRewriter(log_filter, ["error"], "warning")
|
||||
server_logger.component_filter = log_filter
|
||||
|
||||
server_logger = proxy.QueuedProxyLogger(server_logger)
|
||||
|
||||
try:
|
||||
#Set as the default logger for wptserve
|
||||
serve.set_logger(server_logger)
|
||||
|
|
|
@ -1,7 +1,9 @@
|
|||
import hashlib
|
||||
import json
|
||||
import httplib
|
||||
import os
|
||||
import threading
|
||||
import traceback
|
||||
import socket
|
||||
import urlparse
|
||||
from abc import ABCMeta, abstractmethod
|
||||
|
||||
|
@ -9,6 +11,10 @@ from ..testrunner import Stop
|
|||
|
||||
here = os.path.split(__file__)[0]
|
||||
|
||||
# Extra timeout to use after internal test timeout at which the harness
|
||||
# should force a timeout
|
||||
extra_timeout = 5 # seconds
|
||||
|
||||
|
||||
def executor_kwargs(test_type, server_config, cache_manager, **kwargs):
|
||||
timeout_multiplier = kwargs["timeout_multiplier"]
|
||||
|
@ -22,6 +28,11 @@ def executor_kwargs(test_type, server_config, cache_manager, **kwargs):
|
|||
if test_type == "reftest":
|
||||
executor_kwargs["screenshot_cache"] = cache_manager.dict()
|
||||
|
||||
if test_type == "wdspec":
|
||||
executor_kwargs["binary"] = kwargs.get("binary")
|
||||
executor_kwargs["webdriver_binary"] = kwargs.get("webdriver_binary")
|
||||
executor_kwargs["webdriver_args"] = kwargs.get("webdriver_args")
|
||||
|
||||
return executor_kwargs
|
||||
|
||||
|
||||
|
@ -93,7 +104,7 @@ class TestExecutor(object):
|
|||
convert_result = None
|
||||
|
||||
def __init__(self, browser, server_config, timeout_multiplier=1,
|
||||
debug_info=None):
|
||||
debug_info=None, **kwargs):
|
||||
"""Abstract Base class for object that actually executes the tests in a
|
||||
specific browser. Typically there will be a different TestExecutor
|
||||
subclass for each test type and method of executing tests.
|
||||
|
@ -196,7 +207,7 @@ class RefTestExecutor(TestExecutor):
|
|||
convert_result = reftest_result_converter
|
||||
|
||||
def __init__(self, browser, server_config, timeout_multiplier=1, screenshot_cache=None,
|
||||
debug_info=None):
|
||||
debug_info=None, **kwargs):
|
||||
TestExecutor.__init__(self, browser, server_config,
|
||||
timeout_multiplier=timeout_multiplier,
|
||||
debug_info=debug_info)
|
||||
|
@ -215,6 +226,12 @@ class RefTestImplementation(object):
|
|||
self.screenshot_cache = self.executor.screenshot_cache
|
||||
self.message = None
|
||||
|
||||
def setup(self):
|
||||
pass
|
||||
|
||||
def teardown(self):
|
||||
pass
|
||||
|
||||
@property
|
||||
def logger(self):
|
||||
return self.executor.logger
|
||||
|
@ -304,6 +321,51 @@ class RefTestImplementation(object):
|
|||
|
||||
class WdspecExecutor(TestExecutor):
|
||||
convert_result = pytest_result_converter
|
||||
protocol_cls = None
|
||||
|
||||
def __init__(self, browser, server_config, webdriver_binary,
|
||||
webdriver_args, timeout_multiplier=1, capabilities=None,
|
||||
debug_info=None, **kwargs):
|
||||
self.do_delayed_imports()
|
||||
TestExecutor.__init__(self, browser, server_config,
|
||||
timeout_multiplier=timeout_multiplier,
|
||||
debug_info=debug_info)
|
||||
self.webdriver_binary = webdriver_binary
|
||||
self.webdriver_args = webdriver_args
|
||||
self.timeout_multiplier = timeout_multiplier
|
||||
self.capabilities = capabilities
|
||||
self.protocol = self.protocol_cls(self, browser)
|
||||
|
||||
def is_alive(self):
|
||||
return self.protocol.is_alive
|
||||
|
||||
def on_environment_change(self, new_environment):
|
||||
pass
|
||||
|
||||
def do_test(self, test):
|
||||
timeout = test.timeout * self.timeout_multiplier + extra_timeout
|
||||
|
||||
success, data = WdspecRun(self.do_wdspec,
|
||||
self.protocol.session_config,
|
||||
test.abs_path,
|
||||
timeout).run()
|
||||
|
||||
if success:
|
||||
return self.convert_result(test, data)
|
||||
|
||||
return (test.result_cls(*data), [])
|
||||
|
||||
def do_wdspec(self, session_config, path, timeout):
|
||||
harness_result = ("OK", None)
|
||||
subtest_results = pytestrunner.run(path,
|
||||
self.server_config,
|
||||
session_config,
|
||||
timeout=timeout)
|
||||
return (harness_result, subtest_results)
|
||||
|
||||
def do_delayed_imports(self):
|
||||
global pytestrunner
|
||||
from . import pytestrunner
|
||||
|
||||
|
||||
class Protocol(object):
|
||||
|
@ -323,3 +385,95 @@ class Protocol(object):
|
|||
|
||||
def wait(self):
|
||||
pass
|
||||
|
||||
|
||||
class WdspecRun(object):
|
||||
def __init__(self, func, session, path, timeout):
|
||||
self.func = func
|
||||
self.result = (None, None)
|
||||
self.session = session
|
||||
self.path = path
|
||||
self.timeout = timeout
|
||||
self.result_flag = threading.Event()
|
||||
|
||||
def run(self):
|
||||
"""Runs function in a thread and interrupts it if it exceeds the
|
||||
given timeout. Returns (True, (Result, [SubtestResult ...])) in
|
||||
case of success, or (False, (status, extra information)) in the
|
||||
event of failure.
|
||||
"""
|
||||
|
||||
executor = threading.Thread(target=self._run)
|
||||
executor.start()
|
||||
|
||||
flag = self.result_flag.wait(self.timeout)
|
||||
if self.result[1] is None:
|
||||
self.result = False, ("EXTERNAL-TIMEOUT", None)
|
||||
|
||||
return self.result
|
||||
|
||||
def _run(self):
|
||||
try:
|
||||
self.result = True, self.func(self.session, self.path, self.timeout)
|
||||
except (socket.timeout, IOError):
|
||||
self.result = False, ("CRASH", None)
|
||||
except Exception as e:
|
||||
message = getattr(e, "message")
|
||||
if message:
|
||||
message += "\n"
|
||||
message += traceback.format_exc(e)
|
||||
self.result = False, ("ERROR", message)
|
||||
finally:
|
||||
self.result_flag.set()
|
||||
|
||||
|
||||
class WebDriverProtocol(Protocol):
|
||||
server_cls = None
|
||||
|
||||
def __init__(self, executor, browser):
|
||||
Protocol.__init__(self, executor, browser)
|
||||
self.webdriver_binary = executor.webdriver_binary
|
||||
self.webdriver_args = executor.webdriver_args
|
||||
self.capabilities = self.executor.capabilities
|
||||
self.session_config = None
|
||||
self.server = None
|
||||
|
||||
def setup(self, runner):
|
||||
"""Connect to browser via the HTTP server."""
|
||||
try:
|
||||
self.server = self.server_cls(
|
||||
self.logger,
|
||||
binary=self.webdriver_binary,
|
||||
args=self.webdriver_args)
|
||||
self.server.start(block=False)
|
||||
self.logger.info(
|
||||
"WebDriver HTTP server listening at %s" % self.server.url)
|
||||
self.session_config = {"host": self.server.host,
|
||||
"port": self.server.port,
|
||||
"capabilities": self.capabilities}
|
||||
except Exception:
|
||||
self.logger.error(traceback.format_exc())
|
||||
self.executor.runner.send_message("init_failed")
|
||||
else:
|
||||
self.executor.runner.send_message("init_succeeded")
|
||||
|
||||
def teardown(self):
|
||||
if self.server is not None and self.server.is_alive:
|
||||
self.server.stop()
|
||||
|
||||
@property
|
||||
def is_alive(self):
|
||||
"""Test that the connection is still alive.
|
||||
|
||||
Because the remote communication happens over HTTP we need to
|
||||
make an explicit request to the remote. It is allowed for
|
||||
WebDriver spec tests to not have a WebDriver session, since this
|
||||
may be what is tested.
|
||||
|
||||
An HTTP request to an invalid path that results in a 404 is
|
||||
proof enough to us that the server is alive and kicking.
|
||||
"""
|
||||
conn = httplib.HTTPConnection(self.server.host, self.server.port)
|
||||
conn.request("HEAD", self.server.base_path + "invalid")
|
||||
res = conn.getresponse()
|
||||
return res.status == 404
|
||||
|
|
|
@ -0,0 +1,10 @@
|
|||
from ..webdriver_server import ChromeDriverServer
|
||||
from .base import WdspecExecutor, WebDriverProtocol
|
||||
|
||||
|
||||
class ChromeDriverProtocol(WebDriverProtocol):
|
||||
server_cls = ChromeDriverServer
|
||||
|
||||
|
||||
class ChromeDriverWdspecExecutor(WdspecExecutor):
|
||||
protocol_cls = ChromeDriverProtocol
|
|
@ -0,0 +1,10 @@
|
|||
from ..webdriver_server import InternetExplorerDriverServer
|
||||
from .base import WdspecExecutor, WebDriverProtocol
|
||||
|
||||
|
||||
class InternetExplorerDriverProtocol(WebDriverProtocol):
|
||||
server_cls = InternetExplorerDriverServer
|
||||
|
||||
|
||||
class InternetExplorerDriverWdspecExecutor(WdspecExecutor):
|
||||
protocol_cls = InternetExplorerDriverProtocol
|
|
@ -1,15 +1,9 @@
|
|||
import hashlib
|
||||
import httplib
|
||||
import os
|
||||
import socket
|
||||
import threading
|
||||
import time
|
||||
import traceback
|
||||
import urlparse
|
||||
import uuid
|
||||
from collections import defaultdict
|
||||
|
||||
from ..wpttest import WdspecResult, WdspecSubtestResult
|
||||
|
||||
errors = None
|
||||
marionette = None
|
||||
|
@ -23,16 +17,17 @@ from .base import (ExecutorException,
|
|||
RefTestImplementation,
|
||||
TestExecutor,
|
||||
TestharnessExecutor,
|
||||
WdspecExecutor,
|
||||
WdspecRun,
|
||||
WebDriverProtocol,
|
||||
extra_timeout,
|
||||
testharness_result_converter,
|
||||
reftest_result_converter,
|
||||
strip_server,
|
||||
WdspecExecutor)
|
||||
strip_server)
|
||||
|
||||
from ..testrunner import Stop
|
||||
from ..webdriver_server import GeckoDriverServer
|
||||
|
||||
# Extra timeout to use after internal test timeout at which the harness
|
||||
# should force a timeout
|
||||
extra_timeout = 5 # seconds
|
||||
|
||||
|
||||
def do_delayed_imports():
|
||||
|
@ -176,9 +171,13 @@ class MarionetteProtocol(Protocol):
|
|||
if socket_timeout:
|
||||
self.marionette.timeout.script = socket_timeout / 2
|
||||
|
||||
self.marionette.switch_to_window(self.runner_handle)
|
||||
while True:
|
||||
try:
|
||||
self.marionette.execute_async_script("")
|
||||
except errors.NoSuchWindowException:
|
||||
# The window closed
|
||||
break
|
||||
except errors.ScriptTimeoutException:
|
||||
self.logger.debug("Script timed out")
|
||||
pass
|
||||
|
@ -186,7 +185,7 @@ class MarionetteProtocol(Protocol):
|
|||
self.logger.debug("Socket closed")
|
||||
break
|
||||
except Exception as e:
|
||||
self.logger.error(traceback.format_exc(e))
|
||||
self.logger.warning(traceback.format_exc(e))
|
||||
break
|
||||
|
||||
def on_environment_change(self, old_environment, new_environment):
|
||||
|
@ -283,57 +282,6 @@ class MarionetteProtocol(Protocol):
|
|||
self.marionette.execute_script(script)
|
||||
|
||||
|
||||
class RemoteMarionetteProtocol(Protocol):
|
||||
def __init__(self, executor, browser):
|
||||
do_delayed_imports()
|
||||
Protocol.__init__(self, executor, browser)
|
||||
self.webdriver_binary = executor.webdriver_binary
|
||||
self.webdriver_args = executor.webdriver_args
|
||||
self.capabilities = self.executor.capabilities
|
||||
self.session_config = None
|
||||
self.server = None
|
||||
|
||||
def setup(self, runner):
|
||||
"""Connect to browser via the Marionette HTTP server."""
|
||||
try:
|
||||
self.server = GeckoDriverServer(
|
||||
self.logger,
|
||||
binary=self.webdriver_binary,
|
||||
args=self.webdriver_args)
|
||||
self.server.start(block=False)
|
||||
self.logger.info(
|
||||
"WebDriver HTTP server listening at %s" % self.server.url)
|
||||
self.session_config = {"host": self.server.host,
|
||||
"port": self.server.port,
|
||||
"capabilities": self.capabilities}
|
||||
except Exception:
|
||||
self.logger.error(traceback.format_exc())
|
||||
self.executor.runner.send_message("init_failed")
|
||||
else:
|
||||
self.executor.runner.send_message("init_succeeded")
|
||||
|
||||
def teardown(self):
|
||||
if self.server is not None and self.server.is_alive:
|
||||
self.server.stop()
|
||||
|
||||
@property
|
||||
def is_alive(self):
|
||||
"""Test that the Marionette connection is still alive.
|
||||
|
||||
Because the remote communication happens over HTTP we need to
|
||||
make an explicit request to the remote. It is allowed for
|
||||
WebDriver spec tests to not have a WebDriver session, since this
|
||||
may be what is tested.
|
||||
|
||||
An HTTP request to an invalid path that results in a 404 is
|
||||
proof enough to us that the server is alive and kicking.
|
||||
"""
|
||||
conn = httplib.HTTPConnection(self.server.host, self.server.port)
|
||||
conn.request("HEAD", self.server.base_path + "invalid")
|
||||
res = conn.getresponse()
|
||||
return res.status == 404
|
||||
|
||||
|
||||
class ExecuteAsyncScriptRun(object):
|
||||
def __init__(self, logger, func, protocol, url, timeout):
|
||||
self.logger = logger
|
||||
|
@ -375,9 +323,17 @@ class ExecuteAsyncScriptRun(object):
|
|||
wait_timeout = None
|
||||
|
||||
flag = self.result_flag.wait(wait_timeout)
|
||||
if self.result[1] is None:
|
||||
|
||||
if self.result == (None, None):
|
||||
self.logger.debug("Timed out waiting for a result")
|
||||
self.result = False, ("EXTERNAL-TIMEOUT", None)
|
||||
elif self.result[1] is None:
|
||||
# We didn't get any data back from the test, so check if the
|
||||
# browser is still responsive
|
||||
if self.protocol.is_alive:
|
||||
self.result = False, ("ERROR", None)
|
||||
else:
|
||||
self.result = False, ("CRASH", None)
|
||||
return self.result
|
||||
|
||||
def _run(self):
|
||||
|
@ -467,8 +423,9 @@ class MarionetteTestharnessExecutor(TestharnessExecutor):
|
|||
class MarionetteRefTestExecutor(RefTestExecutor):
|
||||
def __init__(self, browser, server_config, timeout_multiplier=1,
|
||||
screenshot_cache=None, close_after_done=True,
|
||||
debug_info=None, **kwargs):
|
||||
|
||||
debug_info=None, reftest_internal=False,
|
||||
reftest_screenshot="unexpected",
|
||||
group_metadata=None, **kwargs):
|
||||
"""Marionette-based executor for reftests"""
|
||||
RefTestExecutor.__init__(self,
|
||||
browser,
|
||||
|
@ -477,16 +434,36 @@ class MarionetteRefTestExecutor(RefTestExecutor):
|
|||
timeout_multiplier=timeout_multiplier,
|
||||
debug_info=debug_info)
|
||||
self.protocol = MarionetteProtocol(self, browser)
|
||||
self.implementation = RefTestImplementation(self)
|
||||
self.implementation = (InternalRefTestImplementation
|
||||
if reftest_internal
|
||||
else RefTestImplementation)(self)
|
||||
self.implementation_kwargs = ({"screenshot": reftest_screenshot} if
|
||||
reftest_internal else {})
|
||||
|
||||
self.close_after_done = close_after_done
|
||||
self.has_window = False
|
||||
self.original_pref_values = {}
|
||||
self.group_metadata = group_metadata
|
||||
|
||||
with open(os.path.join(here, "reftest.js")) as f:
|
||||
self.script = f.read()
|
||||
with open(os.path.join(here, "reftest-wait.js")) as f:
|
||||
with open(os.path.join(here, "reftest-wait_marionette.js")) as f:
|
||||
self.wait_script = f.read()
|
||||
|
||||
def setup(self, runner):
|
||||
super(self.__class__, self).setup(runner)
|
||||
self.implementation.setup(**self.implementation_kwargs)
|
||||
|
||||
def teardown(self):
|
||||
try:
|
||||
self.implementation.teardown()
|
||||
handle = self.protocol.marionette.window_handles[0]
|
||||
self.protocol.marionette.switch_to_window(handle)
|
||||
super(self.__class__, self).teardown()
|
||||
except Exception as e:
|
||||
# Ignore errors during teardown
|
||||
self.logger.warning(traceback.format_exc(e))
|
||||
|
||||
def is_alive(self):
|
||||
return self.protocol.is_alive
|
||||
|
||||
|
@ -494,16 +471,17 @@ class MarionetteRefTestExecutor(RefTestExecutor):
|
|||
self.protocol.on_environment_change(self.last_environment, new_environment)
|
||||
|
||||
def do_test(self, test):
|
||||
if self.close_after_done and self.has_window:
|
||||
self.protocol.marionette.close()
|
||||
self.protocol.marionette.switch_to_window(
|
||||
self.protocol.marionette.window_handles[-1])
|
||||
self.has_window = False
|
||||
if not isinstance(self.implementation, InternalRefTestImplementation):
|
||||
if self.close_after_done and self.has_window:
|
||||
self.protocol.marionette.close()
|
||||
self.protocol.marionette.switch_to_window(
|
||||
self.protocol.marionette.window_handles[-1])
|
||||
self.has_window = False
|
||||
|
||||
if not self.has_window:
|
||||
self.protocol.marionette.execute_script(self.script)
|
||||
self.protocol.marionette.switch_to_window(self.protocol.marionette.window_handles[-1])
|
||||
self.has_window = True
|
||||
if not self.has_window:
|
||||
self.protocol.marionette.execute_script(self.script)
|
||||
self.protocol.marionette.switch_to_window(self.protocol.marionette.window_handles[-1])
|
||||
self.has_window = True
|
||||
|
||||
result = self.implementation.run_test(test)
|
||||
return self.convert_result(test, result)
|
||||
|
@ -518,10 +496,10 @@ class MarionetteRefTestExecutor(RefTestExecutor):
|
|||
test_url = self.test_url(test)
|
||||
|
||||
return ExecuteAsyncScriptRun(self.logger,
|
||||
self._screenshot,
|
||||
self.protocol,
|
||||
test_url,
|
||||
timeout).run()
|
||||
self._screenshot,
|
||||
self.protocol,
|
||||
test_url,
|
||||
timeout).run()
|
||||
|
||||
def _screenshot(self, marionette, url, timeout):
|
||||
marionette.navigate(url)
|
||||
|
@ -536,86 +514,55 @@ class MarionetteRefTestExecutor(RefTestExecutor):
|
|||
return screenshot
|
||||
|
||||
|
||||
class WdspecRun(object):
|
||||
def __init__(self, func, session, path, timeout):
|
||||
self.func = func
|
||||
self.result = (None, None)
|
||||
self.session = session
|
||||
self.path = path
|
||||
self.timeout = timeout
|
||||
self.result_flag = threading.Event()
|
||||
class InternalRefTestImplementation(object):
|
||||
def __init__(self, executor):
|
||||
self.timeout_multiplier = executor.timeout_multiplier
|
||||
self.executor = executor
|
||||
|
||||
def run(self):
|
||||
"""Runs function in a thread and interrupts it if it exceeds the
|
||||
given timeout. Returns (True, (Result, [SubtestResult ...])) in
|
||||
case of success, or (False, (status, extra information)) in the
|
||||
event of failure.
|
||||
"""
|
||||
@property
|
||||
def logger(self):
|
||||
return self.executor.logger
|
||||
|
||||
executor = threading.Thread(target=self._run)
|
||||
executor.start()
|
||||
def setup(self, screenshot="unexpected"):
|
||||
data = {"screenshot": screenshot}
|
||||
if self.executor.group_metadata is not None:
|
||||
data["urlCount"] = {urlparse.urljoin(self.executor.server_url(key[0]), key[1]):value
|
||||
for key, value in self.executor.group_metadata.get("url_count", {}).iteritems()
|
||||
if value > 1}
|
||||
self.executor.protocol.marionette.set_context(self.executor.protocol.marionette.CONTEXT_CHROME)
|
||||
self.executor.protocol.marionette._send_message("reftest:setup", data)
|
||||
|
||||
flag = self.result_flag.wait(self.timeout)
|
||||
if self.result[1] is None:
|
||||
self.result = False, ("EXTERNAL-TIMEOUT", None)
|
||||
def run_test(self, test):
|
||||
viewport_size = test.viewport_size
|
||||
dpi = test.dpi
|
||||
|
||||
return self.result
|
||||
references = self.get_references(test)
|
||||
rv = self.executor.protocol.marionette._send_message("reftest:run",
|
||||
{"test": self.executor.test_url(test),
|
||||
"references": references,
|
||||
"expected": test.expected(),
|
||||
"timeout": test.timeout * 1000})["value"]
|
||||
return rv
|
||||
|
||||
def _run(self):
|
||||
def get_references(self, node):
|
||||
rv = []
|
||||
for item, relation in node.references:
|
||||
rv.append([self.executor.test_url(item), self.get_references(item), relation])
|
||||
return rv
|
||||
|
||||
def teardown(self):
|
||||
try:
|
||||
self.result = True, self.func(self.session, self.path, self.timeout)
|
||||
except (socket.timeout, IOError):
|
||||
self.result = False, ("CRASH", None)
|
||||
self.executor.protocol.marionette._send_message("reftest:teardown", {})
|
||||
self.executor.protocol.marionette.set_context(self.executor.protocol.marionette.CONTEXT_CONTENT)
|
||||
except Exception as e:
|
||||
message = getattr(e, "message")
|
||||
if message:
|
||||
message += "\n"
|
||||
message += traceback.format_exc(e)
|
||||
self.result = False, ("ERROR", message)
|
||||
finally:
|
||||
self.result_flag.set()
|
||||
# Ignore errors during teardown
|
||||
self.logger.warning(traceback.traceback.format_exc(e))
|
||||
|
||||
|
||||
|
||||
class GeckoDriverProtocol(WebDriverProtocol):
|
||||
server_cls = GeckoDriverServer
|
||||
|
||||
|
||||
class MarionetteWdspecExecutor(WdspecExecutor):
|
||||
def __init__(self, browser, server_config, webdriver_binary,
|
||||
timeout_multiplier=1, close_after_done=True, debug_info=None,
|
||||
capabilities=None, webdriver_args=None, binary=None):
|
||||
self.do_delayed_imports()
|
||||
WdspecExecutor.__init__(self, browser, server_config,
|
||||
timeout_multiplier=timeout_multiplier,
|
||||
debug_info=debug_info)
|
||||
self.webdriver_binary = webdriver_binary
|
||||
self.webdriver_args = webdriver_args + ["--binary", binary]
|
||||
self.capabilities = capabilities
|
||||
self.protocol = RemoteMarionetteProtocol(self, browser)
|
||||
|
||||
def is_alive(self):
|
||||
return self.protocol.is_alive
|
||||
|
||||
def on_environment_change(self, new_environment):
|
||||
pass
|
||||
|
||||
def do_test(self, test):
|
||||
timeout = test.timeout * self.timeout_multiplier + extra_timeout
|
||||
|
||||
success, data = WdspecRun(self.do_wdspec,
|
||||
self.protocol.session_config,
|
||||
test.abs_path,
|
||||
timeout).run()
|
||||
|
||||
if success:
|
||||
return self.convert_result(test, data)
|
||||
|
||||
return (test.result_cls(*data), [])
|
||||
|
||||
def do_wdspec(self, session_config, path, timeout):
|
||||
harness_result = ("OK", None)
|
||||
subtest_results = pytestrunner.run(path,
|
||||
self.server_config,
|
||||
session_config,
|
||||
timeout=timeout)
|
||||
return (harness_result, subtest_results)
|
||||
|
||||
def do_delayed_imports(self):
|
||||
global pytestrunner
|
||||
from . import pytestrunner
|
||||
protocol_cls = GeckoDriverProtocol
|
||||
|
|
|
@ -7,14 +7,11 @@ import traceback
|
|||
import urlparse
|
||||
import uuid
|
||||
|
||||
from .base import (ExecutorException,
|
||||
Protocol,
|
||||
from .base import (Protocol,
|
||||
RefTestExecutor,
|
||||
RefTestImplementation,
|
||||
TestExecutor,
|
||||
TestharnessExecutor,
|
||||
testharness_result_converter,
|
||||
reftest_result_converter,
|
||||
extra_timeout,
|
||||
strip_server)
|
||||
from ..testrunner import Stop
|
||||
|
||||
|
@ -24,7 +21,6 @@ webdriver = None
|
|||
exceptions = None
|
||||
RemoteConnection = None
|
||||
|
||||
extra_timeout = 5
|
||||
|
||||
def do_delayed_imports():
|
||||
global webdriver
|
||||
|
@ -34,6 +30,7 @@ def do_delayed_imports():
|
|||
from selenium.common import exceptions
|
||||
from selenium.webdriver.remote.remote_connection import RemoteConnection
|
||||
|
||||
|
||||
class SeleniumProtocol(Protocol):
|
||||
def __init__(self, executor, browser, capabilities, **kwargs):
|
||||
do_delayed_imports()
|
||||
|
@ -163,7 +160,8 @@ class SeleniumRun(object):
|
|||
|
||||
class SeleniumTestharnessExecutor(TestharnessExecutor):
|
||||
def __init__(self, browser, server_config, timeout_multiplier=1,
|
||||
close_after_done=True, capabilities=None, debug_info=None):
|
||||
close_after_done=True, capabilities=None, debug_info=None,
|
||||
**kwargs):
|
||||
"""Selenium-based executor for testharness.js tests"""
|
||||
TestharnessExecutor.__init__(self, browser, server_config,
|
||||
timeout_multiplier=timeout_multiplier,
|
||||
|
@ -202,10 +200,11 @@ class SeleniumTestharnessExecutor(TestharnessExecutor):
|
|||
"timeout_multiplier": self.timeout_multiplier,
|
||||
"timeout": timeout * 1000})
|
||||
|
||||
|
||||
class SeleniumRefTestExecutor(RefTestExecutor):
|
||||
def __init__(self, browser, server_config, timeout_multiplier=1,
|
||||
screenshot_cache=None, close_after_done=True,
|
||||
debug_info=None, capabilities=None):
|
||||
debug_info=None, capabilities=None, **kwargs):
|
||||
"""Selenium WebDriver-based executor for reftests"""
|
||||
RefTestExecutor.__init__(self,
|
||||
browser,
|
||||
|
|
|
@ -49,7 +49,7 @@ class ServoTestharnessExecutor(ProcessTestExecutor):
|
|||
convert_result = testharness_result_converter
|
||||
|
||||
def __init__(self, browser, server_config, timeout_multiplier=1, debug_info=None,
|
||||
pause_after_test=False):
|
||||
pause_after_test=False, **kwargs):
|
||||
ProcessTestExecutor.__init__(self, browser, server_config,
|
||||
timeout_multiplier=timeout_multiplier,
|
||||
debug_info=debug_info)
|
||||
|
@ -179,7 +179,9 @@ class ServoRefTestExecutor(ProcessTestExecutor):
|
|||
convert_result = reftest_result_converter
|
||||
|
||||
def __init__(self, browser, server_config, binary=None, timeout_multiplier=1,
|
||||
screenshot_cache=None, debug_info=None, pause_after_test=False):
|
||||
screenshot_cache=None, debug_info=None, pause_after_test=False,
|
||||
**kwargs):
|
||||
do_delayed_imports()
|
||||
ProcessTestExecutor.__init__(self,
|
||||
browser,
|
||||
server_config,
|
||||
|
|
|
@ -140,7 +140,8 @@ def timeout_func(timeout):
|
|||
|
||||
class ServoWebDriverTestharnessExecutor(TestharnessExecutor):
|
||||
def __init__(self, browser, server_config, timeout_multiplier=1,
|
||||
close_after_done=True, capabilities=None, debug_info=None):
|
||||
close_after_done=True, capabilities=None, debug_info=None,
|
||||
**kwargs):
|
||||
TestharnessExecutor.__init__(self, browser, server_config, timeout_multiplier=1,
|
||||
debug_info=None)
|
||||
self.protocol = ServoWebDriverProtocol(self, browser, capabilities=capabilities)
|
||||
|
@ -197,7 +198,8 @@ class TimeoutError(Exception):
|
|||
|
||||
class ServoWebDriverRefTestExecutor(RefTestExecutor):
|
||||
def __init__(self, browser, server_config, timeout_multiplier=1,
|
||||
screenshot_cache=None, capabilities=None, debug_info=None):
|
||||
screenshot_cache=None, capabilities=None, debug_info=None,
|
||||
**kwargs):
|
||||
"""Selenium WebDriver-based executor for reftests"""
|
||||
RefTestExecutor.__init__(self,
|
||||
browser,
|
||||
|
@ -209,7 +211,7 @@ class ServoWebDriverRefTestExecutor(RefTestExecutor):
|
|||
capabilities=capabilities)
|
||||
self.implementation = RefTestImplementation(self)
|
||||
self.timeout = None
|
||||
with open(os.path.join(here, "reftest-wait_servodriver.js")) as f:
|
||||
with open(os.path.join(here, "reftest-wait_webdriver.js")) as f:
|
||||
self.wait_script = f.read()
|
||||
|
||||
def is_alive(self):
|
||||
|
|
|
@ -56,6 +56,8 @@ def run(path, server_config, session_config, timeout=0):
|
|||
"--verbose", # show each individual subtest
|
||||
"--capture", "no", # enable stdout/stderr from tests
|
||||
"--basetemp", cache, # temporary directory
|
||||
"-p", "no:mozlog", # use the WPT result recorder
|
||||
"-p", "no:cacheprovider", # disable state preservation across invocations
|
||||
path],
|
||||
plugins=plugins)
|
||||
|
||||
|
|
|
@ -1,5 +1,4 @@
|
|||
function test(x) {
|
||||
log("classList: " + root.classList);
|
||||
if (!root.classList.contains("reftest-wait")) {
|
||||
observer.disconnect();
|
||||
marionetteScriptFinished();
|
|
@ -1,15 +0,0 @@
|
|||
callback = arguments[arguments.length - 1];
|
||||
|
||||
function check_done() {
|
||||
if (!document.documentElement.classList.contains('reftest-wait')) {
|
||||
callback();
|
||||
} else {
|
||||
setTimeout(check_done, 50);
|
||||
}
|
||||
}
|
||||
|
||||
if (document.readyState === 'complete') {
|
||||
check_done();
|
||||
} else {
|
||||
addEventListener("load", check_done);
|
||||
}
|
|
@ -1,33 +1,44 @@
|
|||
var callback = arguments[arguments.length - 1];
|
||||
|
||||
function test(x) {
|
||||
function root_wait() {
|
||||
if (!root.classList.contains("reftest-wait")) {
|
||||
observer.disconnect();
|
||||
|
||||
// As of 2017-04-05, the Chromium web browser exhibits a rendering bug
|
||||
// (https://bugs.chromium.org/p/chromium/issues/detail?id=708757) that
|
||||
// produces instability during screen capture. The following use of
|
||||
// `requestAnimationFrame` is intended as a short-term workaround, though
|
||||
// it is not guaranteed to resolve the issue.
|
||||
//
|
||||
// For further detail, see:
|
||||
// https://github.com/jugglinmike/chrome-screenshot-race/issues/1
|
||||
|
||||
requestAnimationFrame(function() {
|
||||
requestAnimationFrame(function() {
|
||||
callback();
|
||||
});
|
||||
});
|
||||
if (Document.prototype.hasOwnProperty("fonts")) {
|
||||
document.fonts.ready.then(ready_for_screenshot);
|
||||
} else {
|
||||
// This might take the screenshot too early, depending on whether the
|
||||
// load event is blocked on fonts being loaded. See:
|
||||
// https://github.com/w3c/csswg-drafts/issues/1088
|
||||
ready_for_screenshot();
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
function ready_for_screenshot() {
|
||||
// As of 2017-04-05, the Chromium web browser exhibits a rendering bug
|
||||
// (https://bugs.chromium.org/p/chromium/issues/detail?id=708757) that
|
||||
// produces instability during screen capture. The following use of
|
||||
// `requestAnimationFrame` is intended as a short-term workaround, though
|
||||
// it is not guaranteed to resolve the issue.
|
||||
//
|
||||
// For further detail, see:
|
||||
// https://github.com/jugglinmike/chrome-screenshot-race/issues/1
|
||||
|
||||
requestAnimationFrame(function() {
|
||||
requestAnimationFrame(function() {
|
||||
callback();
|
||||
});
|
||||
});
|
||||
}
|
||||
|
||||
var root = document.documentElement;
|
||||
var observer = new MutationObserver(test);
|
||||
var observer = new MutationObserver(root_wait);
|
||||
|
||||
observer.observe(root, {attributes: true});
|
||||
|
||||
if (document.readyState != "complete") {
|
||||
onload = test;
|
||||
onload = root_wait;
|
||||
} else {
|
||||
test();
|
||||
root_wait();
|
||||
}
|
||||
|
|
|
@ -26,7 +26,6 @@ window.wrappedJSObject.win = window.open("%(abs_url)s", "%(window_id)s");
|
|||
var timer = null;
|
||||
if (%(timeout)s) {
|
||||
timer = setTimeout(function() {
|
||||
log("Timeout fired");
|
||||
window.wrappedJSObject.win.timeout();
|
||||
}, %(timeout)s);
|
||||
}
|
||||
|
|
110
tests/wpt/web-platform-tests/tools/wptrunner/wptrunner/font.py
Normal file
110
tests/wpt/web-platform-tests/tools/wptrunner/wptrunner/font.py
Normal file
|
@ -0,0 +1,110 @@
|
|||
import ctypes
|
||||
import logging
|
||||
import os
|
||||
import platform
|
||||
|
||||
from shutil import copy2, rmtree
|
||||
from subprocess import call
|
||||
|
||||
HERE = os.path.split(__file__)[0]
|
||||
SYSTEM = platform.system().lower()
|
||||
|
||||
|
||||
class FontInstaller(object):
|
||||
def __init__(self, font_dir=None, **fonts):
|
||||
self.font_dir = font_dir
|
||||
self.installed_fonts = False
|
||||
self.created_dir = False
|
||||
self.fonts = fonts
|
||||
|
||||
def __enter__(self, options=None):
|
||||
for _, font_path in self.fonts.items():
|
||||
font_name = font_path.split('/')[-1]
|
||||
install = getattr(self, 'install_%s_font' % SYSTEM, None)
|
||||
if not install:
|
||||
logging.warning('Font installation not supported on %s',
|
||||
SYSTEM)
|
||||
return False
|
||||
if install(font_name, font_path):
|
||||
self.installed_fonts = True
|
||||
logging.info('Installed font: %s', font_name)
|
||||
else:
|
||||
logging.warning('Unable to install font: %s', font_name)
|
||||
|
||||
def __exit__(self, exc_type, exc_val, exc_tb):
|
||||
if not self.installed_fonts:
|
||||
return False
|
||||
|
||||
for _, font_path in self.fonts.items():
|
||||
font_name = font_path.split('/')[-1]
|
||||
remove = getattr(self, 'remove_%s_font' % SYSTEM, None)
|
||||
if not remove:
|
||||
logging.warning('Font removal not supported on %s', SYSTEM)
|
||||
return False
|
||||
if remove(font_name, font_path):
|
||||
logging.info('Removed font: %s', font_name)
|
||||
else:
|
||||
logging.warning('Unable to remove font: %s', font_name)
|
||||
|
||||
def install_linux_font(self, font_name, font_path):
|
||||
if not self.font_dir:
|
||||
self.font_dir = os.path.join(os.path.expanduser('~'), '.fonts')
|
||||
if not os.path.exists(self.font_dir):
|
||||
os.makedirs(self.font_dir)
|
||||
self.created_dir = True
|
||||
if not os.path.exists(os.path.join(self.font_dir, font_name)):
|
||||
copy2(font_path, self.font_dir)
|
||||
try:
|
||||
fc_cache_returncode = call('fc-cache')
|
||||
return not fc_cache_returncode
|
||||
except OSError: # If fontconfig doesn't exist, return False
|
||||
logging.error('fontconfig not available on this Linux system.')
|
||||
return False
|
||||
|
||||
def install_darwin_font(self, font_name, font_path):
|
||||
if not self.font_dir:
|
||||
self.font_dir = os.path.join(os.path.expanduser('~'),
|
||||
'Library/Fonts')
|
||||
if not os.path.exists(self.font_dir):
|
||||
os.makedirs(self.font_dir)
|
||||
self.created_dir = True
|
||||
if not os.path.exists(os.path.join(self.font_dir, font_name)):
|
||||
copy2(font_path, self.font_dir)
|
||||
return True
|
||||
|
||||
def install_windows_font(self, _, font_path):
|
||||
hwnd_broadcast = 0xFFFF
|
||||
wm_fontchange = 0x001D
|
||||
|
||||
gdi32 = ctypes.WinDLL('gdi32')
|
||||
if gdi32.AddFontResourceW(font_path):
|
||||
return bool(ctypes.windll.user32.SendMessageW(hwnd_broadcast,
|
||||
wm_fontchange))
|
||||
|
||||
def remove_linux_font(self, font_name, _):
|
||||
if self.created_dir:
|
||||
rmtree(self.font_dir)
|
||||
else:
|
||||
os.remove('%s/%s' % (self.font_dir, font_name))
|
||||
try:
|
||||
fc_cache_returncode = call('fc-cache')
|
||||
return not fc_cache_returncode
|
||||
except OSError: # If fontconfig doesn't exist, return False
|
||||
logging.error('fontconfig not available on this Linux system.')
|
||||
return False
|
||||
|
||||
def remove_darwin_font(self, font_name, _):
|
||||
if self.created_dir:
|
||||
rmtree(self.font_dir)
|
||||
else:
|
||||
os.remove(os.path.join(self.font_dir, font_name))
|
||||
return True
|
||||
|
||||
def remove_windows_font(self, _, font_path):
|
||||
hwnd_broadcast = 0xFFFF
|
||||
wm_fontchange = 0x001D
|
||||
|
||||
gdi32 = ctypes.WinDLL('gdi32')
|
||||
if gdi32.RemoveFontResourceW(font_path):
|
||||
return bool(ctypes.windll.user32.SendMessageW(hwnd_broadcast,
|
||||
wm_fontchange))
|
54
tests/wpt/web-platform-tests/tools/wptrunner/wptrunner/formatters.py
Executable file
54
tests/wpt/web-platform-tests/tools/wptrunner/wptrunner/formatters.py
Executable file
|
@ -0,0 +1,54 @@
|
|||
import json
|
||||
|
||||
from mozlog.structured.formatters.base import BaseFormatter
|
||||
|
||||
|
||||
class WptreportFormatter(BaseFormatter):
|
||||
"""Formatter that produces results in the format that wpreport expects."""
|
||||
|
||||
def __init__(self):
|
||||
self.raw_results = {}
|
||||
|
||||
def suite_end(self, data):
|
||||
results = {}
|
||||
results["results"] = []
|
||||
for test_name in self.raw_results:
|
||||
result = {"test": test_name}
|
||||
result.update(self.raw_results[test_name])
|
||||
results["results"].append(result)
|
||||
return json.dumps(results)
|
||||
|
||||
def find_or_create_test(self, data):
|
||||
test_name = data["test"]
|
||||
if test_name not in self.raw_results:
|
||||
self.raw_results[test_name] = {
|
||||
"subtests": [],
|
||||
"status": "",
|
||||
"message": None
|
||||
}
|
||||
return self.raw_results[test_name]
|
||||
|
||||
def create_subtest(self, data):
|
||||
test = self.find_or_create_test(data)
|
||||
subtest_name = data["subtest"]
|
||||
|
||||
subtest = {
|
||||
"name": subtest_name,
|
||||
"status": "",
|
||||
"message": None
|
||||
}
|
||||
test["subtests"].append(subtest)
|
||||
|
||||
return subtest
|
||||
|
||||
def test_status(self, data):
|
||||
subtest = self.create_subtest(data)
|
||||
subtest["status"] = data["status"]
|
||||
if "message" in data:
|
||||
subtest["message"] = data["message"]
|
||||
|
||||
def test_end(self, data):
|
||||
test = self.find_or_create_test(data)
|
||||
test["status"] = data["status"]
|
||||
if "message" in data:
|
||||
test["message"] = data["message"]
|
|
@ -29,6 +29,10 @@ class IncludeManifest(ManifestItem):
|
|||
node = DataNode(None)
|
||||
return cls(node)
|
||||
|
||||
def set_defaults(self):
|
||||
if not self.has_key("skip"):
|
||||
self.set("skip", "False")
|
||||
|
||||
def append(self, child):
|
||||
ManifestItem.append(self, child)
|
||||
self.child_map[child.name] = child
|
||||
|
|
|
@ -204,7 +204,7 @@ def write_new_expected(metadata_path, expected_map):
|
|||
dir = os.path.split(path)[0]
|
||||
if not os.path.exists(dir):
|
||||
os.makedirs(dir)
|
||||
with open(path, "w") as f:
|
||||
with open(path, "wb") as f:
|
||||
f.write(manifest_str)
|
||||
|
||||
|
||||
|
|
|
@ -341,6 +341,7 @@ class TestFilter(object):
|
|||
self.manifest = manifestinclude.get_manifest(manifest_path)
|
||||
else:
|
||||
self.manifest = manifestinclude.IncludeManifest.create()
|
||||
self.manifest.set_defaults()
|
||||
|
||||
if include:
|
||||
self.manifest.set("skip", "true")
|
||||
|
@ -413,7 +414,7 @@ class ManifestLoader(object):
|
|||
except manifest.ManifestVersionMismatch:
|
||||
manifest_file = manifest.Manifest(url_base)
|
||||
|
||||
manifest_update.update(tests_path, manifest_file, True)
|
||||
manifest_update.update(tests_path, manifest_file, True)
|
||||
|
||||
manifest.write(manifest_file, manifest_path)
|
||||
|
||||
|
@ -564,74 +565,77 @@ class TestLoader(object):
|
|||
class TestSource(object):
|
||||
__metaclass__ = ABCMeta
|
||||
|
||||
@abstractmethod
|
||||
def queue_tests(self, test_queue):
|
||||
pass
|
||||
def __init__(self, test_queue):
|
||||
self.test_queue = test_queue
|
||||
self.current_group = None
|
||||
self.current_metadata = None
|
||||
|
||||
@abstractmethod
|
||||
def requeue_test(self, test):
|
||||
#@classmethod (doesn't compose with @abstractmethod)
|
||||
def make_queue(cls, tests, **kwargs):
|
||||
pass
|
||||
|
||||
def __enter__(self):
|
||||
return self
|
||||
def group(self):
|
||||
if not self.current_group or len(self.current_group) == 0:
|
||||
try:
|
||||
self.current_group, self.current_metadata = self.test_queue.get(block=False)
|
||||
except Empty:
|
||||
return None, None
|
||||
return self.current_group, self.current_metadata
|
||||
|
||||
def __exit__(self, *args, **kwargs):
|
||||
pass
|
||||
|
||||
class GroupedSource(TestSource):
|
||||
@classmethod
|
||||
def new_group(cls, state, test, **kwargs):
|
||||
raise NotImplementedError
|
||||
|
||||
@classmethod
|
||||
def make_queue(cls, tests, **kwargs):
|
||||
test_queue = Queue()
|
||||
groups = []
|
||||
|
||||
state = {}
|
||||
|
||||
for test in tests:
|
||||
if cls.new_group(state, test, **kwargs):
|
||||
groups.append((deque(), {}))
|
||||
|
||||
group, metadata = groups[-1]
|
||||
group.append(test)
|
||||
test.update_metadata(metadata)
|
||||
|
||||
for item in groups:
|
||||
test_queue.put(item)
|
||||
return test_queue
|
||||
|
||||
|
||||
class SingleTestSource(TestSource):
|
||||
def __init__(self, test_queue):
|
||||
self.test_queue = test_queue
|
||||
|
||||
@classmethod
|
||||
def queue_tests(cls, test_queue, test_type, tests):
|
||||
for test in tests[test_type]:
|
||||
test_queue.put(test)
|
||||
def make_queue(cls, tests, **kwargs):
|
||||
test_queue = Queue()
|
||||
processes = kwargs["processes"]
|
||||
queues = [deque([]) for _ in xrange(processes)]
|
||||
metadatas = [{} for _ in xrange(processes)]
|
||||
for test in tests:
|
||||
idx = hash(test.id) % processes
|
||||
group = queues[idx]
|
||||
metadata = metadatas[idx]
|
||||
group.append(test)
|
||||
test.update_metadata(metadata)
|
||||
|
||||
def get_queue(self):
|
||||
if self.test_queue.empty():
|
||||
return None
|
||||
return self.test_queue
|
||||
for item in zip(queues, metadatas):
|
||||
test_queue.put(item)
|
||||
|
||||
def requeue_test(self, test):
|
||||
self.test_queue.put(test)
|
||||
return test_queue
|
||||
|
||||
class PathGroupedSource(TestSource):
|
||||
def __init__(self, test_queue):
|
||||
self.test_queue = test_queue
|
||||
self.current_queue = None
|
||||
|
||||
class PathGroupedSource(GroupedSource):
|
||||
@classmethod
|
||||
def queue_tests(cls, test_queue, test_type, tests, depth=None):
|
||||
def new_group(cls, state, test, **kwargs):
|
||||
depth = kwargs.get("depth")
|
||||
if depth is True:
|
||||
depth = None
|
||||
|
||||
prev_path = None
|
||||
group = None
|
||||
|
||||
for test in tests[test_type]:
|
||||
path = urlparse.urlsplit(test.url).path.split("/")[1:-1][:depth]
|
||||
if path != prev_path:
|
||||
group = []
|
||||
test_queue.put(group)
|
||||
prev_path = path
|
||||
|
||||
group.append(test)
|
||||
|
||||
def get_queue(self):
|
||||
if not self.current_queue or self.current_queue.empty():
|
||||
try:
|
||||
data = self.test_queue.get(block=True, timeout=1)
|
||||
self.current_queue = Queue()
|
||||
for item in data:
|
||||
self.current_queue.put(item)
|
||||
except Empty:
|
||||
return None
|
||||
return self.current_queue
|
||||
|
||||
def requeue_test(self, test):
|
||||
self.current_queue.put(test)
|
||||
|
||||
def __exit__(self, *args, **kwargs):
|
||||
if self.current_queue:
|
||||
self.current_queue.close()
|
||||
path = urlparse.urlsplit(test.url).path.split("/")[1:-1][:depth]
|
||||
rv = path != state.get("prev_path")
|
||||
state["prev_path"] = path
|
||||
return rv
|
||||
|
|
|
@ -147,8 +147,6 @@ def next_manager_number():
|
|||
|
||||
|
||||
class BrowserManager(object):
|
||||
init_lock = threading.Lock()
|
||||
|
||||
def __init__(self, logger, browser, command_queue, no_timeout=False):
|
||||
self.logger = logger
|
||||
self.browser = browser
|
||||
|
@ -173,34 +171,31 @@ class BrowserManager(object):
|
|||
"""Launch the browser that is being tested,
|
||||
and the TestRunner process that will run the tests."""
|
||||
# It seems that this lock is helpful to prevent some race that otherwise
|
||||
# sometimes stops the spawned processes initalising correctly, and
|
||||
# sometimes stops the spawned processes initialising correctly, and
|
||||
# leaves this thread hung
|
||||
if self.init_timer is not None:
|
||||
self.init_timer.cancel()
|
||||
|
||||
self.logger.debug("Init called, starting browser and runner")
|
||||
|
||||
with self.init_lock:
|
||||
# Guard against problems initialising the browser or the browser
|
||||
# remote control method
|
||||
if not self.no_timeout:
|
||||
self.init_timer = threading.Timer(self.browser.init_timeout,
|
||||
self.init_timeout)
|
||||
try:
|
||||
if self.init_timer is not None:
|
||||
self.init_timer.start()
|
||||
self.logger.debug("Starting browser with settings %r" % self.browser_settings)
|
||||
self.browser.start(**self.browser_settings)
|
||||
self.browser_pid = self.browser.pid()
|
||||
except:
|
||||
self.logger.warning("Failure during init %s" % traceback.format_exc())
|
||||
if self.init_timer is not None:
|
||||
self.init_timer.cancel()
|
||||
self.logger.error(traceback.format_exc())
|
||||
succeeded = False
|
||||
else:
|
||||
succeeded = True
|
||||
self.started = True
|
||||
if not self.no_timeout:
|
||||
self.init_timer = threading.Timer(self.browser.init_timeout,
|
||||
self.init_timeout)
|
||||
try:
|
||||
if self.init_timer is not None:
|
||||
self.init_timer.start()
|
||||
self.logger.debug("Starting browser with settings %r" % self.browser_settings)
|
||||
self.browser.start(**self.browser_settings)
|
||||
self.browser_pid = self.browser.pid()
|
||||
except:
|
||||
self.logger.warning("Failure during init %s" % traceback.format_exc())
|
||||
if self.init_timer is not None:
|
||||
self.init_timer.cancel()
|
||||
self.logger.error(traceback.format_exc())
|
||||
succeeded = False
|
||||
else:
|
||||
succeeded = True
|
||||
self.started = True
|
||||
|
||||
return succeeded
|
||||
|
||||
|
@ -228,6 +223,9 @@ class BrowserManager(object):
|
|||
self.init_timer.cancel()
|
||||
self.browser.cleanup()
|
||||
|
||||
def check_for_crashes(self):
|
||||
self.browser.check_for_crashes()
|
||||
|
||||
def log_crash(self, test_id):
|
||||
self.browser.log_crash(process=self.browser_pid, test=test_id)
|
||||
|
||||
|
@ -237,10 +235,10 @@ class BrowserManager(object):
|
|||
|
||||
class _RunnerManagerState(object):
|
||||
before_init = namedtuple("before_init", [])
|
||||
initalizing = namedtuple("initalizing_browser",
|
||||
["test", "test_queue", "failure_count"])
|
||||
running = namedtuple("running", ["test", "test_queue"])
|
||||
restarting = namedtuple("restarting", ["test", "test_queue"])
|
||||
initializing = namedtuple("initializing_browser",
|
||||
["test", "test_group", "group_metadata", "failure_count"])
|
||||
running = namedtuple("running", ["test", "test_group", "group_metadata"])
|
||||
restarting = namedtuple("restarting", ["test", "test_group", "group_metadata"])
|
||||
error = namedtuple("error", [])
|
||||
stop = namedtuple("stop", [])
|
||||
|
||||
|
@ -249,9 +247,7 @@ RunnerManagerState = _RunnerManagerState()
|
|||
|
||||
|
||||
class TestRunnerManager(threading.Thread):
|
||||
init_lock = threading.Lock()
|
||||
|
||||
def __init__(self, suite_name, tests, test_source_cls, browser_cls, browser_kwargs,
|
||||
def __init__(self, suite_name, test_queue, test_source_cls, browser_cls, browser_kwargs,
|
||||
executor_cls, executor_kwargs, stop_flag, pause_after_test=False,
|
||||
pause_on_unexpected=False, restart_on_unexpected=True, debug_info=None):
|
||||
"""Thread that owns a single TestRunner process and any processes required
|
||||
|
@ -271,9 +267,7 @@ class TestRunnerManager(threading.Thread):
|
|||
"""
|
||||
self.suite_name = suite_name
|
||||
|
||||
self.tests = tests
|
||||
self.test_source_cls = test_source_cls
|
||||
self.test_queue = None
|
||||
self.test_source = test_source_cls(test_queue)
|
||||
|
||||
self.browser_cls = browser_cls
|
||||
self.browser_kwargs = browser_kwargs
|
||||
|
@ -281,8 +275,6 @@ class TestRunnerManager(threading.Thread):
|
|||
self.executor_cls = executor_cls
|
||||
self.executor_kwargs = executor_kwargs
|
||||
|
||||
self.test_source = None
|
||||
|
||||
# Flags used to shut down this thread if we get a sigint
|
||||
self.parent_stop_flag = stop_flag
|
||||
self.child_stop_flag = multiprocessing.Event()
|
||||
|
@ -321,15 +313,14 @@ class TestRunnerManager(threading.Thread):
|
|||
that the manager should shut down the next time the event loop
|
||||
spins."""
|
||||
self.logger = structuredlog.StructuredLogger(self.suite_name)
|
||||
with self.browser_cls(self.logger, **self.browser_kwargs) as browser, self.test_source_cls(self.tests) as test_source:
|
||||
with self.browser_cls(self.logger, **self.browser_kwargs) as browser:
|
||||
self.browser = BrowserManager(self.logger,
|
||||
browser,
|
||||
self.command_queue,
|
||||
no_timeout=self.debug_info is not None)
|
||||
self.test_source = test_source
|
||||
dispatch = {
|
||||
RunnerManagerState.before_init: self.start_init,
|
||||
RunnerManagerState.initalizing: self.init,
|
||||
RunnerManagerState.initializing: self.init,
|
||||
RunnerManagerState.running: self.run_test,
|
||||
RunnerManagerState.restarting: self.restart_runner
|
||||
}
|
||||
|
@ -374,7 +365,7 @@ class TestRunnerManager(threading.Thread):
|
|||
def wait_event(self):
|
||||
dispatch = {
|
||||
RunnerManagerState.before_init: {},
|
||||
RunnerManagerState.initalizing:
|
||||
RunnerManagerState.initializing:
|
||||
{
|
||||
"init_succeeded": self.init_succeeded,
|
||||
"init_failed": self.init_failed,
|
||||
|
@ -432,19 +423,18 @@ class TestRunnerManager(threading.Thread):
|
|||
return
|
||||
return f(*data)
|
||||
|
||||
|
||||
def should_stop(self):
|
||||
return self.child_stop_flag.is_set() or self.parent_stop_flag.is_set()
|
||||
|
||||
def start_init(self):
|
||||
test, test_queue = self.get_next_test()
|
||||
test, test_group, group_metadata = self.get_next_test()
|
||||
if test is None:
|
||||
return RunnerManagerState.stop()
|
||||
else:
|
||||
return RunnerManagerState.initalizing(test, test_queue, 0)
|
||||
return RunnerManagerState.initializing(test, test_group, group_metadata, 0)
|
||||
|
||||
def init(self):
|
||||
assert isinstance(self.state, RunnerManagerState.initalizing)
|
||||
assert isinstance(self.state, RunnerManagerState.initializing)
|
||||
if self.state.failure_count > self.max_restarts:
|
||||
self.logger.error("Max restarts exceeded")
|
||||
return RunnerManagerState.error()
|
||||
|
@ -455,17 +445,19 @@ class TestRunnerManager(threading.Thread):
|
|||
if result is Stop:
|
||||
return RunnerManagerState.error()
|
||||
elif not result:
|
||||
return RunnerManagerState.initalizing(self.state.test,
|
||||
self.state.test_queue,
|
||||
self.state.failure_count + 1)
|
||||
return RunnerManagerState.initializing(self.state.test,
|
||||
self.state.test_group,
|
||||
self.state.group_metadata,
|
||||
self.state.failure_count + 1)
|
||||
else:
|
||||
self.executor_kwargs["group_metadata"] = self.state.group_metadata
|
||||
self.start_test_runner()
|
||||
|
||||
def start_test_runner(self):
|
||||
# Note that we need to be careful to start the browser before the
|
||||
# test runner to ensure that any state set when the browser is started
|
||||
# can be passed in to the test runner.
|
||||
assert isinstance(self.state, RunnerManagerState.initalizing)
|
||||
assert isinstance(self.state, RunnerManagerState.initializing)
|
||||
assert self.command_queue is not None
|
||||
assert self.remote_queue is not None
|
||||
self.logger.info("Starting runner")
|
||||
|
@ -486,34 +478,32 @@ class TestRunnerManager(threading.Thread):
|
|||
# Now we wait for either an init_succeeded event or an init_failed event
|
||||
|
||||
def init_succeeded(self):
|
||||
assert isinstance(self.state, RunnerManagerState.initalizing)
|
||||
assert isinstance(self.state, RunnerManagerState.initializing)
|
||||
self.browser.after_init()
|
||||
return RunnerManagerState.running(self.state.test,
|
||||
self.state.test_queue)
|
||||
self.state.test_group,
|
||||
self.state.group_metadata)
|
||||
|
||||
def init_failed(self):
|
||||
assert isinstance(self.state, RunnerManagerState.initalizing)
|
||||
assert isinstance(self.state, RunnerManagerState.initializing)
|
||||
self.browser.after_init()
|
||||
self.stop_runner(force=True)
|
||||
return RunnerManagerState.initalizing(self.state.test,
|
||||
self.state.test_queue,
|
||||
self.state.failure_count + 1)
|
||||
return RunnerManagerState.initializing(self.state.test,
|
||||
self.state.test_group,
|
||||
self.state.group_metadata,
|
||||
self.state.failure_count + 1)
|
||||
|
||||
def get_next_test(self, test_queue=None):
|
||||
def get_next_test(self, test_group=None):
|
||||
test = None
|
||||
while test is None:
|
||||
if test_queue is None:
|
||||
test_queue = self.test_source.get_queue()
|
||||
if test_queue is None:
|
||||
while test_group is None or len(test_group) == 0:
|
||||
test_group, group_metadata = self.test_source.group()
|
||||
if test_group is None:
|
||||
self.logger.info("No more tests")
|
||||
return None, None
|
||||
try:
|
||||
# Need to block here just to allow for contention with other processes
|
||||
test = test_queue.get(block=True, timeout=2)
|
||||
except Empty:
|
||||
if test_queue.empty():
|
||||
test_queue = None
|
||||
return test, test_queue
|
||||
return None, None, None
|
||||
test = test_group.popleft()
|
||||
return test, test_group, group_metadata
|
||||
|
||||
|
||||
def run_test(self):
|
||||
assert isinstance(self.state, RunnerManagerState.running)
|
||||
|
@ -522,7 +512,8 @@ class TestRunnerManager(threading.Thread):
|
|||
if self.browser.update_settings(self.state.test):
|
||||
self.logger.info("Restarting browser for new test environment")
|
||||
return RunnerManagerState.restarting(self.state.test,
|
||||
self.state.test_queue)
|
||||
self.state.test_group,
|
||||
self.state.group_metadata)
|
||||
|
||||
self.logger.test_start(self.state.test.id)
|
||||
self.send_message("run_test", self.state.test)
|
||||
|
@ -560,6 +551,11 @@ class TestRunnerManager(threading.Thread):
|
|||
# Write the result of the test harness
|
||||
expected = test.expected()
|
||||
status = file_result.status if file_result.status != "EXTERNAL-TIMEOUT" else "TIMEOUT"
|
||||
|
||||
if file_result.status in ("TIMEOUT", "EXTERNAL-TIMEOUT"):
|
||||
if self.browser.check_for_crashes():
|
||||
status = "CRASH"
|
||||
|
||||
is_unexpected = expected != status
|
||||
if is_unexpected:
|
||||
self.unexpected_count += 1
|
||||
|
@ -595,22 +591,22 @@ class TestRunnerManager(threading.Thread):
|
|||
|
||||
def after_test_end(self, restart):
|
||||
assert isinstance(self.state, RunnerManagerState.running)
|
||||
test, test_queue = self.get_next_test()
|
||||
test, test_group, group_metadata = self.get_next_test()
|
||||
if test is None:
|
||||
return RunnerManagerState.stop()
|
||||
if test_queue != self.state.test_queue:
|
||||
if test_group != self.state.test_group:
|
||||
# We are starting a new group of tests, so force a restart
|
||||
restart = True
|
||||
if restart:
|
||||
return RunnerManagerState.restarting(test, test_queue)
|
||||
return RunnerManagerState.restarting(test, test_group, group_metadata)
|
||||
else:
|
||||
return RunnerManagerState.running(test, test_queue)
|
||||
return RunnerManagerState.running(test, test_group, group_metadata)
|
||||
|
||||
def restart_runner(self):
|
||||
"""Stop and restart the TestRunner"""
|
||||
assert isinstance(self.state, RunnerManagerState.restarting)
|
||||
self.stop_runner()
|
||||
return RunnerManagerState.initalizing(self.state.test, self.state.test_queue, 0)
|
||||
return RunnerManagerState.initializing(self.state.test, self.state.test_group, self.state.group_metadata, 0)
|
||||
|
||||
def log(self, action, kwargs):
|
||||
getattr(self.logger, action)(**kwargs)
|
||||
|
@ -673,34 +669,16 @@ class TestRunnerManager(threading.Thread):
|
|||
except Empty:
|
||||
break
|
||||
|
||||
class TestQueue(object):
|
||||
def __init__(self, test_source_cls, test_type, tests, **kwargs):
|
||||
self.queue = None
|
||||
self.test_source_cls = test_source_cls
|
||||
self.test_type = test_type
|
||||
self.tests = tests
|
||||
self.kwargs = kwargs
|
||||
|
||||
def __enter__(self):
|
||||
if not self.tests[self.test_type]:
|
||||
return None
|
||||
def make_test_queue(tests, test_source_cls, **test_source_kwargs):
|
||||
queue = test_source_cls.make_queue(tests, **test_source_kwargs)
|
||||
|
||||
self.queue = Queue()
|
||||
has_tests = self.test_source_cls.queue_tests(self.queue,
|
||||
self.test_type,
|
||||
self.tests,
|
||||
**self.kwargs)
|
||||
# There is a race condition that means sometimes we continue
|
||||
# before the tests have been written to the underlying pipe.
|
||||
# Polling the pipe for data here avoids that
|
||||
self.queue._reader.poll(10)
|
||||
assert not self.queue.empty()
|
||||
return self.queue
|
||||
|
||||
def __exit__(self, *args, **kwargs):
|
||||
if self.queue is not None:
|
||||
self.queue.close()
|
||||
self.queue = None
|
||||
# There is a race condition that means sometimes we continue
|
||||
# before the tests have been written to the underlying pipe.
|
||||
# Polling the pipe for data here avoids that
|
||||
queue._reader.poll(10)
|
||||
assert not queue.empty()
|
||||
return queue
|
||||
|
||||
|
||||
class ManagerGroup(object):
|
||||
|
@ -730,7 +708,6 @@ class ManagerGroup(object):
|
|||
# of sigint
|
||||
self.stop_flag = threading.Event()
|
||||
self.logger = structuredlog.StructuredLogger(suite_name)
|
||||
self.test_queue = None
|
||||
|
||||
def __enter__(self):
|
||||
return self
|
||||
|
@ -741,31 +718,29 @@ class ManagerGroup(object):
|
|||
def run(self, test_type, tests):
|
||||
"""Start all managers in the group"""
|
||||
self.logger.debug("Using %i processes" % self.size)
|
||||
type_tests = tests[test_type]
|
||||
if not type_tests:
|
||||
self.logger.info("No %s tests to run" % test_type)
|
||||
return
|
||||
|
||||
self.test_queue = TestQueue(self.test_source_cls,
|
||||
test_type,
|
||||
tests,
|
||||
**self.test_source_kwargs)
|
||||
with self.test_queue as test_queue:
|
||||
if test_queue is None:
|
||||
self.logger.info("No %s tests to run" % test_type)
|
||||
return
|
||||
for _ in range(self.size):
|
||||
manager = TestRunnerManager(self.suite_name,
|
||||
test_queue,
|
||||
self.test_source_cls,
|
||||
self.browser_cls,
|
||||
self.browser_kwargs,
|
||||
self.executor_cls,
|
||||
self.executor_kwargs,
|
||||
self.stop_flag,
|
||||
self.pause_after_test,
|
||||
self.pause_on_unexpected,
|
||||
self.restart_on_unexpected,
|
||||
self.debug_info)
|
||||
manager.start()
|
||||
self.pool.add(manager)
|
||||
self.wait()
|
||||
test_queue = make_test_queue(type_tests, self.test_source_cls, **self.test_source_kwargs)
|
||||
|
||||
for _ in range(self.size):
|
||||
manager = TestRunnerManager(self.suite_name,
|
||||
test_queue,
|
||||
self.test_source_cls,
|
||||
self.browser_cls,
|
||||
self.browser_kwargs,
|
||||
self.executor_cls,
|
||||
self.executor_kwargs,
|
||||
self.stop_flag,
|
||||
self.pause_after_test,
|
||||
self.pause_on_unexpected,
|
||||
self.restart_on_unexpected,
|
||||
self.debug_info)
|
||||
manager.start()
|
||||
self.pool.add(manager)
|
||||
self.wait()
|
||||
|
||||
def is_alive(self):
|
||||
"""Boolean indicating whether any manager in the group is still alive"""
|
||||
|
|
|
@ -18,6 +18,7 @@ skip: true
|
|||
skip: false
|
||||
"""
|
||||
|
||||
|
||||
def test_filter_unicode():
|
||||
tests = make_mock_manifest(("test", "a", 10), ("test", "a/b", 10),
|
||||
("test", "c", 10))
|
||||
|
|
|
@ -40,7 +40,7 @@ class CreateMetadataPatch(Step):
|
|||
"""Create a patch/commit for the metadata checkout"""
|
||||
|
||||
def create(self, state):
|
||||
if state.no_patch:
|
||||
if not state.patch:
|
||||
return
|
||||
|
||||
local_tree = state.local_tree
|
||||
|
|
|
@ -169,7 +169,7 @@ class CreateSyncPatch(Step):
|
|||
"""Add the updated test files to a commit/patch in the local tree."""
|
||||
|
||||
def create(self, state):
|
||||
if state.no_patch:
|
||||
if not state.patch:
|
||||
return
|
||||
|
||||
local_tree = state.local_tree
|
||||
|
|
|
@ -30,7 +30,7 @@ class LoadConfig(Step):
|
|||
state.tests_path = state.paths["/"]["tests_path"]
|
||||
state.metadata_path = state.paths["/"]["metadata_path"]
|
||||
|
||||
assert state.tests_path.startswith("/")
|
||||
assert os.path.isabs(state.tests_path)
|
||||
|
||||
|
||||
class LoadTrees(Step):
|
||||
|
@ -71,7 +71,7 @@ class SyncFromUpstream(Step):
|
|||
with state.push(["sync", "paths", "metadata_path", "tests_path", "local_tree",
|
||||
"sync_tree"]):
|
||||
state.target_rev = kwargs["rev"]
|
||||
state.no_patch = kwargs["no_patch"]
|
||||
state.patch = kwargs["patch"]
|
||||
state.suite_name = kwargs["suite_name"]
|
||||
state.path_excludes = kwargs["exclude"]
|
||||
state.path_includes = kwargs["include"]
|
||||
|
@ -90,7 +90,7 @@ class UpdateMetadata(Step):
|
|||
with state.push(["local_tree", "sync_tree", "paths", "serve_root"]):
|
||||
state.run_log = kwargs["run_log"]
|
||||
state.ignore_existing = kwargs["ignore_existing"]
|
||||
state.no_patch = kwargs["no_patch"]
|
||||
state.patch = kwargs["patch"]
|
||||
state.suite_name = kwargs["suite_name"]
|
||||
state.product = kwargs["product"]
|
||||
state.config = kwargs["config"]
|
||||
|
|
|
@ -12,8 +12,8 @@ import mozprocess
|
|||
|
||||
|
||||
__all__ = ["SeleniumServer", "ChromeDriverServer",
|
||||
"GeckoDriverServer", "ServoDriverServer",
|
||||
"WebDriverServer"]
|
||||
"GeckoDriverServer", "InternetExplorerDriverServer",
|
||||
"ServoDriverServer", "WebDriverServer"]
|
||||
|
||||
|
||||
class WebDriverServer(object):
|
||||
|
@ -125,7 +125,7 @@ class SeleniumServer(WebDriverServer):
|
|||
|
||||
|
||||
class ChromeDriverServer(WebDriverServer):
|
||||
default_base_path = "/wd/hub"
|
||||
default_base_path = "/"
|
||||
|
||||
def __init__(self, logger, binary="chromedriver", port=None,
|
||||
base_path="", args=None):
|
||||
|
@ -149,6 +149,17 @@ class EdgeDriverServer(WebDriverServer):
|
|||
"--port=%s" % str(self.port)] + self._args
|
||||
|
||||
|
||||
class InternetExplorerDriverServer(WebDriverServer):
|
||||
def __init__(self, logger, binary="IEDriverServer.exe", port=None,
|
||||
base_path="", host="localhost", args=None):
|
||||
WebDriverServer.__init__(
|
||||
self, logger, binary, host=host, port=port, args=args)
|
||||
|
||||
def make_command(self):
|
||||
return [self.binary,
|
||||
"--port=%s" % str(self.port)] + self._args
|
||||
|
||||
|
||||
class GeckoDriverServer(WebDriverServer):
|
||||
def __init__(self, logger, marionette_port=2828, binary="geckodriver",
|
||||
host="127.0.0.1", port=None, args=None):
|
||||
|
|
|
@ -7,6 +7,7 @@ from distutils.spawn import find_executable
|
|||
|
||||
import config
|
||||
import wpttest
|
||||
import formatters
|
||||
|
||||
|
||||
def abs_path(path):
|
||||
|
@ -46,8 +47,10 @@ def create_parser(product_choices=None):
|
|||
|
||||
TEST is either the full path to a test file to run, or the URL of a test excluding
|
||||
scheme host and port.""")
|
||||
parser.add_argument("--manifest-update", action="store_true", default=False,
|
||||
parser.add_argument("--manifest-update", action="store_true", default=None,
|
||||
help="Regenerate the test manifest.")
|
||||
parser.add_argument("--no-manifest-update", action="store_false", dest="manifest_update",
|
||||
help="Prevent regeneration of the test manifest.")
|
||||
|
||||
parser.add_argument("--timeout-multiplier", action="store", type=float, default=None,
|
||||
help="Multiplier relative to standard test timeout to use")
|
||||
|
@ -68,6 +71,9 @@ scheme host and port.""")
|
|||
mode_group.add_argument("--list-disabled", action="store_true",
|
||||
default=False,
|
||||
help="List the tests that are disabled on the current platform")
|
||||
mode_group.add_argument("--list-tests", action="store_true",
|
||||
default=False,
|
||||
help="List all tests that will run")
|
||||
|
||||
test_selection_group = parser.add_argument_group("Test Selection")
|
||||
test_selection_group.add_argument("--test-types", action="store",
|
||||
|
@ -132,6 +138,11 @@ scheme host and port.""")
|
|||
default=None, help="Browser against which to run tests")
|
||||
config_group.add_argument("--config", action="store", type=abs_path, dest="config",
|
||||
help="Path to config file")
|
||||
config_group.add_argument("--install-fonts", action="store_true",
|
||||
default=None,
|
||||
help="Allow the wptrunner to install fonts on your system")
|
||||
config_group.add_argument("--font-dir", action="store", type=abs_path, dest="font_dir",
|
||||
help="Path to local font installation directory", default=None)
|
||||
|
||||
build_type = parser.add_mutually_exclusive_group()
|
||||
build_type.add_argument("--debug-build", dest="debug", action="store_true",
|
||||
|
@ -179,6 +190,15 @@ scheme host and port.""")
|
|||
help="Defines an extra user preference (overrides those in prefs_root)")
|
||||
gecko_group.add_argument("--leak-check", dest="leak_check", action="store_true",
|
||||
help="Enable leak checking")
|
||||
gecko_group.add_argument("--stylo-threads", action="store", type=int, default=1,
|
||||
help="Number of parallel threads to use for stylo")
|
||||
gecko_group.add_argument("--reftest-internal", dest="reftest_internal", action="store_true",
|
||||
default=None, help="Enable reftest runner implemented inside Marionette")
|
||||
gecko_group.add_argument("--reftest-external", dest="reftest_internal", action="store_false",
|
||||
help="Disable reftest runner implemented inside Marionette")
|
||||
gecko_group.add_argument("--reftest-screenshot", dest="reftest_screenshot", action="store",
|
||||
choices=["always", "fail", "unexpected"], default="unexpected",
|
||||
help="With --reftest-internal, when to take a screenshot")
|
||||
|
||||
servo_group = parser.add_argument_group("Servo-specific")
|
||||
servo_group.add_argument("--user-stylesheet",
|
||||
|
@ -211,6 +231,8 @@ scheme host and port.""")
|
|||
help="List of URLs for tests to run, or paths including tests to run. "
|
||||
"(equivalent to --include)")
|
||||
|
||||
commandline.log_formatters["wptreport"] = (formatters.WptreportFormatter, "wptreport format")
|
||||
|
||||
commandline.add_logging_group(parser)
|
||||
return parser
|
||||
|
||||
|
@ -386,6 +408,10 @@ def check_args(kwargs):
|
|||
kwargs['extra_prefs'] = [tuple(prefarg.split('=', 1)) for prefarg in
|
||||
kwargs['extra_prefs']]
|
||||
|
||||
if kwargs["reftest_internal"] is None:
|
||||
# Default to the internal reftest implementation on Linux and OSX
|
||||
kwargs["reftest_internal"] = sys.platform.startswith("linux") or sys.platform.startswith("darwin")
|
||||
|
||||
return kwargs
|
||||
|
||||
|
||||
|
@ -394,6 +420,15 @@ def check_args_update(kwargs):
|
|||
|
||||
if kwargs["product"] is None:
|
||||
kwargs["product"] = "firefox"
|
||||
if kwargs["patch"] is None:
|
||||
kwargs["patch"] = kwargs["sync"]
|
||||
|
||||
for item in kwargs["run_log"]:
|
||||
if os.path.isdir(item):
|
||||
print >> sys.stderr, "Log file %s is a directory" % item
|
||||
sys.exit(1)
|
||||
|
||||
return kwargs
|
||||
|
||||
|
||||
def create_parser_update(product_choices=None):
|
||||
|
@ -421,10 +456,12 @@ def create_parser_update(product_choices=None):
|
|||
parser.add_argument("--branch", action="store", type=abs_path,
|
||||
help="Remote branch to sync against")
|
||||
parser.add_argument("--rev", action="store", help="Revision to sync to")
|
||||
parser.add_argument("--no-patch", action="store_true",
|
||||
help="Don't create an mq patch or git commit containing the changes.")
|
||||
parser.add_argument("--patch", action="store_true", dest="patch", default=None,
|
||||
help="Create a VCS commit containing the changes.")
|
||||
parser.add_argument("--no-patch", action="store_false", dest="patch",
|
||||
help="Don't create a VCS commit containing the changes.")
|
||||
parser.add_argument("--sync", dest="sync", action="store_true", default=False,
|
||||
help="Sync the tests with the latest from upstream")
|
||||
help="Sync the tests with the latest from upstream (implies --patch)")
|
||||
parser.add_argument("--ignore-existing", action="store_true", help="When updating test results only consider results from the logfiles provided, not existing expectations.")
|
||||
parser.add_argument("--continue", action="store_true", help="Continue a previously started run of the update script")
|
||||
parser.add_argument("--abort", action="store_true", help="Clear state from a previous incomplete run of the update script")
|
||||
|
|
|
@ -61,6 +61,8 @@ class Compiler(NodeVisitor):
|
|||
self.tree = tree
|
||||
self.output_node = self._initial_output_node(tree, **kwargs)
|
||||
self.visit(tree)
|
||||
if hasattr(self.output_node, "set_defaults"):
|
||||
self.output_node.set_defaults()
|
||||
assert self.output_node is not None
|
||||
return self.output_node
|
||||
|
||||
|
|
|
@ -139,6 +139,9 @@ class ManifestItem(object):
|
|||
rv.extend(" %s" % line for line in str(item).split("\n"))
|
||||
return "\n".join(rv)
|
||||
|
||||
def set_defaults(self):
|
||||
pass
|
||||
|
||||
@property
|
||||
def is_empty(self):
|
||||
if self._data:
|
||||
|
|
|
@ -10,6 +10,7 @@ import testloader
|
|||
import wptcommandline
|
||||
import wptlogging
|
||||
import wpttest
|
||||
from font import FontInstaller
|
||||
from testrunner import ManagerGroup
|
||||
from browsers.base import NullBrowser
|
||||
|
||||
|
@ -73,8 +74,10 @@ def list_test_groups(test_paths, product, **kwargs):
|
|||
|
||||
ssl_env = env.ssl_env(logger, **kwargs)
|
||||
|
||||
run_info_extras = products.load_product(kwargs["config"], product)[-1](**kwargs)
|
||||
|
||||
run_info, test_loader = get_loader(test_paths, product, ssl_env,
|
||||
**kwargs)
|
||||
run_info_extras=run_info_extras, **kwargs)
|
||||
|
||||
for item in sorted(test_loader.groups(kwargs["test_types"])):
|
||||
print item
|
||||
|
@ -85,10 +88,12 @@ def list_disabled(test_paths, product, **kwargs):
|
|||
|
||||
rv = []
|
||||
|
||||
run_info_extras = products.load_product(kwargs["config"], product)[-1](**kwargs)
|
||||
|
||||
ssl_env = env.ssl_env(logger, **kwargs)
|
||||
|
||||
run_info, test_loader = get_loader(test_paths, product, ssl_env,
|
||||
**kwargs)
|
||||
run_info_extras=run_info_extras, **kwargs)
|
||||
|
||||
for test_type, tests in test_loader.disabled_tests.iteritems():
|
||||
for test in tests:
|
||||
|
@ -96,6 +101,22 @@ def list_disabled(test_paths, product, **kwargs):
|
|||
print json.dumps(rv, indent=2)
|
||||
|
||||
|
||||
def list_tests(test_paths, product, **kwargs):
|
||||
env.do_delayed_imports(logger, test_paths)
|
||||
|
||||
rv = []
|
||||
|
||||
ssl_env = env.ssl_env(logger, **kwargs)
|
||||
|
||||
run_info_extras = products.load_product(kwargs["config"], product)[-1](**kwargs)
|
||||
|
||||
run_info, test_loader = get_loader(test_paths, product, ssl_env,
|
||||
run_info_extras=run_info_extras, **kwargs)
|
||||
|
||||
for test in test_loader.test_ids:
|
||||
print test
|
||||
|
||||
|
||||
def get_pause_after_test(test_loader, **kwargs):
|
||||
total_tests = sum(len(item) for item in test_loader.tests.itervalues())
|
||||
if kwargs["pause_after_test"] is None:
|
||||
|
@ -121,6 +142,12 @@ def run_tests(config, test_paths, product, **kwargs):
|
|||
|
||||
check_args(**kwargs)
|
||||
|
||||
if kwargs["install_fonts"]:
|
||||
env_extras.append(FontInstaller(
|
||||
font_dir=kwargs["font_dir"],
|
||||
ahem=os.path.join(kwargs["tests_root"], "fonts/Ahem.ttf")
|
||||
))
|
||||
|
||||
if "test_loader" in kwargs:
|
||||
run_info = wpttest.get_run_info(kwargs["run_info"], product, debug=None,
|
||||
extras=run_info_extras(**kwargs))
|
||||
|
@ -132,13 +159,13 @@ def run_tests(config, test_paths, product, **kwargs):
|
|||
run_info_extras=run_info_extras(**kwargs),
|
||||
**kwargs)
|
||||
|
||||
test_source_kwargs = {"processes": kwargs["processes"]}
|
||||
if kwargs["run_by_dir"] is False:
|
||||
test_source_cls = testloader.SingleTestSource
|
||||
test_source_kwargs = {}
|
||||
else:
|
||||
# A value of None indicates infinite depth
|
||||
test_source_cls = testloader.PathGroupedSource
|
||||
test_source_kwargs = {"depth": kwargs["run_by_dir"]}
|
||||
test_source_kwargs["depth"] = kwargs["run_by_dir"]
|
||||
|
||||
logger.info("Using %i client processes" % kwargs["processes"])
|
||||
|
||||
|
@ -231,7 +258,6 @@ def run_tests(config, test_paths, product, **kwargs):
|
|||
if repeat_until_unexpected and unexpected_total > 0:
|
||||
break
|
||||
logger.suite_end()
|
||||
|
||||
return unexpected_total == 0
|
||||
|
||||
def start(**kwargs):
|
||||
|
@ -239,6 +265,8 @@ def start(**kwargs):
|
|||
list_test_groups(**kwargs)
|
||||
elif kwargs["list_disabled"]:
|
||||
list_disabled(**kwargs)
|
||||
elif kwargs["list_tests"]:
|
||||
list_tests(**kwargs)
|
||||
else:
|
||||
return not run_tests(**kwargs)
|
||||
|
||||
|
|
|
@ -1,6 +1,5 @@
|
|||
import os
|
||||
|
||||
import mozinfo
|
||||
from collections import defaultdict
|
||||
|
||||
from wptmanifest.parser import atoms
|
||||
|
||||
|
@ -66,6 +65,8 @@ def get_run_info(metadata_root, product, **kwargs):
|
|||
|
||||
class RunInfo(dict):
|
||||
def __init__(self, metadata_root, product, debug, extras=None):
|
||||
import mozinfo
|
||||
|
||||
self._update_mozinfo(metadata_root)
|
||||
self.update(mozinfo.info)
|
||||
self["product"] = product
|
||||
|
@ -74,12 +75,20 @@ class RunInfo(dict):
|
|||
elif "debug" not in self:
|
||||
# Default to release
|
||||
self["debug"] = False
|
||||
if product == "firefox" and "stylo" not in self:
|
||||
self["stylo"] = False
|
||||
if "STYLO_FORCE_ENABLED" in os.environ:
|
||||
self["stylo"] = True
|
||||
if "STYLO_FORCE_DISABLED" in os.environ:
|
||||
self["stylo"] = False
|
||||
if extras is not None:
|
||||
self.update(extras)
|
||||
|
||||
def _update_mozinfo(self, metadata_root):
|
||||
"""Add extra build information from a mozinfo.json file in a parent
|
||||
directory"""
|
||||
import mozinfo
|
||||
|
||||
path = metadata_root
|
||||
dirs = set()
|
||||
while path != os.path.expanduser('~'):
|
||||
|
@ -113,6 +122,11 @@ class Test(object):
|
|||
def __eq__(self, other):
|
||||
return self.id == other.id
|
||||
|
||||
def update_metadata(self, metadata=None):
|
||||
if metadata is None:
|
||||
metadata = {}
|
||||
return metadata
|
||||
|
||||
@classmethod
|
||||
def from_manifest(cls, manifest_item, inherit_metadata, test_metadata):
|
||||
timeout = cls.long_timeout if manifest_item.timeout == "long" else cls.default_timeout
|
||||
|
@ -320,6 +334,17 @@ class ReftestTest(Test):
|
|||
|
||||
return node
|
||||
|
||||
def update_metadata(self, metadata):
|
||||
if not "url_count" in metadata:
|
||||
metadata["url_count"] = defaultdict(int)
|
||||
for reference, _ in self.references:
|
||||
# We assume a naive implementation in which a url with multiple
|
||||
# possible screenshots will need to take both the lhs and rhs screenshots
|
||||
# for each possible match
|
||||
metadata["url_count"][(self.environment["protocol"], reference.url)] += 1
|
||||
reference.update_metadata(metadata)
|
||||
return metadata
|
||||
|
||||
@property
|
||||
def id(self):
|
||||
return self.url
|
||||
|
|
|
@ -282,10 +282,10 @@ class TestDirectoryHandler(TestUsingServer):
|
|||
assert resp.info()["Content-Type"] == "text/html"
|
||||
|
||||
def test_subdirectory_no_trailing_slash(self):
|
||||
with pytest.raises(HTTPError) as cm:
|
||||
self.request("/subdir")
|
||||
|
||||
assert cm.value.code == 404
|
||||
# This seems to resolve the 301 transparently, so test for 200
|
||||
resp = self.request("/subdir")
|
||||
assert resp.getcode() == 200
|
||||
assert resp.info()["Content-Type"] == "text/html"
|
||||
|
||||
|
||||
class TestAsIsHandler(TestUsingServer):
|
||||
|
|
|
@ -58,7 +58,9 @@ class DirectoryHandler(object):
|
|||
url_path = request.url_parts.path
|
||||
|
||||
if not url_path.endswith("/"):
|
||||
raise HTTPException(404)
|
||||
response.status = 301
|
||||
response.headers = [("Location", "%s/" % request.url)]
|
||||
return
|
||||
|
||||
path = filesystem_path(self.base_path, request, self.url_base)
|
||||
|
||||
|
|
|
@ -465,9 +465,11 @@ class ResponseWriter(object):
|
|||
raise ValueError
|
||||
|
||||
def flush(self):
|
||||
"""Flush the output."""
|
||||
"""Flush the output. Returns False if the flush failed due to
|
||||
the socket being closed by the remote end."""
|
||||
try:
|
||||
self._wfile.flush()
|
||||
return True
|
||||
except socket.error:
|
||||
# This can happen if the socket got closed by the remote end
|
||||
pass
|
||||
return False
|
||||
|
|
Loading…
Add table
Add a link
Reference in a new issue