Auto merge of #24435 - marmeladema:issue-23607/compat, r=jdm

Issue 23607: first pass of changes for compatibility with Python3

As much as I want to migrate entirely to Python 3 (see #23607), it will require some time, as the changes in web-platform-tests are significant and rely on upstream fixes being merged and synced downstream.
In the meantime, let's improve compatibility with Python 3 so that the later migration will be less painful.

The build system is definitely not ready yet for Python 3, but this is a step in the right direction.

---
<!-- Thank you for contributing to Servo! Please replace each `[ ]` by `[X]` when the step is complete, and replace `___` with appropriate data: -->
- [x] `./mach build -d` does not report any errors
- [x] `./mach test-tidy` does not report any errors

<!-- Also, please make sure that "Allow edits from maintainers" checkbox is checked, so that we can help you if you get stuck somewhere along the way.-->

<!-- Pull requests that do not address these steps are welcome, but they will require additional verification as part of the review process. -->
This commit is contained in:
bors-servo 2019-10-16 11:01:28 -04:00 committed by GitHub
commit 6d488f1be2
No known key found for this signature in database
GPG key ID: 4AEE18F83AFDEB23
13 changed files with 70 additions and 56 deletions

View file

@ -9,3 +9,4 @@ set -o nounset
set -o pipefail set -o pipefail
rm -rf target/ rm -rf target/
rm -rf python/_virtualenv/

View file

@ -32,4 +32,7 @@ boto3 == 1.4.4
# https://github.com/servo/servo/pull/18942 # https://github.com/servo/servo/pull/18942
certifi certifi
# For Python3 compatibility
six
-e python/tidy -e python/tidy

View file

@ -11,7 +11,7 @@ import os
import platform import platform
import shutil import shutil
import subprocess import subprocess
import urllib import six.moves.urllib as urllib
from subprocess import PIPE from subprocess import PIPE
from zipfile import BadZipfile from zipfile import BadZipfile
@ -293,7 +293,7 @@ def windows_msvc(context, force=False):
def prepare_file(zip_path, full_spec): def prepare_file(zip_path, full_spec):
if not os.path.isfile(zip_path): if not os.path.isfile(zip_path):
zip_url = "{}{}.zip".format(deps_url, urllib.quote(full_spec)) zip_url = "{}{}.zip".format(deps_url, urllib.parse.quote(full_spec))
download_file(full_spec, zip_url, zip_path) download_file(full_spec, zip_url, zip_path)
print("Extracting {}...".format(full_spec), end='') print("Extracting {}...".format(full_spec), end='')

View file

@ -18,7 +18,7 @@ import re
import subprocess import subprocess
import sys import sys
import traceback import traceback
import urllib2 import six.moves.urllib as urllib
import glob import glob
from mach.decorators import ( from mach.decorators import (
@ -220,7 +220,7 @@ class MachCommands(CommandBase):
try: try:
content_base64 = download_bytes("Chromium HSTS preload list", chromium_hsts_url) content_base64 = download_bytes("Chromium HSTS preload list", chromium_hsts_url)
except urllib2.URLError: except urllib.error.URLError:
print("Unable to download chromium HSTS preload list; are you connected to the internet?") print("Unable to download chromium HSTS preload list; are you connected to the internet?")
sys.exit(1) sys.exit(1)
@ -244,7 +244,7 @@ class MachCommands(CommandBase):
with open(path.join(preload_path, preload_filename), 'w') as fd: with open(path.join(preload_path, preload_filename), 'w') as fd:
json.dump(entries, fd, indent=4) json.dump(entries, fd, indent=4)
except ValueError, e: except ValueError as e:
print("Unable to parse chromium HSTS preload list, has the format changed?") print("Unable to parse chromium HSTS preload list, has the format changed?")
sys.exit(1) sys.exit(1)
@ -258,7 +258,7 @@ class MachCommands(CommandBase):
try: try:
content = download_bytes("Public suffix list", list_url) content = download_bytes("Public suffix list", list_url)
except urllib2.URLError: except urllib.error.URLError:
print("Unable to download the public suffix list; are you connected to the internet?") print("Unable to download the public suffix list; are you connected to the internet?")
sys.exit(1) sys.exit(1)

View file

@ -16,7 +16,7 @@ import platform
import shutil import shutil
import subprocess import subprocess
import sys import sys
import urllib import six.moves.urllib as urllib
import zipfile import zipfile
import stat import stat
@ -498,7 +498,7 @@ class MachCommands(CommandBase):
print("Downloading GStreamer dependencies") print("Downloading GStreamer dependencies")
gst_url = "https://servo-deps.s3.amazonaws.com/gstreamer/%s" % gst_lib_zip gst_url = "https://servo-deps.s3.amazonaws.com/gstreamer/%s" % gst_lib_zip
print(gst_url) print(gst_url)
urllib.urlretrieve(gst_url, gst_lib_zip) urllib.request.urlretrieve(gst_url, gst_lib_zip)
zip_ref = zipfile.ZipFile(gst_lib_zip, "r") zip_ref = zipfile.ZipFile(gst_lib_zip, "r")
zip_ref.extractall(gst_dir) zip_ref.extractall(gst_dir)
os.remove(gst_lib_zip) os.remove(gst_lib_zip)

View file

@ -7,6 +7,8 @@
# option. This file may not be copied, modified, or distributed # option. This file may not be copied, modified, or distributed
# except according to those terms. # except according to those terms.
from __future__ import print_function
from errno import ENOENT as NO_SUCH_FILE_OR_DIRECTORY from errno import ENOENT as NO_SUCH_FILE_OR_DIRECTORY
from glob import glob from glob import glob
import shutil import shutil
@ -25,7 +27,7 @@ import tarfile
import zipfile import zipfile
from xml.etree.ElementTree import XML from xml.etree.ElementTree import XML
from servo.util import download_file from servo.util import download_file
import urllib2 import six.moves.urllib as urllib
from bootstrap import check_gstreamer_lib from bootstrap import check_gstreamer_lib
from mach.decorators import CommandArgument from mach.decorators import CommandArgument
@ -105,7 +107,7 @@ def archive_deterministically(dir_to_archive, dest_archive, prepend_path=None):
# packaging (in case of exceptional situations like running out of disk space). # packaging (in case of exceptional situations like running out of disk space).
# TODO do this in a temporary folder after #11983 is fixed # TODO do this in a temporary folder after #11983 is fixed
temp_file = '{}.temp~'.format(dest_archive) temp_file = '{}.temp~'.format(dest_archive)
with os.fdopen(os.open(temp_file, os.O_WRONLY | os.O_CREAT, 0644), 'w') as out_file: with os.fdopen(os.open(temp_file, os.O_WRONLY | os.O_CREAT, 0o644), 'w') as out_file:
if dest_archive.endswith('.zip'): if dest_archive.endswith('.zip'):
with zipfile.ZipFile(temp_file, 'w', zipfile.ZIP_DEFLATED) as zip_file: with zipfile.ZipFile(temp_file, 'w', zipfile.ZIP_DEFLATED) as zip_file:
for entry in file_list: for entry in file_list:
@ -350,15 +352,15 @@ class CommandBase(object):
version_line = subprocess.check_output(["rustup" + BIN_SUFFIX, "--version"]) version_line = subprocess.check_output(["rustup" + BIN_SUFFIX, "--version"])
except OSError as e: except OSError as e:
if e.errno == NO_SUCH_FILE_OR_DIRECTORY: if e.errno == NO_SUCH_FILE_OR_DIRECTORY:
print "It looks like rustup is not installed. See instructions at " \ print("It looks like rustup is not installed. See instructions at "
"https://github.com/servo/servo/#setting-up-your-environment" "https://github.com/servo/servo/#setting-up-your-environment")
print print()
return 1 return 1
raise raise
version = tuple(map(int, re.match("rustup (\d+)\.(\d+)\.(\d+)", version_line).groups())) version = tuple(map(int, re.match("rustup (\d+)\.(\d+)\.(\d+)", version_line).groups()))
if version < (1, 11, 0): if version < (1, 11, 0):
print "rustup is at version %s.%s.%s, Servo requires 1.11.0 or more recent." % version print("rustup is at version %s.%s.%s, Servo requires 1.11.0 or more recent." % version)
print "Try running 'rustup self update'." print("Try running 'rustup self update'.")
return 1 return 1
toolchain = self.toolchain() toolchain = self.toolchain()
if platform.system() == "Windows": if platform.system() == "Windows":
@ -504,15 +506,15 @@ class CommandBase(object):
nightly_date = nightly_date.strip() nightly_date = nightly_date.strip()
# Fetch the filename to download from the build list # Fetch the filename to download from the build list
repository_index = NIGHTLY_REPOSITORY_URL + "?list-type=2&prefix=nightly" repository_index = NIGHTLY_REPOSITORY_URL + "?list-type=2&prefix=nightly"
req = urllib2.Request( req = urllib.request.Request(
"{}/{}/{}".format(repository_index, os_prefix, nightly_date)) "{}/{}/{}".format(repository_index, os_prefix, nightly_date))
try: try:
response = urllib2.urlopen(req).read() response = urllib.request.urlopen(req).read()
tree = XML(response) tree = XML(response)
namespaces = {'ns': tree.tag[1:tree.tag.index('}')]} namespaces = {'ns': tree.tag[1:tree.tag.index('}')]}
file_to_download = tree.find('ns:Contents', namespaces).find( file_to_download = tree.find('ns:Contents', namespaces).find(
'ns:Key', namespaces).text 'ns:Key', namespaces).text
except urllib2.URLError as e: except urllib.error.URLError as e:
print("Could not fetch the available nightly versions from the repository : {}".format( print("Could not fetch the available nightly versions from the repository : {}".format(
e.reason)) e.reason))
sys.exit(1) sys.exit(1)

View file

@ -14,7 +14,7 @@ from time import time
import signal import signal
import sys import sys
import tempfile import tempfile
import urllib2 import six.moves.urllib as urllib
import json import json
import subprocess import subprocess
@ -201,7 +201,7 @@ class MachCommands(CommandBase):
category='devenv') category='devenv')
def rustup(self): def rustup(self):
url = get_static_rust_lang_org_dist() + "/channel-rust-nightly-date.txt" url = get_static_rust_lang_org_dist() + "/channel-rust-nightly-date.txt"
nightly_date = urllib2.urlopen(url, **get_urlopen_kwargs()).read() nightly_date = urllib.request.urlopen(url, **get_urlopen_kwargs()).read()
toolchain = "nightly-" + nightly_date toolchain = "nightly-" + nightly_date
filename = path.join(self.context.topdir, "rust-toolchain") filename = path.join(self.context.topdir, "rust-toolchain")
with open(filename, "w") as f: with open(filename, "w") as f:

View file

@ -7,6 +7,8 @@
# option. This file may not be copied, modified, or distributed # option. This file may not be copied, modified, or distributed
# except according to those terms. # except according to those terms.
from __future__ import print_function
import os import os
import sys import sys
@ -20,7 +22,7 @@ class Lint(LintRunner):
def _get_wpt_files(self, suite): def _get_wpt_files(self, suite):
working_dir = os.path.join(WPT_PATH, suite, '') working_dir = os.path.join(WPT_PATH, suite, '')
file_iter = self.get_files(working_dir, exclude_dirs=[]) file_iter = self.get_files(working_dir, exclude_dirs=[])
print '\nRunning the WPT lint on %s...' % working_dir print('\nRunning the WPT lint on %s...' % working_dir)
for f in file_iter: for f in file_iter:
if filter_file(f): if filter_file(f):
yield f[len(working_dir):] yield f[len(working_dir):]

View file

@ -7,6 +7,8 @@
# option. This file may not be copied, modified, or distributed # option. This file may not be copied, modified, or distributed
# except according to those terms. # except according to those terms.
from __future__ import print_function
import fileinput import fileinput
import re import re
import random import random
@ -28,7 +30,7 @@ def init_variables(if_blocks):
def deleteStatements(file_name, line_numbers): def deleteStatements(file_name, line_numbers):
for line in fileinput.input(file_name, inplace=True): for line in fileinput.input(file_name, inplace=True):
if fileinput.lineno() not in line_numbers: if fileinput.lineno() not in line_numbers:
print line.rstrip() print(line.rstrip())
class Strategy: class Strategy:
@ -48,7 +50,7 @@ class Strategy:
for line in fileinput.input(file_name, inplace=True): for line in fileinput.input(file_name, inplace=True):
if fileinput.lineno() == mutation_line_number: if fileinput.lineno() == mutation_line_number:
line = re.sub(self._replace_strategy['regex'], self._replace_strategy['replaceString'], line) line = re.sub(self._replace_strategy['regex'], self._replace_strategy['replaceString'], line)
print line.rstrip() print(line.rstrip())
return mutation_line_number return mutation_line_number

View file

@ -19,7 +19,7 @@ import shutil
import subprocess import subprocess
import sys import sys
import tempfile import tempfile
import urllib import six.moves.urllib as urllib
from mach.decorators import ( from mach.decorators import (
CommandArgument, CommandArgument,
@ -594,7 +594,7 @@ class PackageCommands(CommandBase):
"/secrets/v1/secret/project/servo/" + "/secrets/v1/secret/project/servo/" +
name name
) )
return json.load(urllib.urlopen(url))["secret"] return json.load(urllib.request.urlopen(url))["secret"]
def get_s3_secret(): def get_s3_secret():
aws_access_key = None aws_access_key = None

View file

@ -18,10 +18,11 @@ import copy
from collections import OrderedDict from collections import OrderedDict
import time import time
import json import json
import urllib2 import six.moves.urllib as urllib
import base64 import base64
import shutil import shutil
import subprocess import subprocess
from six import iteritems
from mach.registrar import Registrar from mach.registrar import Registrar
from mach.decorators import ( from mach.decorators import (
@ -59,7 +60,7 @@ TEST_SUITES = OrderedDict([
"include_arg": "test_name"}), "include_arg": "test_name"}),
]) ])
TEST_SUITES_BY_PREFIX = {path: k for k, v in TEST_SUITES.iteritems() if "paths" in v for path in v["paths"]} TEST_SUITES_BY_PREFIX = {path: k for k, v in iteritems(TEST_SUITES) if "paths" in v for path in v["paths"]}
def create_parser_wpt(): def create_parser_wpt():
@ -158,7 +159,7 @@ class MachCommands(CommandBase):
return 1 return 1
test_start = time.time() test_start = time.time()
for suite, tests in selected_suites.iteritems(): for suite, tests in iteritems(selected_suites):
props = suites[suite] props = suites[suite]
kwargs = props.get("kwargs", {}) kwargs = props.get("kwargs", {})
if tests: if tests:
@ -174,7 +175,7 @@ class MachCommands(CommandBase):
def suite_for_path(self, path_arg): def suite_for_path(self, path_arg):
if os.path.exists(path.abspath(path_arg)): if os.path.exists(path.abspath(path_arg)):
abs_path = path.abspath(path_arg) abs_path = path.abspath(path_arg)
for prefix, suite in TEST_SUITES_BY_PREFIX.iteritems(): for prefix, suite in iteritems(TEST_SUITES_BY_PREFIX):
if abs_path.startswith(prefix): if abs_path.startswith(prefix):
return suite return suite
return None return None
@ -510,9 +511,9 @@ class MachCommands(CommandBase):
elif tracker_api.endswith('/'): elif tracker_api.endswith('/'):
tracker_api = tracker_api[0:-1] tracker_api = tracker_api[0:-1]
query = urllib2.quote(failure['test'], safe='') query = urllib.parse.quote(failure['test'], safe='')
request = urllib2.Request("%s/query.py?name=%s" % (tracker_api, query)) request = urllib.request.Request("%s/query.py?name=%s" % (tracker_api, query))
search = urllib2.urlopen(request) search = urllib.request.urlopen(request)
data = json.load(search) data = json.load(search)
if len(data) == 0: if len(data) == 0:
actual_failures += [failure] actual_failures += [failure]
@ -521,11 +522,11 @@ class MachCommands(CommandBase):
else: else:
qstr = "repo:servo/servo+label:I-intermittent+type:issue+state:open+%s" % failure['test'] qstr = "repo:servo/servo+label:I-intermittent+type:issue+state:open+%s" % failure['test']
# we want `/` to get quoted, but not `+` (github's API doesn't like that), so we set `safe` to `+` # we want `/` to get quoted, but not `+` (github's API doesn't like that), so we set `safe` to `+`
query = urllib2.quote(qstr, safe='+') query = urllib.parse.quote(qstr, safe='+')
request = urllib2.Request("https://api.github.com/search/issues?q=%s" % query) request = urllib.request.Request("https://api.github.com/search/issues?q=%s" % query)
if encoded_auth: if encoded_auth:
request.add_header("Authorization", "Basic %s" % encoded_auth) request.add_header("Authorization", "Basic %s" % encoded_auth)
search = urllib2.urlopen(request) search = urllib.request.urlopen(request)
data = json.load(search) data = json.load(search)
if data['total_count'] == 0: if data['total_count'] == 0:
actual_failures += [failure] actual_failures += [failure]

View file

@ -16,11 +16,11 @@ import platform
import shutil import shutil
from socket import error as socket_error from socket import error as socket_error
import stat import stat
import StringIO from io import BytesIO
import sys import sys
import time import time
import zipfile import zipfile
import urllib2 import six.moves.urllib as urllib
try: try:
@ -101,10 +101,10 @@ def download(desc, src, writer, start_byte=0):
dumb = (os.environ.get("TERM") == "dumb") or (not sys.stdout.isatty()) dumb = (os.environ.get("TERM") == "dumb") or (not sys.stdout.isatty())
try: try:
req = urllib2.Request(src) req = urllib.request.Request(src)
if start_byte: if start_byte:
req = urllib2.Request(src, headers={'Range': 'bytes={}-'.format(start_byte)}) req = urllib.request.Request(src, headers={'Range': 'bytes={}-'.format(start_byte)})
resp = urllib2.urlopen(req, **get_urlopen_kwargs()) resp = urllib.request.urlopen(req, **get_urlopen_kwargs())
fsize = None fsize = None
if resp.info().getheader('Content-Length'): if resp.info().getheader('Content-Length'):
@ -136,16 +136,16 @@ def download(desc, src, writer, start_byte=0):
if not dumb: if not dumb:
print() print()
except urllib2.HTTPError, e: except urllib.error.HTTPError as e:
print("Download failed ({}): {} - {}".format(e.code, e.reason, src)) print("Download failed ({}): {} - {}".format(e.code, e.reason, src))
if e.code == 403: if e.code == 403:
print("No Rust compiler binary available for this platform. " print("No Rust compiler binary available for this platform. "
"Please see https://github.com/servo/servo/#prerequisites") "Please see https://github.com/servo/servo/#prerequisites")
sys.exit(1) sys.exit(1)
except urllib2.URLError, e: except urllib.error.URLError as e:
print("Error downloading {}: {}. The failing URL was: {}".format(desc, e.reason, src)) print("Error downloading {}: {}. The failing URL was: {}".format(desc, e.reason, src))
sys.exit(1) sys.exit(1)
except socket_error, e: except socket_error as e:
print("Looks like there's a connectivity issue, check your Internet connection. {}".format(e)) print("Looks like there's a connectivity issue, check your Internet connection. {}".format(e))
sys.exit(1) sys.exit(1)
except KeyboardInterrupt: except KeyboardInterrupt:
@ -154,7 +154,7 @@ def download(desc, src, writer, start_byte=0):
def download_bytes(desc, src): def download_bytes(desc, src):
content_writer = StringIO.StringIO() content_writer = BytesIO()
download(desc, src, content_writer) download(desc, src, content_writer)
return content_writer.getvalue() return content_writer.getvalue()

View file

@ -7,6 +7,8 @@
# option. This file may not be copied, modified, or distributed # option. This file may not be copied, modified, or distributed
# except according to those terms. # except according to those terms.
from __future__ import print_function
import contextlib import contextlib
import fnmatch import fnmatch
import imp import imp
@ -14,7 +16,7 @@ import itertools
import json import json
import os import os
import re import re
import StringIO from io import StringIO
import subprocess import subprocess
import sys import sys
@ -23,6 +25,7 @@ import toml
import voluptuous import voluptuous
import yaml import yaml
from licenseck import OLD_MPL, MPL, APACHE, COPYRIGHT, licenses_toml, licenses_dep_toml from licenseck import OLD_MPL, MPL, APACHE, COPYRIGHT, licenses_toml, licenses_dep_toml
from six import iteritems
topdir = os.path.abspath(os.path.dirname(sys.argv[0])) topdir = os.path.abspath(os.path.dirname(sys.argv[0]))
wpt = os.path.join(topdir, "tests", "wpt") wpt = os.path.join(topdir, "tests", "wpt")
@ -342,7 +345,7 @@ def check_flake8(file_name, contents):
"E501", # 80 character line length; the standard tidy process will enforce line length "E501", # 80 character line length; the standard tidy process will enforce line length
} }
output = StringIO.StringIO() output = StringIO()
with stdout_redirect(output): with stdout_redirect(output):
check_code(contents, ignore=ignore) check_code(contents, ignore=ignore)
for error in output.getvalue().splitlines(): for error in output.getvalue().splitlines():
@ -378,7 +381,7 @@ def check_lock(file_name, contents):
if name not in packages_by_name: if name not in packages_by_name:
yield (1, "duplicates are allowed for `{}` but it is not a dependency".format(name)) yield (1, "duplicates are allowed for `{}` but it is not a dependency".format(name))
for (name, packages) in packages_by_name.iteritems(): for (name, packages) in iteritems(packages_by_name):
has_duplicates = len(packages) > 1 has_duplicates = len(packages) > 1
duplicates_allowed = name in exceptions duplicates_allowed = name in exceptions
@ -422,7 +425,7 @@ def check_lock(file_name, contents):
visited_whitelisted_packages[dependency_name][package_name] = True visited_whitelisted_packages[dependency_name][package_name] = True
# Check if all the exceptions to blocked packages actually depend on the blocked package # Check if all the exceptions to blocked packages actually depend on the blocked package
for dependency_name, package_names in blocked_packages.iteritems(): for dependency_name, package_names in iteritems(blocked_packages):
for package_name in package_names: for package_name in package_names:
if not visited_whitelisted_packages[dependency_name].get(package_name): if not visited_whitelisted_packages[dependency_name].get(package_name):
fmt = "Package {} is not required to be an exception of blocked package {}." fmt = "Package {} is not required to be an exception of blocked package {}."
@ -514,7 +517,7 @@ def check_manifest_dirs(config_file, print_text=True):
lines = conf_file.splitlines(True) lines = conf_file.splitlines(True)
if print_text: if print_text:
print '\rChecking the wpt manifest file...' print('\rChecking the wpt manifest file...')
p = parser.parse(lines) p = parser.parse(lines)
paths = rec_parse(wpt_path("web-platform-tests"), p) paths = rec_parse(wpt_path("web-platform-tests"), p)
@ -908,7 +911,7 @@ def check_config_file(config_file, print_text=True):
lines = conf_file.splitlines(True) lines = conf_file.splitlines(True)
if print_text: if print_text:
print '\rChecking the config file...' print('\rChecking the config file...')
config_content = toml.loads(conf_file) config_content = toml.loads(conf_file)
exclude = config_content.get("ignore", {}) exclude = config_content.get("ignore", {})
@ -995,7 +998,7 @@ def parse_config(config_file):
def check_directory_files(directories, print_text=True): def check_directory_files(directories, print_text=True):
if print_text: if print_text:
print '\rChecking directories for correct file extensions...' print('\rChecking directories for correct file extensions...')
for directory, file_extensions in directories.items(): for directory, file_extensions in directories.items():
files = sorted(os.listdir(directory)) files = sorted(os.listdir(directory))
for filename in files: for filename in files:
@ -1015,7 +1018,7 @@ def collect_errors_for_files(files_to_check, checking_functions, line_checking_f
if not has_element: if not has_element:
raise StopIteration raise StopIteration
if print_text: if print_text:
print '\rChecking files for tidiness...' print('\rChecking files for tidiness...')
for filename in files_to_check: for filename in files_to_check:
if not os.path.exists(filename): if not os.path.exists(filename):
@ -1037,7 +1040,7 @@ def collect_errors_for_files(files_to_check, checking_functions, line_checking_f
def get_dep_toml_files(only_changed_files=False): def get_dep_toml_files(only_changed_files=False):
if not only_changed_files: if not only_changed_files:
print '\nRunning the dependency licensing lint...' print('\nRunning the dependency licensing lint...')
for root, directories, filenames in os.walk(".cargo"): for root, directories, filenames in os.walk(".cargo"):
for filename in filenames: for filename in filenames:
if filename == "Cargo.toml": if filename == "Cargo.toml":
@ -1136,11 +1139,11 @@ def scan(only_changed_files=False, progress=True, stylo=False):
error = None error = None
for error in errors: for error in errors:
colorama.init() colorama.init()
print "\r\033[94m{}\033[0m:\033[93m{}\033[0m: \033[91m{}\033[0m".format(*error) print("\r\033[94m{}\033[0m:\033[93m{}\033[0m: \033[91m{}\033[0m".format(*error))
print print()
if error is None: if error is None:
colorama.init() colorama.init()
print "\033[92mtidy reported no errors.\033[0m" print("\033[92mtidy reported no errors.\033[0m")
return int(error is not None) return int(error is not None)