diff --git a/deps/v8/test/benchmarks/csuite/compare-baseline.py b/deps/v8/test/benchmarks/csuite/compare-baseline.py
index ec4bfe467d4ec6..bbc505e3a724a3 100755
--- a/deps/v8/test/benchmarks/csuite/compare-baseline.py
+++ b/deps/v8/test/benchmarks/csuite/compare-baseline.py
@@ -67,7 +67,7 @@ def NormalizedSigmaToString(normalized_sigma):
def ComputeZ(baseline_avg, baseline_sigma, mean, n):
if baseline_sigma == 0:
- return 1000.0;
+ return 1000.0
return abs((mean - baseline_avg) / (baseline_sigma / math.sqrt(n)))
# Values from http://www.fourmilab.ch/rpkp/experiments/analysis/zCalc.html
@@ -185,7 +185,7 @@ def ProcessResults(opts, results, baselines):
if suite in results:
for result in results[suite]:
ProcessOneResultLine(opts, suite, result[0], result[1], result[2],
- result[3], baselines);
+ result[3], baselines)
PrintSeparator(opts, baselines, False)
def ProcessFile(file_path):
diff --git a/deps/v8/test/debugger/testcfg.py b/deps/v8/test/debugger/testcfg.py
index eebb578fb7583c..f31a0d46a35582 100644
--- a/deps/v8/test/debugger/testcfg.py
+++ b/deps/v8/test/debugger/testcfg.py
@@ -36,7 +36,7 @@ def __init__(self, *args, **kwargs):
def _parse_source_files(self, source):
files_list = [] # List of file names to append to command arguments.
- files_match = FILES_PATTERN.search(source);
+ files_match = FILES_PATTERN.search(source)
# Accept several lines of 'Files:'.
while True:
if files_match:
diff --git a/deps/v8/test/mjsunit/testcfg.py b/deps/v8/test/mjsunit/testcfg.py
index a8297798721f94..594e8ba7194ef6 100644
--- a/deps/v8/test/mjsunit/testcfg.py
+++ b/deps/v8/test/mjsunit/testcfg.py
@@ -89,7 +89,7 @@ def __init__(self, *args, **kwargs):
source = self.get_source()
files_list = [] # List of file names to append to command arguments.
- files_match = FILES_PATTERN.search(source);
+ files_match = FILES_PATTERN.search(source)
# Accept several lines of 'Files:'.
while True:
if files_match:
diff --git a/deps/v8/test/preparser/testcfg.py b/deps/v8/test/preparser/testcfg.py
index 0567564278e113..cb1d3ac6dd4b73 100644
--- a/deps/v8/test/preparser/testcfg.py
+++ b/deps/v8/test/preparser/testcfg.py
@@ -62,8 +62,8 @@ def MkTest(replacement, expectation):
testname = name
testsource = source
for key in replacement.keys():
- testname = testname.replace("$" + key, replacement[key]);
- testsource = testsource.replace("$" + key, replacement[key]);
+ testname = testname.replace("$" + key, replacement[key])
+ testsource = testsource.replace("$" + key, replacement[key])
Test(testname, testsource, expectation)
return MkTest
execfile(pathname, {"Test": Test, "Template": Template})
diff --git a/deps/v8/test/wasm-js/testcfg.py b/deps/v8/test/wasm-js/testcfg.py
index 8d67366e30b7ab..22a7ead6cdfc37 100644
--- a/deps/v8/test/wasm-js/testcfg.py
+++ b/deps/v8/test/wasm-js/testcfg.py
@@ -64,7 +64,7 @@ def _get_files_params(self):
thisdir = os.path.dirname(self._get_source_path())
script = os.path.join(thisdir, script)
else:
- raise Exception("Unexpected absolute path for script: \"%s\"" % script);
+ raise Exception("Unexpected absolute path for script: \"%s\"" % script)
files.append(script)
diff --git a/deps/v8/test/webkit/testcfg.py b/deps/v8/test/webkit/testcfg.py
index 500f44656b9e36..cac5ecb64388fb 100644
--- a/deps/v8/test/webkit/testcfg.py
+++ b/deps/v8/test/webkit/testcfg.py
@@ -60,7 +60,7 @@ def __init__(self, *args, **kwargs):
def _parse_source_files(self, source):
files_list = [] # List of file names to append to command arguments.
- files_match = FILES_PATTERN.search(source);
+ files_match = FILES_PATTERN.search(source)
# Accept several lines of 'Files:'.
while True:
if files_match:
diff --git a/deps/v8/third_party/binutils/detect_v8_host_arch.py b/deps/v8/third_party/binutils/detect_v8_host_arch.py
index 32587746922aba..34aa76382d225d 100644
--- a/deps/v8/third_party/binutils/detect_v8_host_arch.py
+++ b/deps/v8/third_party/binutils/detect_v8_host_arch.py
@@ -40,13 +40,13 @@ def main():
return 0
def DoMain(_):
- return DetectHostArch();
+ return DetectHostArch()
def DetectHostArch():
"""Hook to be called from gyp without starting a separate python
interpreter."""
host_arch = platform.machine()
- host_system = platform.system();
+ host_system = platform.system()
# Convert machine type to format recognized by gyp.
if re.match(r'i.86', host_arch) or host_arch == 'i86pc':
diff --git a/deps/v8/tools/adb-d8.py b/deps/v8/tools/adb-d8.py
index 4d4390fe05f48d..fdddda2cda53c7 100755
--- a/deps/v8/tools/adb-d8.py
+++ b/deps/v8/tools/adb-d8.py
@@ -29,9 +29,9 @@
def CreateFileHandlerClass(root_dirs, verbose):
class FileHandler(SocketServer.BaseRequestHandler):
def handle(self):
- data = self.request.recv(1024);
+ data = self.request.recv(1024)
while data[-1] != "\0":
- data += self.request.recv(1024);
+ data += self.request.recv(1024)
filename = data[0:-1]
@@ -47,7 +47,7 @@ def handle(self):
sys.stdout.write("Serving {}\r\n".format(os.path.relpath(filename)))
with open(filename) as f:
- contents = f.read();
+ contents = f.read()
self.request.sendall(struct.pack("!i", len(contents)))
self.request.sendall(contents)
diff --git a/deps/v8/tools/callstats.py b/deps/v8/tools/callstats.py
index 70db89b5da5a09..9be75daa728440 100755
--- a/deps/v8/tools/callstats.py
+++ b/deps/v8/tools/callstats.py
@@ -209,7 +209,7 @@ def run_site(site, domain, args, timeout=None):
# If none of these two happened, then chrome apparently crashed, so
# it must be called again.
if status != 124 and status != 0:
- print("CHROME CRASHED, REPEATING RUN");
+ print("CHROME CRASHED, REPEATING RUN")
continue
# If the stats file is empty, chrome must be called again.
if os.path.isfile(result) and os.path.getsize(result) > 0:
@@ -227,7 +227,7 @@ def run_site(site, domain, args, timeout=None):
timeout += 2 ** retries_since_good_run
retries_since_good_run += 1
print("EMPTY RESULT, REPEATING RUN ({})".format(
- retries_since_good_run));
+ retries_since_good_run))
finally:
if not args.user_data_dir:
shutil.rmtree(user_data_dir)
@@ -366,7 +366,7 @@ def add_category_total(entries, groups, category_prefix):
def read_stats(path, domain, args):
- groups = [];
+ groups = []
if args.aggregate:
groups = [
('Group-IC', re.compile(".*IC_.*")),
@@ -529,11 +529,11 @@ def sum_up(parent, key, other):
# dominates execution time and speedometer is measured elsewhere.
excluded_domains = ['adwords.google.com', 'speedometer-angular',
'speedometer-jquery', 'speedometer-backbone',
- 'speedometer-ember', 'speedometer-vanilla'];
+ 'speedometer-ember', 'speedometer-vanilla']
# Sum up all the entries/metrics from all non-excluded domains
for domain, entries in domains.items():
if domain in excluded_domains:
- continue;
+ continue
for key, domain_stats in entries.items():
if key not in total:
total[key] = {}
diff --git a/deps/v8/tools/deprecation_stats.py b/deps/v8/tools/deprecation_stats.py
index 628eebc7793b39..ec5f4d6c7d3934 100755
--- a/deps/v8/tools/deprecation_stats.py
+++ b/deps/v8/tools/deprecation_stats.py
@@ -43,7 +43,7 @@ def GetBlame(file_path):
blame_list.append(current_blame)
return blame_list
-RE_MACRO_END = re.compile(r"\);");
+RE_MACRO_END = re.compile(r"\);")
RE_DEPRECATE_MACRO = re.compile(r"\(.*?,(.*)\);", re.MULTILINE)
def FilterAndPrint(blame_list, macro, before):
diff --git a/deps/v8/tools/gen-inlining-tests.py b/deps/v8/tools/gen-inlining-tests.py
index 400386c49c6607..c6d59573d99652 100644
--- a/deps/v8/tools/gen-inlining-tests.py
+++ b/deps/v8/tools/gen-inlining-tests.py
@@ -393,11 +393,11 @@ def printtest(flags):
if isinstance(result, tuple) and result[0] == "throw":
result = ('return', 2 + local)
elif catchReturns and not catchWithLocal:
- write(" return 2 + ex;");
+ write(" return 2 + ex;")
if isinstance(result, tuple) and result[0] == "throw":
result = ('return', 2 + result[1])
elif catchWithLocal:
- write(" local += ex;");
+ write(" local += ex;")
if isinstance(result, tuple) and result[0] == "throw":
local += result[1]
result = None
diff --git a/deps/v8/tools/gen-postmortem-metadata.py b/deps/v8/tools/gen-postmortem-metadata.py
index 02b08735980693..51efec8b2fb34e 100644
--- a/deps/v8/tools/gen-postmortem-metadata.py
+++ b/deps/v8/tools/gen-postmortem-metadata.py
@@ -229,7 +229,7 @@
{ 'name': 'class_SharedFunctionInfo__function_data__Object',
'value': 'SharedFunctionInfo::kFunctionDataOffset' },
-];
+]
#
# The following useful fields are missing accessors, so we define fake ones.
@@ -276,7 +276,7 @@
'Code, instruction_start, uintptr_t, kHeaderSize',
'Code, instruction_size, int, kInstructionSizeOffset',
'String, length, int32_t, kLengthOffset',
-];
+]
#
# The following is a whitelist of classes we expect to find when scanning the
@@ -287,17 +287,17 @@
'ConsString', 'FixedArray', 'HeapNumber', 'JSArray', 'JSFunction',
'JSObject', 'JSRegExp', 'JSValue', 'Map', 'Oddball', 'Script',
'SeqOneByteString', 'SharedFunctionInfo', 'ScopeInfo', 'JSPromise'
-];
+]
#
# The following structures store high-level representations of the structures
# for which we're going to emit descriptive constants.
#
-types = {}; # set of all type names
-typeclasses = {}; # maps type names to corresponding class names
-klasses = {}; # known classes, including parents
-fields = []; # field declarations
+types = {} # set of all type names
+typeclasses = {} # maps type names to corresponding class names
+klasses = {} # known classes, including parents
+fields = [] # field declarations
header = '''
/*
@@ -326,7 +326,7 @@
#undef FRAME_CONST
-''' % sys.argv[0];
+''' % sys.argv[0]
footer = '''
}
@@ -340,14 +340,14 @@
#
def get_base_class(klass):
if (klass == 'Object'):
- return klass;
+ return klass
if (not (klass in klasses)):
- return None;
+ return None
- k = klasses[klass];
+ k = klasses[klass]
- return get_base_class(k['parent']);
+ return get_base_class(k['parent'])
#
# Loads class hierarchy and type information from "objects.h" etc.
@@ -356,9 +356,9 @@ def load_objects():
#
# Construct a dictionary for the classes we're sure should be present.
#
- checktypes = {};
+ checktypes = {}
for klass in expected_classes:
- checktypes[klass] = True;
+ checktypes[klass] = True
for filename in sys.argv[2:]:
@@ -367,16 +367,16 @@ def load_objects():
if (len(checktypes) > 0):
for klass in checktypes:
- print('error: expected class \"%s\" not found' % klass);
+ print('error: expected class \"%s\" not found' % klass)
- sys.exit(1);
+ sys.exit(1)
def load_objects_from_file(objfilename, checktypes):
- objfile = open(objfilename, 'r');
- in_insttype = False;
+ objfile = open(objfilename, 'r')
+ in_insttype = False
- typestr = '';
+ typestr = ''
#
# Iterate the header file line-by-line to collect type and class
@@ -386,41 +386,41 @@ def load_objects_from_file(objfilename, checktypes):
#
for line in objfile:
if (line.startswith('enum InstanceType : uint16_t {')):
- in_insttype = True;
- continue;
+ in_insttype = True
+ continue
if (in_insttype and line.startswith('};')):
- in_insttype = False;
- continue;
+ in_insttype = False
+ continue
- line = re.sub('//.*', '', line.strip());
+ line = re.sub('//.*', '', line.strip())
if (in_insttype):
- typestr += line;
- continue;
+ typestr += line
+ continue
match = re.match(r'class(?:\s+V8_EXPORT(?:_PRIVATE)?)?'
r'\s+(\w[^:]*)'
r'(?:: public (\w[^{]*))?\s*{\s*',
- line);
+ line)
if (match):
- klass = match.group(1).strip();
- pklass = match.group(2);
+ klass = match.group(1).strip()
+ pklass = match.group(2)
if (pklass):
# Strip potential template arguments from parent
# class.
- match = re.match(r'(\w+)(<.*>)?', pklass.strip());
- pklass = match.group(1).strip();
+ match = re.match(r'(\w+)(<.*>)?', pklass.strip())
+ pklass = match.group(1).strip()
- klasses[klass] = { 'parent': pklass };
+ klasses[klass] = { 'parent': pklass }
#
# Process the instance type declaration.
#
- entries = typestr.split(',');
+ entries = typestr.split(',')
for entry in entries:
- types[re.sub('\s*=.*', '', entry).lstrip()] = True;
+ types[re.sub('\s*=.*', '', entry).lstrip()] = True
#
# Infer class names for each type based on a systematic transformation.
@@ -433,7 +433,7 @@ def load_objects_from_file(objfilename, checktypes):
#
# REGEXP behaves like REG_EXP, as in JS_REGEXP_TYPE => JSRegExp.
#
- usetype = re.sub('_REGEXP_', '_REG_EXP_', type);
+ usetype = re.sub('_REGEXP_', '_REG_EXP_', type)
#
# Remove the "_TYPE" suffix and then convert to camel case,
@@ -441,22 +441,22 @@ def load_objects_from_file(objfilename, checktypes):
# "JS_FUNCTION_TYPE" => "JSFunction").
#
if (not usetype.endswith('_TYPE')):
- continue;
+ continue
- usetype = usetype[0:len(usetype) - len('_TYPE')];
- parts = usetype.split('_');
- cctype = '';
+ usetype = usetype[0:len(usetype) - len('_TYPE')]
+ parts = usetype.split('_')
+ cctype = ''
if (parts[0] == 'JS'):
- cctype = 'JS';
- start = 1;
+ cctype = 'JS'
+ start = 1
else:
- cctype = '';
- start = 0;
+ cctype = ''
+ start = 0
for ii in range(start, len(parts)):
- part = parts[ii];
- cctype += part[0].upper() + part[1:].lower();
+ part = parts[ii]
+ cctype += part[0].upper() + part[1:].lower()
#
# Mapping string types is more complicated. Both types and
@@ -489,26 +489,26 @@ def load_objects_from_file(objfilename, checktypes):
cctype.find('Sliced') == -1):
if (cctype.find('OneByte') != -1):
cctype = re.sub('OneByteString$',
- 'SeqOneByteString', cctype);
+ 'SeqOneByteString', cctype)
else:
cctype = re.sub('String$',
- 'SeqString', cctype);
+ 'SeqString', cctype)
if (cctype.find('OneByte') == -1):
cctype = re.sub('String$', 'TwoByteString',
- cctype);
+ cctype)
if (not (cctype in klasses)):
- cctype = re.sub('OneByte', '', cctype);
- cctype = re.sub('TwoByte', '', cctype);
+ cctype = re.sub('OneByte', '', cctype)
+ cctype = re.sub('TwoByte', '', cctype)
#
# Despite all that, some types have no corresponding class.
#
if (cctype in klasses):
- typeclasses[type] = cctype;
+ typeclasses[type] = cctype
if (cctype in checktypes):
- del checktypes[cctype];
+ del checktypes[cctype]
#
# For a given macro call, pick apart the arguments and return an object
@@ -518,36 +518,36 @@ def parse_field(call):
# Replace newlines with spaces.
for ii in range(0, len(call)):
if (call[ii] == '\n'):
- call[ii] == ' ';
+ call[ii] == ' '
- idx = call.find('(');
- kind = call[0:idx];
- rest = call[idx + 1: len(call) - 1];
- args = re.split('\s*,\s*', rest);
+ idx = call.find('(')
+ kind = call[0:idx]
+ rest = call[idx + 1: len(call) - 1]
+ args = re.split('\s*,\s*', rest)
- consts = [];
+ consts = []
if (kind == 'ACCESSORS' or kind == 'ACCESSORS2' or
kind == 'ACCESSORS_GCSAFE'):
- klass = args[0];
- field = args[1];
+ klass = args[0]
+ field = args[1]
dtype = args[2].replace('<', '_').replace('>', '_')
- offset = args[3];
+ offset = args[3]
return ({
'name': 'class_%s__%s__%s' % (klass, field, dtype),
'value': '%s::%s' % (klass, offset)
- });
+ })
- assert(kind == 'SMI_ACCESSORS' or kind == 'ACCESSORS_TO_SMI');
- klass = args[0];
- field = args[1];
- offset = args[2];
+ assert(kind == 'SMI_ACCESSORS' or kind == 'ACCESSORS_TO_SMI')
+ klass = args[0]
+ field = args[1]
+ offset = args[2]
return ({
'name': 'class_%s__%s__%s' % (klass, field, 'SMI'),
'value': '%s::%s' % (klass, offset)
- });
+ })
#
# Load field offset information from objects-inl.h etc.
@@ -558,11 +558,11 @@ def load_fields():
load_fields_from_file(filename)
for body in extras_accessors:
- fields.append(parse_field('ACCESSORS(%s)' % body));
+ fields.append(parse_field('ACCESSORS(%s)' % body))
def load_fields_from_file(filename):
- inlfile = open(filename, 'r');
+ inlfile = open(filename, 'r')
#
# Each class's fields and the corresponding offsets are described in the
@@ -572,47 +572,47 @@ def load_fields_from_file(filename):
# call parse_field() to pick apart the invocation.
#
prefixes = [ 'ACCESSORS', 'ACCESSORS2', 'ACCESSORS_GCSAFE',
- 'SMI_ACCESSORS', 'ACCESSORS_TO_SMI' ];
- current = '';
- opens = 0;
+ 'SMI_ACCESSORS', 'ACCESSORS_TO_SMI' ]
+ current = ''
+ opens = 0
for line in inlfile:
if (opens > 0):
# Continuation line
for ii in range(0, len(line)):
if (line[ii] == '('):
- opens += 1;
+ opens += 1
elif (line[ii] == ')'):
- opens -= 1;
+ opens -= 1
if (opens == 0):
- break;
+ break
- current += line[0:ii + 1];
- continue;
+ current += line[0:ii + 1]
+ continue
for prefix in prefixes:
if (not line.startswith(prefix + '(')):
- continue;
+ continue
if (len(current) > 0):
- fields.append(parse_field(current));
- current = '';
+ fields.append(parse_field(current))
+ current = ''
for ii in range(len(prefix), len(line)):
if (line[ii] == '('):
- opens += 1;
+ opens += 1
elif (line[ii] == ')'):
- opens -= 1;
+ opens -= 1
if (opens == 0):
- break;
+ break
- current += line[0:ii + 1];
+ current += line[0:ii + 1]
if (len(current) > 0):
- fields.append(parse_field(current));
- current = '';
+ fields.append(parse_field(current))
+ current = ''
#
# Emit a block of constants.
@@ -625,56 +625,56 @@ def emit_set(out, consts):
name = ws.sub('', const['name'])
value = ws.sub('', str(const['value'])) # Can be a number.
out.write('int v8dbg_%s = %s;\n' % (name, value))
- out.write('\n');
+ out.write('\n')
#
# Emit the whole output file.
#
def emit_config():
- out = open(sys.argv[1], 'w');
+ out = open(sys.argv[1], 'w')
- out.write(header);
+ out.write(header)
- out.write('/* miscellaneous constants */\n');
- emit_set(out, consts_misc);
+ out.write('/* miscellaneous constants */\n')
+ emit_set(out, consts_misc)
- out.write('/* class type information */\n');
- consts = [];
+ out.write('/* class type information */\n')
+ consts = []
for typename in sorted(typeclasses):
- klass = typeclasses[typename];
+ klass = typeclasses[typename]
consts.append({
'name': 'type_%s__%s' % (klass, typename),
'value': typename
- });
+ })
- emit_set(out, consts);
+ emit_set(out, consts)
- out.write('/* class hierarchy information */\n');
- consts = [];
+ out.write('/* class hierarchy information */\n')
+ consts = []
for klassname in sorted(klasses):
- pklass = klasses[klassname]['parent'];
- bklass = get_base_class(klassname);
+ pklass = klasses[klassname]['parent']
+ bklass = get_base_class(klassname)
if (bklass != 'Object'):
- continue;
+ continue
if (pklass == None):
- continue;
+ continue
consts.append({
'name': 'parent_%s__%s' % (klassname, pklass),
'value': 0
- });
+ })
- emit_set(out, consts);
+ emit_set(out, consts)
- out.write('/* field information */\n');
- emit_set(out, fields);
+ out.write('/* field information */\n')
+ emit_set(out, fields)
- out.write(footer);
+ out.write(footer)
if (len(sys.argv) < 4):
- print('usage: %s output.cc objects.h objects-inl.h' % sys.argv[0]);
- sys.exit(2);
+ print('usage: %s output.cc objects.h objects-inl.h' % sys.argv[0])
+ sys.exit(2)
-load_objects();
-load_fields();
-emit_config();
+load_objects()
+load_fields()
+emit_config()
diff --git a/deps/v8/tools/generate-header-include-checks.py b/deps/v8/tools/generate-header-include-checks.py
index fa18d85bf50f14..a5f9f16909b310 100755
--- a/deps/v8/tools/generate-header-include-checks.py
+++ b/deps/v8/tools/generate-header-include-checks.py
@@ -129,7 +129,7 @@ def generate_gni(header_files):
# This list is filled automatically by tools/check_header_includes.py.
check_header_includes_sources = [
-""");
+""")
for header in header_files:
cc_file_name = get_cc_file_name(header)
gn.write(' "{}",\n'.format(os.path.relpath(cc_file_name, V8_DIR)))
diff --git a/deps/v8/tools/grokdump.py b/deps/v8/tools/grokdump.py
index 6d06d00418ba15..8cd2f36a47ea8e 100755
--- a/deps/v8/tools/grokdump.py
+++ b/deps/v8/tools/grokdump.py
@@ -167,7 +167,7 @@ def dump_region(reader, start, size, location):
if is_executable is not True and is_ascii is not True:
print("%s - %s" % (reader.FormatIntPtr(start),
reader.FormatIntPtr(start + size)))
- print(start + size + 1);
+ print(start + size + 1)
for i in range(0, size, reader.PointerSize()):
slot = start + i
maybe_address = reader.ReadUIntPtr(slot)
@@ -2138,10 +2138,10 @@ def PrintStackTraceMessage(self, start=None, print_message=True):
def TryExtractStackTrace(self, slot, start, end, print_message):
ptr_size = self.reader.PointerSize()
assert self.reader.ReadUIntPtr(slot) & 0xFFFFFFFF == STACK_TRACE_MARKER
- end_marker = STACK_TRACE_MARKER + 1;
+ end_marker = STACK_TRACE_MARKER + 1
header_size = 10
# Look for the end marker after the fields and the message buffer.
- end_search = start + (32 * 1024) + (header_size * ptr_size);
+ end_search = start + (32 * 1024) + (header_size * ptr_size)
end_slot = self.FindPtr(end_marker, end_search, end_search + ptr_size * 512)
if not end_slot: return start
print("Stack Message (start=%s):" % self.heap.FormatIntPtr(slot))
@@ -2168,9 +2168,9 @@ def FindPtr(self, expected_value, start, end):
def TryExtractErrorMessage(self, slot, start, end, print_message):
ptr_size = self.reader.PointerSize()
- end_marker = ERROR_MESSAGE_MARKER + 1;
+ end_marker = ERROR_MESSAGE_MARKER + 1
header_size = 1
- end_search = start + 1024 + (header_size * ptr_size);
+ end_search = start + 1024 + (header_size * ptr_size)
end_slot = self.FindPtr(end_marker, end_search, end_search + ptr_size * 512)
if not end_slot: return start
print("Error Message (start=%s):" % self.heap.FormatIntPtr(slot))
@@ -2838,7 +2838,7 @@ def output_module_details(self, f, module):
f.write("
")
time_date_stamp = datetime.datetime.fromtimestamp(module.time_date_stamp)
f.write(" timestamp: %s" % time_date_stamp)
- f.write("
");
+ f.write("
")
def output_modules(self, f):
self.output_header(f)
@@ -3078,7 +3078,7 @@ def output_disasm_range(
f.write("
Test | ") @@ -433,7 +433,7 @@ def Render(args): benchmark_suites[suite_name] = benchmark_suite_object benchmark_object = benchmark_suite_object.getBenchmark(benchmark_name) - benchmark_object.appendResult(run_name, trace); + benchmark_object.appendResult(run_name, trace) renderer = ResultTableRenderer(args.output) diff --git a/deps/v8/tools/release/mergeinfo.py b/deps/v8/tools/release/mergeinfo.py index bed7441f85e2c2..db3cdb8d566e6d 100755 --- a/deps/v8/tools/release/mergeinfo.py +++ b/deps/v8/tools/release/mergeinfo.py @@ -30,7 +30,7 @@ def describe_commit(git_working_dir, hash_to_search, one_line=False): def get_followup_commits(git_working_dir, hash_to_search): cmd = ['log', '--grep=' + hash_to_search, GIT_OPTION_HASH_ONLY, - 'remotes/origin/master']; + 'remotes/origin/master'] return git_execute(git_working_dir, cmd).strip().splitlines() def get_merge_commits(git_working_dir, hash_to_search): diff --git a/deps/v8/tools/release/test_mergeinfo.py b/deps/v8/tools/release/test_mergeinfo.py index f8619bb2fdbd5a..91491e6c6afdb2 100755 --- a/deps/v8/tools/release/test_mergeinfo.py +++ b/deps/v8/tools/release/test_mergeinfo.py @@ -169,22 +169,22 @@ def testIsLkgr(self): self._execute_git(['branch', 'remotes/origin/lkgr']) hash_of_not_lkgr = self._make_empty_commit('This one is not yet lkgr') - branches = self._get_branches(hash_of_first_commit); + branches = self._get_branches(hash_of_first_commit) self.assertTrue(mergeinfo.is_lkgr(branches)) - branches = self._get_branches(hash_of_not_lkgr); + branches = self._get_branches(hash_of_not_lkgr) self.assertFalse(mergeinfo.is_lkgr(branches)) def testShowFirstCanary(self): commits = self._get_commits() hash_of_first_commit = commits[0] - branches = self._get_branches(hash_of_first_commit); + branches = self._get_branches(hash_of_first_commit) self.assertEqual(mergeinfo.get_first_canary(branches), 'No Canary coverage') self._execute_git(['branch', 'remotes/origin/chromium/2345']) self._execute_git(['branch', 'remotes/origin/chromium/2346']) - branches = self._get_branches(hash_of_first_commit); + branches = self._get_branches(hash_of_first_commit) self.assertEqual(mergeinfo.get_first_canary(branches), '2345') def testFirstV8Version(self): @@ -193,16 +193,16 @@ def testFirstV8Version(self): self._execute_git(['branch', 'remotes/origin/chromium/2345']) self._execute_git(['branch', 'remotes/origin/chromium/2346']) - branches = self._get_branches(hash_of_first_commit); + branches = self._get_branches(hash_of_first_commit) self.assertEqual(mergeinfo.get_first_v8_version(branches), '--') self._execute_git(['branch', 'remotes/origin/5.7.1']) self._execute_git(['branch', 'remotes/origin/5.8.1']) - branches = self._get_branches(hash_of_first_commit); + branches = self._get_branches(hash_of_first_commit) self.assertEqual(mergeinfo.get_first_v8_version(branches), '5.7.1') self._execute_git(['branch', 'remotes/origin/5.6.1']) - branches = self._get_branches(hash_of_first_commit); + branches = self._get_branches(hash_of_first_commit) self.assertEqual(mergeinfo.get_first_v8_version(branches), '5.6.1') if __name__ == "__main__": diff --git a/deps/v8/tools/torque/format-torque.py b/deps/v8/tools/torque/format-torque.py index 51b588f90bb9ad..a29b75c21984a4 100755 --- a/deps/v8/tools/torque/format-torque.py +++ b/deps/v8/tools/torque/format-torque.py @@ -15,7 +15,7 @@ import re from subprocess import Popen, PIPE -kPercentEscape = r'α'; # Unicode alpha +kPercentEscape = r'α' # Unicode alpha def preprocess(input): input = re.sub(r'(if\s+)constexpr(\s*\()', 
r'\1/*COxp*/\2', input) @@ -39,7 +39,7 @@ def preprocess(input): input = re.sub(r'(\w+\s*)\|(\s*\w+)', r'\1|/**/\2', input) if old == input: - break; + break input = re.sub(r'\bgenerates\s+\'([^\']+)\'\s*', r' _GeNeRaTeS00_/*\1@*/', input) @@ -94,7 +94,7 @@ def postprocess(output): output = re.sub(r'(\w+)\s{0,1}\|\s{0,1}/\*\*/(\s*\w+)', r'\1 |\2', output) if old == output: - break; + break output = re.sub(kPercentEscape, r'%', output) @@ -115,7 +115,7 @@ def process(filename, lint, should_format): rc = p.returncode if (rc != 0): print("error code " + str(rc) + " running clang-format. Exiting...") - sys.exit(rc); + sys.exit(rc) if lint: if (output != original_input): @@ -123,7 +123,7 @@ def process(filename, lint, should_format): if should_format: output_file = open(filename, 'w') - output_file.write(output); + output_file.write(output) output_file.close() def print_usage(): @@ -135,7 +135,7 @@ def print_usage(): def Main(): if len(sys.argv) < 3: print("error: at least 2 arguments required") - print_usage(); + print_usage() sys.exit(-1) def is_option(arg): @@ -155,8 +155,8 @@ def is_option(arg): should_format = True else: print("error: -i and/or -l flags must be specified") - print_usage(); - sys.exit(-1); + print_usage() + sys.exit(-1) for filename in files: process(filename, lint, should_format) @@ -164,4 +164,4 @@ def is_option(arg): return 0 if __name__ == '__main__': - sys.exit(Main()); + sys.exit(Main()) diff --git a/deps/v8/tools/wasm-compilation-hints/inject-compilation-hints.py b/deps/v8/tools/wasm-compilation-hints/inject-compilation-hints.py index fd4b65b8fff9ff..46fbf1403e01ad 100755 --- a/deps/v8/tools/wasm-compilation-hints/inject-compilation-hints.py +++ b/deps/v8/tools/wasm-compilation-hints/inject-compilation-hints.py @@ -34,9 +34,9 @@ def parse_args(): hints_bs = open(args.hints_file, "rb").read() with io.open(in_wasm_file, "rb") as fin: with io.open(out_wasm_file, "wb") as fout: - magic_number, bs = read_magic_number(fin); + magic_number, bs = read_magic_number(fin) fout.write(bs) - version, bs = read_version(fin); + version, bs = read_version(fin) fout.write(bs) num_declared_functions = None while True: diff --git a/deps/v8/tools/wasm-compilation-hints/wasm-objdump-compilation-hints.py b/deps/v8/tools/wasm-compilation-hints/wasm-objdump-compilation-hints.py index a762bd78a61bec..ec854456530975 100755 --- a/deps/v8/tools/wasm-compilation-hints/wasm-objdump-compilation-hints.py +++ b/deps/v8/tools/wasm-compilation-hints/wasm-objdump-compilation-hints.py @@ -22,8 +22,8 @@ def parse_args(): args = parse_args() in_wasm_file = args.in_wasm_file if args.in_wasm_file else sys.stdin.fileno() with io.open(in_wasm_file, "rb") as fin: - read_magic_number(fin); - read_version(fin); + read_magic_number(fin) + read_version(fin) while True: id, bs = read_varuintN(fin) if id == None: diff --git a/deps/v8/tools/wasm-compilation-hints/wasm.py b/deps/v8/tools/wasm-compilation-hints/wasm.py index ae3d0841e8d3a6..710e9b0ce630c8 100644 --- a/deps/v8/tools/wasm-compilation-hints/wasm.py +++ b/deps/v8/tools/wasm-compilation-hints/wasm.py @@ -43,10 +43,10 @@ def peek_varuintN(fin): if len(bs) < n: return None, bs b = ord(bs[-1]) - value |= (b & 0x7F) << shift; + value |= (b & 0x7F) << shift if (b & 0x80) == 0x00: return value, bs - shift += 7; + shift += 7 n += 1 def read_varuintN(fin): diff --git a/tools/icu/shrink-icu-src.py b/tools/icu/shrink-icu-src.py index b6e456279b32c2..0df16cde2117d6 100644 --- a/tools/icu/shrink-icu-src.py +++ b/tools/icu/shrink-icu-src.py @@ -86,7 +86,7 @@ def 
icu_info(icu_full_path): if not icu_ver_major: print(' Could not read U_ICU_VERSION_SHORT version from %s' % uvernum_h) sys.exit(1) - icu_endianness = sys.byteorder[0]; # TODO(srl295): EBCDIC should be 'e' + icu_endianness = sys.byteorder[0] # TODO(srl295): EBCDIC should be 'e' return (icu_ver_major, icu_endianness) (icu_ver_major, icu_endianness) = icu_info(options.icusrc)
---|