diff --git a/README.md b/README.md
index 43a3d5500db85b11cc4cf8d434e6e7b577e043c8..1ae69d9bc615a6bd74dcd3ecf9ea2e786862a74f 100644
--- a/README.md
+++ b/README.md
@@ -25,19 +25,38 @@ es2panda [OPTIONS] [input file] -- [arguments]
## Running the tests
```sh
-pip install tqdm
+pip install tqdm dataclasses python-dotenv
```
```sh
-python3 test/runner.py [OPTIONS] [build_directory]
+python3 test/runner/runner.py [OPTIONS] [build_directory]
```
### Optional arguments
+#### Test sets
- `--regression`: Run regression tests
- - `--test262`: Run test262
+ - `--test262`: Run test262. To run tests from the test262 set, specify the environment variables `TEST262_REVISION` and `TEST262_URL` in the `.env` file.
+ - `--hermes`: Run Hermes runtime tests. To run tests from the Hermes set, specify the environment variables `HERMES_REVISION` and `HERMES_URL` in the `.env` file.
+
+#### Extra arguments
- `--no-progress`: Don't show progress bar
+ - `--verbose`: Generates more detailed output
+
+Other options are described in the `starter.py` file; see the example invocation below.
-### Tail arguments
+#### Tail arguments
- `build_directory`: Path to panda build directory
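+
+For example, a typical invocation combining several of the options above might look as follows (the build path is illustrative):
+```sh
+python3 test/runner/runner.py --regression --test262 --no-progress ~/panda/build
+```
+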
+### Execution time report
+It is possible to collect statistics on how long individual tests take. In the resulting report, tests are grouped by execution time.
+The grouping edges are set in seconds in the environment variable `TIME_EDGES`. For example, the value `1 5 10` specifies
+4 groups: less than 1 second, from 1 to 5 seconds, from 5 to 10 seconds, and 10 seconds or more.
+For the last group the report contains the actual durations.
+ - Specify the option `--time-report`
+ - Set the environment variable in the `.env` file in the format `TIME_EDGES="1 5 10"`
+ - After the test run, a short report is printed to the console
+ - A full report is created at the path `test/time_report.txt` (see the example below)
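+
+For example, a run with the time report enabled might look as follows (the build path is illustrative; `TIME_EDGES` is taken from the `.env` file):
+```sh
+python3 test/runner/runner.py --regression --time-report ~/panda/build
+```
+The console summary then shows how many tests fall into each group, e.g. `Less 1 sec: ...`, `Less 5 sec: ...`, `More 10 sec: ...`.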
+
### Skip list
-Skip list for the runtime: `test/test262skiplist.txt, test/test262skiplist-long.txt`.
+Skip lists for the runtime:
+ - test262: `test/test262skiplist.txt`, `test/test262skiplist-long.txt`
+ - hermes: `test/hermes-excluded.txt`, `test/hermes-excluded-AOT-FULL.txt`
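+
+Each line of a skip list names a single test file; text after `#` on the line is treated as a comment (see `test/hermes-excluded.txt` for examples), e.g. `sort-sparse.js # 0:08:19.348478`.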
diff --git a/test/hermes-excluded-AOT-FULL.txt b/test/hermes-excluded-AOT-FULL.txt
new file mode 100644
index 0000000000000000000000000000000000000000..b93473f83e310d5fdd643886ab8712111b62cefc
--- /dev/null
+++ b/test/hermes-excluded-AOT-FULL.txt
@@ -0,0 +1 @@
+env.js
\ No newline at end of file
diff --git a/test/hermes-excluded.txt b/test/hermes-excluded.txt
new file mode 100644
index 0000000000000000000000000000000000000000..5d72ed91046fc7da4d11ebdd6e9c48f312a691da
--- /dev/null
+++ b/test/hermes-excluded.txt
@@ -0,0 +1,278 @@
+# Too long
+array-large.js
+execution-time-limit.js
+fewer-gc-bug0.js # 0:01:08.907835
+gc-stress-test.js # 0:00:22.779755
+instanceof.js
+number-functions.js # 0:13:27.338967
+proxy-recursion-exception.js # 0:00:16.996039
+regexp-stackoverflow.js
+set_regress.js # 0:00:09.259850
+sort-sparse.js # 0:08:19.348478
+stack-overflow.js
+stack-overflow-apply.js # 0:03:06.849030
+regress-weakmap-large.js # 30.06 sec
+
+
+esm/esm-bar.js
+esm/esm-foo.js
+esm/esm-main.js
+
+source-visibility/global-hide-source.js
+source-visibility/global-show-source.js
+source-visibility/override.js
+source-visibility/with-eval.js
+
+intl/collator.js
+intl/get-canonical-locales.js
+intl/test262-to-locale-lowercase.js
+intl/to-locale-uppercase.js
+intl/date-time-format-apple.js
+intl/intl.js
+intl/to-locale-lowercase.js
+
+cjs/cjs-caching-1.js
+cjs/cjs-circle-2.js
+cjs/cjs-exports-1.js
+cjs/cjs-throw-1.js
+cjs/cjs-caching-2.js
+cjs/cjs-circle-3.js
+cjs/cjs-exports-2.js
+cjs/cjs-throw-2.js
+cjs/cjs-caching-3.js
+cjs/cjs-dynamic-1.js
+cjs/cjs-multiple-1.js
+cjs/load-segment-from-repl.js
+cjs/cjs-circle-1.js
+cjs/cjs-dynamic-2.js
+cjs/cjs-multiple-2.js
+cjs/subdir/cjs-subdir-2.js
+cjs/subdir/cjs-subdir-main.js
+cjs/subdir/metadata.json
+cjs/subdir/bar/cjs-subdir-bar.js
+cjs/subdir/foo/cjs-subdir-foo.js
+cjs/subdir-segments/cjs-subdir-2.js
+cjs/subdir-segments/cjs-subdir-main.js
+cjs/subdir-segments/cjs-subdir-shared.js
+cjs/subdir-segments/bar/cjs-subdir-bar.js
+cjs/subdir-segments/foo/cjs-subdir-foo.js
+cjs/subdir-segments-deltamode/test.js
+cjs/subdir-segments-deltamode/base/cjs-subdir-2.js
+cjs/subdir-segments-deltamode/base/cjs-subdir-main.js
+cjs/subdir-segments-deltamode/base/bar/cjs-subdir-bar.js
+cjs/subdir-segments-deltamode/base/foo/cjs-subdir-foo.js
+cjs/subdir-segments-deltamode/update/cjs-subdir-2.js
+cjs/subdir-segments-deltamode/update/cjs-subdir-main.js
+cjs/subdir-segments-deltamode/update/bar/cjs-subdir-bar.js
+cjs/subdir-segments-deltamode/update/foo/cjs-subdir-foo.js
+cjs/subdir-external-ids/cjs-subdir-2.js
+cjs/subdir-external-ids/cjs-subdir-main.js
+cjs/subdir-external-ids/bar/cjs-subdir-bar.js
+cjs/subdir-external-ids/foo/cjs-subdir-foo.js
+cjs/subdir-resolve/cjs-subdir-2.js
+cjs/subdir-resolve/cjs-subdir-main.js
+cjs/subdir-resolve/bar/cjs-subdir-bar.js
+cjs/subdir-resolve/foo/cjs-subdir-foo.js
+cjs/repeated-module-load-order/export12.js
+cjs/repeated-module-load-order/load12.js
+cjs/repeated-module-load-order/main.js
+cjs/repeated-module-load-order/seg1.js
+cjs/repeated-module-load-order/seg3.js
+cjs/repeated-module-load-order/shared12.js
+cjs/repeated-module-load-order/export34.js
+cjs/repeated-module-load-order/load34.js
+cjs/repeated-module-load-order/seg2.js
+cjs/repeated-module-load-order/seg4.js
+cjs/repeated-module-load-order/shared34.js
+
+# ES2PANDA_FAIL
+bytecode.js
+delete-in-catch.js
+delete-variable-nonstrict.js
+eval-redefined.js
+generator.js
+lazy-error-test.js
+logical-assignment.js
+nullish-coalescing.js
+regexp.js
+
+# RUNTIME_FAIL
+DataView.js
+TypedArray.js
+abstract-equality.js
+accessor.js
+arguments-decrement.js
+array-destr-close.js
+array-freeze-seal.js
+array-functions.js
+array-length.js
+array-props1.js
+array-props2.js
+array-props3.js
+array-props4.js
+array-spread-proto.js
+array-spread.js
+array.js
+async-dep-promise.js
+async-function-builtin.js
+async-function-expr.js
+async-function.js
+bigint-asintn.js
+bigint-asuintn.js
+bigint-binary-addition.js
+bigint-binary-ashl.js
+bigint-binary-ashr.js
+bigint-binary-division.js
+bigint-binary-equality.js
+bigint-binary-exponentiate.js
+bigint-binary-lshr.js
+bigint-binary-multiplication.js
+bigint-binary-relops.js
+bigint-binary-remainder.js
+bigint-binary-subtraction.js
+bigint-bitwise-and.js
+bigint-bitwise-or.js
+bigint-bitwise-xor.js
+bigint-constructor.js
+bigint-prototype-tolocalestring.js
+bigint-prototype-tostring.js
+bigint-prototype-valueof.js
+bigint-tilde.js
+bigint-unary-dec.js
+bigint-unary-inc.js
+bigint-unary-minus.js
+bitwise.js
+call-stack.js
+callee-caller-strict.js
+callee-non-strict.js
+calln.js
+computed-props.js
+console-host-job-throw.js
+copyDataProperties.js
+date-constructor.js
+date-default-timezone.js
+date-locale.js
+deep-recursion.js
+define-property.js
+destructuring-init.js
+error-capture-stack-trace-define-property-stack-fails.js
+error-capture-stack-trace.js
+error-cause.js
+error-prepare-stack-trace-bytecode.js
+error-prepare-stack-trace-call-site.js
+error-prepare-stack-trace-uncaught-1.js
+error-prepare-stack-trace-uncaught-2.js
+error-prepare-stack-trace-uncaught-3.js
+error-prepare-stack-trace.js
+error.js
+error_messages.js
+escape.js
+eval-errors.js
+eval-strict.js
+eval.js
+exception.js
+execution-time-limit-eval.js
+external-mem.js
+for-of-close-1.js
+for-of-close-2.js
+for-of.js
+function-constructor.js
+function-non-strict.js
+function-toString.js
+gc-idents.js
+generator-object-model.js
+global-var-no-clear.js
+global.js
+global_arguments.js
+global_properties.js
+hermes-internal-epilogues-empty.js
+hermes-internal-epilogues.js
+hermes-internal-job-queue.js
+hermes-internal-job-throw.js
+hermes-internal-test-methods.js
+hermesc.js
+in.js
+is-concat-spreadable.js
+iterator-close-throw.js
+iterator.js
+json.js
+large-string.js
+large_arrays.js
+lazy-function-toString.js
+lazy-gc-eval.js
+lazy-property-cache.js
+lazy-test.js
+load-this.js
+long-string-function.js
+many_args.js
+map.js
+math.js
+non-strict-var.js
+not-a-function.js
+object-functions.js
+object-init.js
+object-spread.js
+object-used-as-map.js
+optional-chaining.js
+override-static-builtin-error-message.js
+own-property-names-symbols.js
+predefined-var-eval.js
+predefined-var.js
+primitive.js
+print-empty-exception.js
+prohibit-invoke.js
+promise.js
+props1.js
+proxy.js
+put-to-transient.js
+quit.js
+readonly-error-shows-name.js
+reflect.js
+regexp-icase.js
+regexp_bytecode.js
+regexp_escapes.js
+regexp_unicode.js
+regress-apply.js
+regress-array-from.js
+regress-callbound.js
+regress-exp-overflow.js
+regress-math-max-handles.js
+return-in-global-eval.js
+rgb-to-grey.js
+set.js
+setTimeout-flushed-after-throw.js
+setTimeout.js
+setter-stack-overflow.js
+source-url-error.js
+spread-arguments.js
+stacktrace-bound.js
+stacktrace.js
+strict-var.js
+string-functions.js
+string-indexed-props.js
+string-locale.js
+symbol.js
+tagged-template-long.js
+tdz-check.js
+template-literal.js
+template-object-cache.js
+throw-type-error-builtin.js
+transient-obj-prop.js
+truncate-long-stack.js
+typeof.js
+uncatchable-error-to-string.js
+uncatchable-iterator-2.js
+uncatchable-iterator.js
+unsafe-intrinsics-mem.js
+unsafe-intrinsics.js
+uri.js
+use-static-builtin-function.js
+use-static-builtin-global.js
+var-declare-accessor.js
+weakmap-key-in-value.js
+weakmap.js
+weakref_basic.js
+weakset.js
+for_in_bigloop.js
+ordered-hash-map-invariant.js
+symbolid_marking.js
\ No newline at end of file
diff --git a/test/runner.py b/test/runner.py
deleted file mode 100755
index 29a93f9d29e2391494a8d256908876a0b7c7f4e7..0000000000000000000000000000000000000000
--- a/test/runner.py
+++ /dev/null
@@ -1,825 +0,0 @@
-#!/usr/bin/env python3
-# -*- coding: utf-8 -*-
-#
-# Copyright (c) 2021-2022 Huawei Device Co., Ltd.
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-from glob import glob
-from os import path
-from enum import Enum
-import argparse
-import fnmatch
-import multiprocessing
-import os
-import re
-import subprocess
-import sys
-import test262util
-
-
-def is_directory(parser, arg):
- if not path.isdir(arg):
- parser.error("The directory '%s' does not exist" % arg)
-
- return path.abspath(arg)
-
-
-def is_file(parser, arg):
- if not path.isfile(arg):
- parser.error("The file '%s' does not exist" % arg)
-
- return path.abspath(arg)
-
-
-def check_timeout(value):
- ivalue = int(value)
- if ivalue <= 0:
- raise argparse.ArgumentTypeError(
- "%s is an invalid timeout value" % value)
- return ivalue
-
-
-def get_args():
- parser = argparse.ArgumentParser(description="Regression test runner")
- parser.add_argument(
- 'build_dir', type=lambda arg: is_directory(parser, arg),
- help='panda build directory')
- parser.add_argument(
- '--test262', '-t', action='store_true', dest='test262', default=False,
- help='run test262 tests')
- parser.add_argument(
- '--error', action='store_true', dest='error', default=False,
- help='capture stderr')
- parser.add_argument(
- '--regression', '-r', action='store_true', dest='regression',
- default=False, help='run regression tests')
- parser.add_argument(
- '--tsc', action='store_true', dest='tsc',
- default=False, help='run tsc tests')
- parser.add_argument(
- '--no-progress', action='store_false', dest='progress', default=True,
- help='don\'t show progress bar')
- parser.add_argument(
- '--no-skip', action='store_false', dest='skip', default=True,
- help='don\'t use skiplists')
- parser.add_argument(
- '--update', action='store_true', dest='update', default=False,
- help='update skiplist')
- parser.add_argument(
- '--run-gc-in-place', action='store_true', dest='run_gc_in_place', default=False,
- help='enable --run-gc-in-place mode')
- parser.add_argument(
- '--filter', '-f', action='store', dest='filter',
- default="*", help='test filter regexp')
- parser.add_argument(
- '--es2panda-timeout', type=check_timeout,
- dest='es2panda_timeout', default=60, help='es2panda translator timeout')
- parser.add_argument(
- '--paoc-timeout', type=check_timeout,
- dest='paoc_timeout', default=600, help='paoc compiler timeout')
- parser.add_argument(
- '--timeout', type=check_timeout,
- dest='timeout', default=10, help='JS runtime timeout')
- parser.add_argument(
- '--gc-type', dest='gc_type', default="g1-gc", help='Type of garbage collector')
- parser.add_argument(
- '--heap-verifier', dest='heap_verifier', default="fail_on_verification",
- help='Heap verifier options')
- parser.add_argument(
- '--aot', action='store_true', dest='aot', default=False,
- help='use AOT compilation')
- parser.add_argument(
- '--no-bco', action='store_false', dest='bco', default=True,
- help='disable bytecodeopt')
- parser.add_argument(
- '--jit', action='store_true', dest='jit', default=False,
- help='use JIT in interpreter')
- parser.add_argument(
- '--irtoc', action='store_true', dest='irtoc', default=False,
- help='use irtoc in interpreter')
- parser.add_argument(
- '--arm64-compiler-skip', action='store_true', dest='arm64_compiler_skip', default=False,
- help='use skiplist for tests failing on aarch64 in AOT or JIT mode')
- parser.add_argument(
- '--arm64-qemu', action='store_true', dest='arm64_qemu', default=False,
- help='launch all binaries in qemu aarch64')
- parser.add_argument(
- '--arm32-qemu', action='store_true', dest='arm32_qemu', default=False,
- help='launch all binaries in qemu arm')
- parser.add_argument(
- '--test-list', dest='test_list', default=None, type=lambda arg: is_file(parser, arg),
- help='run tests listed in file')
- parser.add_argument(
- '--aot-args', action='append', dest='aot_args', default=[],
- help='Additional arguments that will passed to ark_aot')
- parser.add_argument(
- '--verbose', '-v', action='store_true', dest='verbose', default=False,
- help='Enable verbose output')
- parser.add_argument(
- '--quick', '-q', action='store_true', dest='quick', default=False,
- help='use bytecode quickener')
-
- return parser.parse_args()
-
-
-class Test:
- def __init__(self, test_path, flags):
- self.path = test_path
- self.flags = flags
- self.output = None
- self.error = None
- self.passed = None
- self.skipped = None
- self.reproduce = ""
-
- def log_cmd(self, cmd, verbose=False):
- cmd_str = ' '.join(cmd)
- self.reproduce += "\n" + cmd_str
- if verbose:
- print(cmd_str, file=sys.stderr)
-
- def run(self, runner):
- cmd = runner.cmd_prefix + [runner.es2panda, "--dump-ast"]
- cmd.extend(self.flags)
- cmd.append(self.path)
-
- self.log_cmd(cmd)
- process = subprocess.Popen(
- cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
- out, err = process.communicate()
- self.output = out.decode("utf-8", errors="ignore")
-
- expected_path = "%s-expected.txt" % (path.splitext(self.path)[0])
- try:
- with open(expected_path, 'r') as fp:
- expected = fp.read()
- self.passed = expected == self.output and process.returncode in [
- 0, 1]
- except Exception:
- self.passed = False
-
- if not self.passed:
- self.error = err.decode("utf-8", errors="ignore")
-
- return self
-
-
-class Test262Test(Test):
- def __init__(self, test_path, flags, test_id, with_optimizer):
- Test.__init__(self, test_path, flags)
- self.test_id = test_id
- self.fail_kind = None
- self.with_optimizer = with_optimizer
-
- class FailKind(Enum):
- ES2PANDA_FAIL = 1
- RUNTIME_FAIL = 2
- AOT_FAIL = 3
- ES2PANDA_TIMEOUT = 4
- RUNTIME_TIMEOUT = 5
- AOT_TIMEOUT = 6
- QUICK_FAIL = 7
- QUICK_TIMEOUT = 8
-
- def run(self, runner):
- with open(self.path, 'r') as fp:
- header = runner.util.get_header(fp.read())
- desc = runner.util.parse_descriptor(header)
-
- test_abc = path.join(runner.tmp_dir, "%s.abc" % self.test_id)
- test_an = path.join(runner.tmp_dir, "%s.an" % self.test_id)
-
- directory = path.dirname(test_abc)
- os.makedirs(directory, exist_ok=True)
-
- cmd = runner.cmd_prefix + [runner.es2panda]
- if self.with_optimizer:
- cmd.append('--opt-level=2')
- cmd.extend(['--thread=0', '--output=%s' % (test_abc)])
-
- if 'module' in desc['flags']:
- cmd.append("--module")
-
- if 'noStrict' in desc['flags']:
- self.skipped = True
- return self
-
- cmd.append(self.path)
-
- self.log_cmd(cmd, runner.args.verbose)
- process = subprocess.Popen(
- cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE, env=runner.cmd_env)
-
- try:
- out, err = process.communicate(runner.args.es2panda_timeout)
- except subprocess.TimeoutExpired:
- process.kill()
- self.passed = False
- self.fail_kind = self.FailKind.ES2PANDA_TIMEOUT
- self.error = self.fail_kind.name
- return self
-
- out = out.decode("utf-8", errors="ignore")
- err = err.decode("utf-8", errors="ignore")
- self.passed, need_exec = runner.util.validate_parse_result(
- process.returncode, err, desc, out)
-
- if not self.passed:
- self.fail_kind = self.FailKind.ES2PANDA_FAIL
- self.error = "out:{}\nerr:{}\ncode:{}".format(
- out, err, process.returncode)
- return self
-
- if not need_exec:
- self.passed = True
- return self
-
- if runner.args.quick:
- cmd = runner.cmd_prefix + [runner.arkquick]
- cmd.extend(runner.quick_args)
-
- src_abc = test_abc
- dst_abc = '%s.quick%s' % os.path.splitext(src_abc)
- cmd.extend([src_abc, dst_abc])
-
- test_abc = dst_abc
-
- self.log_cmd(cmd)
-
- if runner.args.verbose:
- print('Run arkquick: %s' % ' '.join(cmd), file=sys.stderr)
-
- process = subprocess.Popen(
- cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE, env=runner.cmd_env)
-
- try:
- out, err = process.communicate(timeout=600)
- except subprocess.TimeoutExpired:
- process.kill()
- self.passed = False
- self.fail_kind = self.FailKind.QUICK_TIMEOUT
- self.error = self.fail_kind.name
- return self
-
- if process.returncode != 0:
- self.passed = False
- self.fail_kind = self.FailKind.QUICK_FAIL
- self.error = err.decode("utf-8", errors="ignore")
- return self
-
- if runner.args.aot:
- cmd = runner.cmd_prefix + [runner.arkaot] + runner.aot_args
- cmd.extend(['--paoc-panda-files', test_abc, '--paoc-output', test_an])
-
- if os.path.isfile(test_an):
- os.remove(test_an)
-
- self.log_cmd(cmd, runner.args.verbose)
- process = subprocess.Popen(
- cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE, env=runner.cmd_env)
-
- try:
- out, err = process.communicate(runner.args.paoc_timeout)
- except subprocess.TimeoutExpired:
- process.kill()
- self.passed = False
- self.fail_kind = self.FailKind.AOT_TIMEOUT
- self.error = self.fail_kind.name
- return self
-
- if process.returncode != 0:
- self.passed = False
- self.fail_kind = self.FailKind.AOT_FAIL
- self.error = err.decode("utf-8", errors="ignore")
- return self
-
- cmd = runner.cmd_prefix + [runner.runtime] + runner.runtime_args
-
- if runner.args.aot:
- cmd.extend(['--aot-files', test_an])
-
- if runner.args.jit:
- cmd.extend(['--compiler-enable-jit=true', '--compiler-hotness-threshold=0'])
- else:
- cmd.extend(['--compiler-enable-jit=false'])
-
- if runner.args.irtoc:
- cmd.extend(['--interpreter-type=irtoc'])
-
- cmd.extend([test_abc, "_GLOBAL::func_main_0"])
-
- self.log_cmd(cmd, runner.args.verbose)
- process = subprocess.Popen(
- cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE, env=runner.cmd_env)
-
- try:
- out, err = process.communicate(timeout=runner.args.timeout)
- except subprocess.TimeoutExpired:
- process.kill()
- self.passed = False
- self.fail_kind = self.FailKind.RUNTIME_TIMEOUT
- self.error = self.fail_kind.name
- return self
-
- out = out.decode("utf-8", errors="ignore")
- err = err.decode("utf-8", errors="ignore")
- self.passed = runner.util.validate_runtime_result(
- process.returncode, err, desc, out)
-
- if not self.passed:
- self.fail_kind = self.FailKind.RUNTIME_FAIL
- self.error = "out:{}\nerr:{}\ncode:{}".format(
- out, err, process.returncode)
-
- return self
-
-
-class TSCTest(Test):
- def __init__(self, test_path, flags):
- Test.__init__(self, test_path, flags)
- self.options = self.parse_options()
-
- def parse_options(self):
- test_options = {}
-
- with open(self.path, "r", encoding="latin1") as f:
- lines = f.read()
- options = re.findall(r"//\s?@\w+:.*\n", lines)
-
- for option in options:
- separated = option.split(":")
- opt = re.findall(r"\w+", separated[0])[0].lower()
- value = separated[1].strip().lower()
-
- if opt == "filename":
- if opt in options:
- test_options[opt].append(value)
- else:
- test_options[opt] = [value]
-
- elif opt == "lib" or opt == "module":
- test_options[opt] = [each.strip()
- for each in value.split(",")]
- elif value == "true" or value == "false":
- test_options[opt] = value.lower() == "true"
- else:
- test_options[opt] = value
-
- # TODO: Possibility of error: all exports will be catched, even the commented ones
- if 'module' not in test_options and re.search(r"export ", lines):
- test_options['module'] = []
-
- return test_options
-
- def run(self, runner):
- cmd = runner.cmd_prefix + [runner.es2panda, '--parse-only', '--extension=ts']
- cmd.extend(self.flags)
- if "module" in self.options:
- cmd.append('--module')
- cmd.append(self.path)
-
- self.log_cmd(cmd)
- process = subprocess.Popen(
- cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
- out, err = process.communicate()
- self.output = out.decode("utf-8", errors="ignore")
-
- self.passed = True if process.returncode == 0 else False
-
- if not self.passed:
- self.error = err.decode("utf-8", errors="ignore")
-
- return self
-
-
-class Runner:
- def __init__(self, args, name):
- self.test_root = path.dirname(path.abspath(__file__))
- self.args = args
- self.name = name
- self.tests = []
- self.failed = 0
- self.passed = 0
- self.es2panda = path.join(args.build_dir, 'bin', 'es2panda')
- self.cmd_prefix = []
-
- if args.arm64_qemu:
- self.cmd_prefix = ["qemu-aarch64", "-L", "/usr/aarch64-linux-gnu/"]
-
- if args.arm32_qemu:
- self.cmd_prefix = ["qemu-arm", "-L", "/usr/arm-linux-gnueabi"]
-
- if not path.isfile(self.es2panda):
- raise Exception("Cannot find es2panda binary: %s" % self.es2panda)
-
- def add_directory(self, directory, extension, flags):
- pass
-
- def test_path(self, src):
- pass
-
- def run_test(self, test):
- return test.run(self)
-
- def run(self):
- pool = multiprocessing.Pool()
- result_iter = pool.imap_unordered(
- self.run_test, self.tests, chunksize=32)
- pool.close()
-
- if self.args.progress:
- from tqdm import tqdm
- result_iter = tqdm(result_iter, total=len(self.tests))
-
- results = []
- for res in result_iter:
- results.append(res)
-
- self.tests = results
- pool.join()
-
- def summarize(self):
- print("")
- fail_list = []
-
- for test in self.tests:
- assert(test.passed is not None)
- if not test.passed:
- fail_list.append(test)
-
- if len(fail_list):
- print("Failed tests:")
- for test in fail_list:
- print(self.test_path(test.path))
-
- if self.args.error:
- print("steps:", test.reproduce)
- print(test.error)
-
- print("")
-
- print("Summary(%s):" % self.name)
- print("\033[37mTotal: %5d" % (len(self.tests)))
- print("\033[92mPassed: %5d" % (len(self.tests) - len(fail_list)))
- print("\033[91mFailed: %5d" % (len(fail_list)))
- print("\033[0m")
-
- return len(fail_list)
-
-
-class RegressionRunner(Runner):
- def __init__(self, args):
- Runner.__init__(self, args, "Regresssion")
-
- def add_directory(self, directory, extension, flags):
- glob_expression = path.join(
- self.test_root, directory, "*.%s" % (extension))
- files = glob(glob_expression)
- files = fnmatch.filter(files, self.test_root + '**' + self.args.filter)
-
- self.tests += list(map(lambda f: Test(f, flags), files))
-
- def test_path(self, src):
- return src
-
-
-class Test262Runner(Runner):
- def __init__(self, args):
- Runner.__init__(self, args, "Test262 ark"),
-
- self.cmd_env = os.environ.copy()
- for san in ["ASAN_OPTIONS", "TSAN_OPTIONS", "MSAN_OPTIONS", "LSAN_OPTIONS"]:
- # we don't want to interpret asan failures as SyntaxErrors
- self.cmd_env[san] = ":exitcode=255"
-
- skiplist_conf_name = ""
- if(args.jit):
- skiplist_conf_name = "JIT"
- elif(args.irtoc):
- skiplist_conf_name = "IRTOC"
- elif(args.aot):
- skiplist_conf_name = "AOT"
- if("'--compiler-inline-full-intrinsics=true'" in args.aot_args):
- skiplist_conf_name+="-FULL"
- else:
- skiplist_conf_name = "INT"
- self.update = args.update
- self.enable_skiplists = False if self.update else args.skip
- self.normal_skiplist_file = "test262skiplist.txt"
- self.long_flaky_skiplist_files = ["test262skiplist-long.txt", "test262skiplist-flaky-%s.txt" % skiplist_conf_name]
- self.normal_skiplist = set([])
- self.runtime = path.join(args.build_dir, 'bin', 'ark')
- if not path.isfile(self.runtime):
- raise Exception("Cannot find runtime binary: %s" % self.runtime)
-
- ecmastdlib_abc = '%s/plugins/ecmascript/ecmastdlib/ecmastdlib.abc' % args.build_dir
-
- if args.quick:
- self.arkquick = path.join(args.build_dir, 'bin', 'arkquick')
- if not path.isfile(self.arkquick):
- raise Exception("Cannot find arkquick binary: %s" % self.arkquick)
-
- self.quick_args = []
-
- # quick ecmastdlib
- cmd = self.cmd_prefix + [self.arkquick]
- cmd.extend(self.quick_args)
- src_abc = ecmastdlib_abc
- dst_abc = '%s.quick%s' % os.path.splitext(src_abc)
- cmd.extend([src_abc, dst_abc])
-
- ecmastdlib_abc = dst_abc
-
- if args.verbose:
- print('quick ecmastdlib: %s' % ' '.join(cmd), file=sys.stderr)
-
- process = subprocess.Popen(
- cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE, env=self.cmd_env)
-
- try:
- out, err = process.communicate(timeout=600)
- except subprocess.TimeoutExpired:
- process.kill()
- raise Exception("Cannot quick %s: timeout" % self.src_abc)
-
- if process.returncode != 0:
- raise Exception("Cannot quick %s: %d" % (self.src_abc, process.returncode))
-
- self.runtime_args = [
- '--boot-panda-files=%s'
- % ecmastdlib_abc,
- '--load-runtimes=ecmascript',
- '--gc-type=%s' % args.gc_type,
- '--heap-verifier=%s' % args.heap_verifier
- ]
-
- if args.run_gc_in_place:
- self.runtime_args += ['--run-gc-in-place']
- if args.gc_type == "g1-gc":
- # Workaround. If don't specify this option G1 may access a deleted class while
- # iterating over all objects in a region.
- self.runtime_args += ['--g1-track-freed-objects=false']
-
- if args.aot:
- self.arkaot = path.join(args.build_dir, 'bin', 'ark_aot')
- if not path.isfile(self.arkaot):
- raise Exception("Cannot find aot binary: %s" % self.arkaot)
-
- self.aot_args = [
- '--boot-panda-files=%s'
- % ecmastdlib_abc,
- '--load-runtimes=ecmascript',
- '--gc-type=%s' % args.gc_type,
- '--heap-verifier=%s' % args.heap_verifier
- ]
-
- if args.run_gc_in_place:
- self.aot_args += ['--run-gc-in-place']
-
- self.aot_args += args.aot_args
- else:
- self.aot_args = []
-
- self.skiplist_name_list = self.long_flaky_skiplist_files if self.update else []
- self.skiplist_bco_name = ""
-
- if self.enable_skiplists:
- self.skiplist_name_list.append(self.normal_skiplist_file)
- self.skiplist_name_list.extend(self.long_flaky_skiplist_files)
-
- if args.bco:
- self.skiplist_bco_name = "test262skiplist-bco.txt"
-
- self.tmp_dir = path.join(path.sep, 'tmp', 'panda', 'test262')
- os.makedirs(self.tmp_dir, exist_ok=True)
-
- self.util = test262util.Test262Util()
- self.test262_dir = self.util.generate(
- '747bed2e8aaafe8fdf2c65e8a10dd7ae64f66c47',
- args.build_dir,
- path.join(self.test_root, "test262harness.js"),
- args.progress)
-
- self.add_directory(self.test262_dir, "js", args.test_list, [])
-
- def add_directory(self, directory, extension, test_list_path, flags):
- glob_expression = path.join(directory, "**/*.%s" % (extension))
- files = glob(glob_expression, recursive=True)
- files = fnmatch.filter(files, path.join(directory, self.args.filter))
-
- def load_list(p):
- with open(p, 'r') as fp:
- return set(map(lambda e: path.join(directory, e.strip()), fp))
-
- skiplist = set([])
-
- for sl in self.skiplist_name_list:
- skiplist.update(load_list(path.join(self.test_root, sl)))
-
- if self.update:
- self.normal_skiplist.update(load_list(path.join(self.test_root, self.normal_skiplist_file)))
-
- skiplist_bco = set([])
- if self.skiplist_bco_name != "":
- skiplist_bco = load_list(path.join(self.test_root, self.skiplist_bco_name))
-
- if test_list_path is not None:
- test_list = load_list(path.abspath(test_list_path))
- files = filter(lambda f: f in test_list, files)
-
- def get_test_id(file):
- return path.relpath(path.splitext(file)[0], self.test262_dir)
-
- self.tests = list(map(lambda test: Test262Test(test, flags, get_test_id(test), test not in skiplist_bco),
- filter(lambda f: f not in skiplist, files)))
-
- def test_path(self, src):
- return path.relpath(src, self.test262_dir)
-
- def run(self):
- Runner.run(self)
- self.update_skiplist()
-
- def summarize(self):
- print("")
-
- fail_lists = {}
- for kind in Test262Test.FailKind:
- fail_lists[kind] = []
-
- num_failed = 0
- num_skipped = 0
- for test in self.tests:
- if test.skipped:
- num_skipped += 1
- continue
-
- assert(test.passed is not None)
- if not test.passed:
- fail_lists[test.fail_kind].append(test)
- num_failed += 1
-
- def summarize_list(name, tests_list):
- if len(tests_list):
- tests_list.sort(key=lambda test: test.path)
- print("# " + name)
- for test in tests_list:
- print(self.test_path(test.path))
- if self.args.error:
- print("steps:", test.reproduce)
- print(test.error)
- print("")
-
- total_tests = len(self.tests) - num_skipped
-
- if not self.update:
- for kind in Test262Test.FailKind:
- summarize_list(kind.name, fail_lists[kind])
-
- print("Summary(%s):" % self.name)
- print("\033[37mTotal: %5d" % (total_tests))
- print("\033[92mPassed: %5d" % (total_tests - num_failed))
- print("\033[91mFailed: %5d" % (num_failed))
- print("\033[0m")
-
- return num_failed
-
- def update_skiplist(self):
- if not self.update:
- return
-
- skiplist_es2panda = list({x.test_id + ".js" for x in self.tests
- if not x.skipped and not x.passed and
- x.fail_kind == Test262Test.FailKind.ES2PANDA_FAIL})
- skiplist_runtime = list({x.test_id + ".js" for x in self.tests
- if not x.skipped and not x.passed and
- x.fail_kind == Test262Test.FailKind.RUNTIME_FAIL})
-
- skiplist_es2panda.sort()
- skiplist_runtime.sort()
-
- new_skiplist = skiplist_es2panda + skiplist_runtime
-
- new_pass = list(filter(lambda x: len(x) and not x.startswith('#')
- and x not in new_skiplist, self.normal_skiplist))
- new_fail = list(filter(lambda x: x not in self.normal_skiplist, new_skiplist))
- new_pass.sort()
- new_fail.sort()
-
- if new_pass:
- print("\033[92mRemoved from skiplist:")
- print("\n".join(new_pass))
- print("\033[0m")
-
- if new_fail:
- print("\033[91mNew tests on skiplist:")
- print("\n".join(new_fail))
- print("\033[0m")
-
- fd = os.open(path.join(self.test_root, self.normal_skiplist_file), os.O_RDWR | os.O_CREAT | os.O_TRUNC)
- file = os.fdopen(fd, "w+")
- file.write("\n".join(["# ES2PANDA_FAIL"] + skiplist_es2panda + ["", "# RUNTIME_FAIL"] + skiplist_runtime))
- file.write("\n")
- file.close()
-
-
-class TSCRunner(Runner):
- def __init__(self, args):
- Runner.__init__(self, args, "TSC")
-
- ts_dir = path.join(self.test_root, "TypeScript")
- ts_branch = "v4.2.4"
-
- if not path.isdir(ts_dir):
- subprocess.run(
- f"git clone https://github.com/microsoft/TypeScript.git \
- {ts_dir} && cd {ts_dir} \
- && git checkout {ts_branch} > /dev/null 2>&1",
- shell=True,
- stdout=subprocess.DEVNULL,
- )
- else:
- subprocess.run(
- f"cd {ts_dir} && git clean -f > /dev/null 2>&1",
- shell=True,
- stdout=subprocess.DEVNULL,
- )
-
- self.add_directory("conformance", [])
- self.add_directory("compiler", [])
-
- def add_directory(self, directory, flags):
- ts_suite_dir = path.join(self.test_root, 'TypeScript/tests/cases')
-
- glob_expression = path.join(
- ts_suite_dir, directory, "**/*.ts")
- files = glob(glob_expression, recursive=True)
- files = fnmatch.filter(files, ts_suite_dir + '**' + self.args.filter)
-
- for f in files:
- test_name = path.basename(f.split(".ts")[0])
- negative_references = path.join(
- self.test_root, 'TypeScript/tests/baselines/reference')
- is_negative = path.isfile(path.join(negative_references,
- test_name + ".errors.txt"))
- test = TSCTest(f, flags)
-
- if 'target' in test.options:
- targets = test.options['target'].replace(" ", "").split(',')
- for target in targets:
- if path.isfile(path.join(negative_references,
- test_name + "(target=%s).errors.txt" % (target))):
- is_negative = True
- break
-
- if is_negative or "filename" in test.options:
- continue
-
- self.tests.append(test)
-
- def test_path(self, src):
- return src
-
-
-def main():
- args = get_args()
-
- runners = []
-
- if args.regression:
- runner = RegressionRunner(args)
- runner.add_directory("parser/js", "js", ["--parse-only"])
- # TODO(aszilagyi): reenable TS tests
- # runner.add_directory("parser/ts", "ts",
- # ["--parse-only", '--extension=ts'])
- # runner.add_directory("compiler/ts", "ts", ["--extension=ts", ])
-
- runners.append(runner)
-
- if args.test262:
- runners.append(Test262Runner(args))
-
- if args.tsc:
- runners.append(TSCRunner(args))
-
- failed_tests = 0
-
- for runner in runners:
- runner.run()
- failed_tests += runner.summarize()
-
- exit(0 if failed_tests == 0 else 1)
-
-
-if __name__ == "__main__":
- main()
diff --git a/test/runner/.env b/test/runner/.env
new file mode 100644
index 0000000000000000000000000000000000000000..f8cd3eb810ae1ae8bf5ed161c69d5cd9ba96b327
--- /dev/null
+++ b/test/runner/.env
@@ -0,0 +1,5 @@
+HERMES_REVISION=3feac7b2f9759d83879b04232479041baa805e7b
+HERMES_URL=https://github.com/facebook/hermes/archive
+TEST262_REVISION=747bed2e8aaafe8fdf2c65e8a10dd7ae64f66c47
+TEST262_URL=https://github.com/tc39/test262/archive
+TIME_EDGES="1 5 10"
\ No newline at end of file
diff --git a/test/runner/configuration_kind.py b/test/runner/configuration_kind.py
new file mode 100644
index 0000000000000000000000000000000000000000..45958b5c18b4c0286ae70f2d0b85262a0b42a431
--- /dev/null
+++ b/test/runner/configuration_kind.py
@@ -0,0 +1,10 @@
+from enum import Enum
+
+
+class ConfigurationKind(Enum):
+ INT = "INT"
+ AOT = "AOT"
+ AOT_FULL = "AOT-FULL"
+ JIT = "JIT"
+ QUICK = "QUICK"
+ IRTOC = "IRTOC"
diff --git a/test/runner/fail_kind.py b/test/runner/fail_kind.py
new file mode 100644
index 0000000000000000000000000000000000000000..e5636673f37895aaf4343d6e25db2b8157220bf1
--- /dev/null
+++ b/test/runner/fail_kind.py
@@ -0,0 +1,19 @@
+from enum import Enum
+
+
+class FailKind(Enum):
+ ES2PANDA_FAIL = 1
+ ES2PANDA_TIMEOUT = 4
+ ES2PANDA_OTHER = 7
+
+ RUNTIME_FAIL = 2
+ RUNTIME_TIMEOUT = 5
+ RUNTIME_OTHER = 8
+
+ AOT_FAIL = 3
+ AOT_TIMEOUT = 6
+ AOT_OTHER = 9
+
+ QUICK_FAIL = 10
+ QUICK_TIMEOUT = 11
+ QUICK_OTHER = 12
diff --git a/test/runner/hermes_harness.py b/test/runner/hermes_harness.py
new file mode 100644
index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391
diff --git a/test/runner/index_template.html b/test/runner/index_template.html
new file mode 100644
index 0000000000000000000000000000000000000000..9ffec124971e143020dbb895746d79a53f574302
--- /dev/null
+++ b/test/runner/index_template.html
@@ -0,0 +1,93 @@
+
+
+
+
+ ${Title}
+
+
+
+${Title}
+${Options}
+Summary
+
+
+
+ ${Total} |
+
+
+
+ ${Passed} |
+
+
+
+ ${Failed} |
+
+
+
+ ${Ignored} |
+
+
+
+ ${ExcludedThroughLists} |
+
+
+
+ ${ExcludedByOtherReasons} |
+
+
+Failed tests list
+
+
+
\ No newline at end of file
diff --git a/test/runner/params.py b/test/runner/params.py
new file mode 100644
index 0000000000000000000000000000000000000000..df994ed45dfbbb1e809c7fc9ac9d0a1981636953
--- /dev/null
+++ b/test/runner/params.py
@@ -0,0 +1,46 @@
+from dataclasses import dataclass
+from typing import Any, List
+
+from configuration_kind import ConfigurationKind
+from fail_kind import FailKind
+
+
+@dataclass
+class TestEnv:
+ args: Any
+
+ conf_kind: ConfigurationKind
+
+ cmd_prefix: List[str]
+ cmd_env: Any
+
+ es2panda: str
+
+ runtime: str
+ runtime_args: List[str]
+
+ arkaout: str
+ aot_args: List[str]
+
+ ark_quick: str
+ quick_args: List[str]
+
+ util: Any = None
+
+
+@dataclass(frozen=True)
+class Params:
+ timeout: int
+ executor: str
+ fail_kind_fail: FailKind
+ fail_kind_timeout: FailKind
+ fail_kind_other: FailKind
+ flags: list
+ env: Any
+
+
+@dataclass(frozen=True)
+class TestReport:
+ output: str
+ error: str
+ return_code: int
diff --git a/test/runner/report.py b/test/runner/report.py
new file mode 100644
index 0000000000000000000000000000000000000000..268bd33b20c0918d5fefcefff52a96ab05828971
--- /dev/null
+++ b/test/runner/report.py
@@ -0,0 +1,185 @@
+from os import path
+
+from params import TestReport
+
+
+def get_good_line(line):
+ return f'{line}'
+
+
+def get_failed_line(line):
+ return f'{line}'
+
+
+def get_md_good_line(expected, actual):
+ return f"| {expected} | {actual} |"
+
+
+def get_md_failed_line(expected, actual):
+ if expected.strip() != "":
+ expected = f"**{expected}**"
+ if actual.strip() != "":
+ actual = f"**{actual}**"
+ return f"| {expected} | {actual} |"
+
+
+def convert_to_array(output: str):
+ return list(filter(
+ lambda x: len(x) > 0,
+ map(lambda x: x.strip(),
+ output.split("\n"))
+ ))
+
+
+def make_output_diff_html(expected, actual):
+ expected_list = convert_to_array(expected)
+ actual_list = convert_to_array(actual)
+ result_expected = []
+ result_actual = []
+
+ min_len = min(len(expected_list), len(actual_list))
+ for i in range(min_len):
+ expected_line = expected_list[i].strip()
+ actual_line = actual_list[i].strip()
+ if expected_line == actual_line:
+ result_expected.append(get_good_line(expected_line))
+ result_actual.append(get_good_line(actual_line))
+ else:
+ result_expected.append(get_failed_line(expected_line))
+ result_actual.append(get_failed_line(actual_line))
+
+ max_len = max(len(expected_list), len(actual_list))
+ is_expected_remains = len(expected_list) > len(actual_list)
+ for i in range(min_len, max_len):
+ if is_expected_remains:
+ result_expected.append(get_good_line(expected_list[i]))
+ else:
+ result_actual.append(get_good_line(actual_list[i]))
+
+ return result_expected, result_actual
+
+
+def make_output_diff_md(expected, actual):
+ expected_list = convert_to_array(expected)
+ actual_list = convert_to_array(actual)
+ result = []
+
+ min_len = min(len(expected_list), len(actual_list))
+ for i in range(min_len):
+ expected_line = expected_list[i].strip()
+ actual_line = actual_list[i].strip()
+ if expected_line == actual_line:
+ result.append(get_md_good_line(expected_line, actual_line))
+ else:
+ result.append(get_md_failed_line(expected_line, actual_line))
+
+ max_len = max(len(expected_list), len(actual_list))
+ is_expected_remains = len(expected_list) > len(actual_list)
+ for i in range(min_len, max_len):
+ if is_expected_remains:
+ result.append(get_md_good_line(expected_list[i], " "))
+ else:
+ result.append(get_md_failed_line(" ", actual_list[i]))
+
+ return result
+
+
+REPORT_TITLE = "${Title}"
+REPORT_PATH = "${Path}"
+REPORT_STATUS_CLASS = "${status_class}"
+REPORT_STATUS = "${Status}"
+REPORT_REPRODUCE = "${Reproduce}"
+REPORT_RESULT = "${Result}"
+REPORT_EXPECTED = "${Expected}"
+REPORT_ACTUAL = "${Actual}"
+REPORT_ERROR = "${Error}"
+REPORT_RETURN_CODE = "${ReturnCode}"
+REPORT_TIME = "${Time}"
+
+STATUS_PASSED = "PASSED"
+STATUS_PASSED_CLASS = "test_status--passed"
+STATUS_FAILED = "FAILED"
+STATUS_FAILED_CLASS = "test_status--failed"
+
+NO_TIME = "not measured"
+
+
+def make_html_report(test):
+ actual_report = test.report if test.report is not None else TestReport("", "", -1)
+ test_expected, test_actual = make_output_diff_html(test.expected, actual_report.output)
+ test_expected = "\n".join(test_expected)
+ test_actual = "\n".join(test_actual)
+
+ with open(path.join(path.dirname(path.abspath(__file__)), "report_template.html"), "r") as fp:
+ report = fp.read()
+
+ report = report.replace(REPORT_TITLE, test.test_id)
+ report = report.replace(REPORT_PATH, test.path)
+ if test.passed:
+ report = report.replace(REPORT_STATUS_CLASS, STATUS_PASSED_CLASS)
+ report = report.replace(REPORT_STATUS, STATUS_PASSED)
+ else:
+ report = report.replace(REPORT_STATUS_CLASS, STATUS_FAILED_CLASS)
+ report = report.replace(REPORT_STATUS, STATUS_FAILED)
+ if test.time is not None:
+ report = report.replace(REPORT_TIME, f"{round(test.time, 2)} sec")
+ else:
+ report = report.replace(REPORT_TIME, NO_TIME)
+
+ report = report.replace(REPORT_REPRODUCE, test.reproduce)
+ report = report.replace(REPORT_EXPECTED, test_expected)
+ report = report.replace(REPORT_ACTUAL, test_actual)
+ report = report.replace(REPORT_ERROR, actual_report.error)
+ if test.report is None:
+ report = report.replace(REPORT_RETURN_CODE, "Not defined")
+ else:
+ report = report.replace(REPORT_RETURN_CODE, str(actual_report.return_code))
+
+ return report
+
+
+def make_md_report(test):
+ actual_report = test.report if test.report is not None else TestReport("", "", -1)
+ test_result = make_output_diff_md(test.expected, actual_report.output)
+ test_result = "\n".join(test_result)
+
+ with open(path.join(path.dirname(path.abspath(__file__)), "report_template.md"), "r") as fp:
+ report = fp.read()
+
+ report = report.replace(REPORT_TITLE, test.test_id)
+ report = report.replace(REPORT_PATH, test.path)
+ if test.passed:
+ report = report.replace(REPORT_STATUS_CLASS, STATUS_PASSED_CLASS)
+ report = report.replace(REPORT_STATUS, STATUS_PASSED)
+ else:
+ report = report.replace(REPORT_STATUS_CLASS, STATUS_FAILED_CLASS)
+ report = report.replace(REPORT_STATUS, STATUS_FAILED)
+ if test.time is not None:
+ report = report.replace(REPORT_TIME, f"{round(test.time, 2)} sec")
+ else:
+ report = report.replace(REPORT_TIME, NO_TIME)
+
+ report = report.replace(REPORT_REPRODUCE, test.reproduce)
+ report = report.replace(REPORT_RESULT, test_result)
+ report = report.replace(REPORT_ERROR, actual_report.error)
+ if test.report is None:
+ report = report.replace(REPORT_RETURN_CODE, "Not defined")
+ else:
+ report = report.replace(REPORT_RETURN_CODE, str(actual_report.return_code))
+
+ return report
+
+
+def make_text_report(test):
+ result = "PASSED" if test.passed else "FAILED"
+ time_line = f"{round(test.time, 2)} sec" if test.time is not None else NO_TIME
+ return "\n".join([
+ f"{test.test_id}",
+ f"{test.path}\n",
+ f"Result: {result}",
+ f"Execution time: {time_line}",
+ f"Steps to reproduce:{test.reproduce}\n",
+ f"Expected output:\n{test.expected}\n",
+ f"Actual output (stdout):\n{test.report.output}\n",
+ f"Actual error (stderr):\n{test.report.error}\n",
+ f"Actual return code:\n{test.report.return_code}\n"])
diff --git a/test/runner/report_format.py b/test/runner/report_format.py
new file mode 100644
index 0000000000000000000000000000000000000000..e0dd4cad26a79804e73050fded2d9ecd6ae1727a
--- /dev/null
+++ b/test/runner/report_format.py
@@ -0,0 +1,6 @@
+from enum import Enum
+
+
+class ReportFormat(Enum):
+ HTML = "html"
+ MD = "md"
diff --git a/test/runner/report_template.html b/test/runner/report_template.html
new file mode 100644
index 0000000000000000000000000000000000000000..3432279dd666de31ad616eeec51e9d0a9dce14a7
--- /dev/null
+++ b/test/runner/report_template.html
@@ -0,0 +1,91 @@
+
+
+
+
+ ${Title}
+
+
+
+
+
+${Path}
+Result: ${Status}
+Execution time: ${Time}
+
+
+${Reproduce}
+
+
+
+
+ ${Expected}
+
+
+
+ ${Actual}
+
${Error}
+
Return code: ${ReturnCode}
+
+
+
+
+
\ No newline at end of file
diff --git a/test/runner/report_template.md b/test/runner/report_template.md
new file mode 100644
index 0000000000000000000000000000000000000000..4a02cc6feffdec1b39bfd7bdb8e77653c3ea5dae
--- /dev/null
+++ b/test/runner/report_template.md
@@ -0,0 +1,24 @@
+# ${Title}
+
+${Path}
+
+Result: **${Status}**
+
+Execution time: **${Time}**
+
+## Steps to reproduce
+
+```commandline
+${Reproduce}
+```
+
+| Expected output | Actual output |
+|-----------------|---------------|
+${Result}
+
+Error:
+```commandline
+${Error}
+```
+
+Return code: ${ReturnCode}
diff --git a/test/runner/runner.py b/test/runner/runner.py
new file mode 100644
index 0000000000000000000000000000000000000000..607e8cb259d7b94e607f7d7a262b8843ab18255c
--- /dev/null
+++ b/test/runner/runner.py
@@ -0,0 +1,40 @@
+import os
+from dotenv import load_dotenv
+
+from runner_js_hermes import RunnerJSHermes
+from runner_js_parser import RunnerJSParser
+from runner_js_test262 import RunnerJSTest262
+from starter import get_args
+
+
+def main():
+ dotenv_path = os.path.join(os.path.dirname(__file__), '.env')
+ if os.path.exists(dotenv_path):
+ load_dotenv(dotenv_path)
+
+ args = get_args()
+
+ runners = []
+
+ if args.regression:
+ runner_main = RunnerJSParser(args)
+ runner_main.add_directory("parser/js", "js", flags=["--parse-only"])
+ runners.append(runner_main)
+
+ if args.test262:
+ runners.append(RunnerJSTest262(args))
+
+ if args.hermes:
+ runners.append(RunnerJSHermes(args))
+
+ failed_tests = 0
+
+ for runner in runners:
+ runner.run()
+ failed_tests += runner.summarize()
+
+ exit(0 if failed_tests == 0 else 1)
+
+
+if __name__ == "__main__":
+ main()
diff --git a/test/runner/runner_base.py b/test/runner/runner_base.py
new file mode 100644
index 0000000000000000000000000000000000000000..506e7c46ac7eaf72b9c4a4f6622aa69b5a542034
--- /dev/null
+++ b/test/runner/runner_base.py
@@ -0,0 +1,246 @@
+import fnmatch
+import multiprocessing
+import re
+from datetime import datetime
+from glob import glob
+from itertools import chain
+from os import path, makedirs, getenv
+
+from utils import write_2_file
+
+CONST_COMMENT = ["#"]
+test_comment_expr = re.compile(r"^\s*(?P<test>[^# ]+)?(\s*#\s*(?P<comment>.+))?", re.MULTILINE)
+
+
+def load_list(test_root, p):
+ def transform(line):
+ test, comment = get_test_and_comment_from_line(line.strip(" \n"))
+ if test is not None:
+ test = path.join(test_root, test)
+ return test
+
+ if path.exists(p):
+ return set(map(lambda line: transform(line), open(p, 'r')))
+ else:
+ return set([])
+
+
+def get_test_id(file, start_directory):
+ return path.relpath(path.splitext(file)[0], start_directory)
+
+
+def is_line_a_comment(line):
+ for s in CONST_COMMENT:
+ if line.startswith(s):
+ return True
+ return False
+
+
+def is_line_a_test(line):
+ return len(line) and not is_line_a_comment(line)
+
+
+def get_test_and_comment_from_line(line):
+ r = test_comment_expr.search(line)
+ return r["test"], r["comment"]
+
+
+def get_test_from_line(line):
+ test, comment = get_test_and_comment_from_line(line)
+ return test
+
+
+def get_comment_from_line(line):
+ test, comment = get_test_and_comment_from_line(line)
+ return comment
+
+
+def correct_path(root, test_list):
+ return path.abspath(test_list) if path.exists(test_list) else path.join(root, test_list)
+
+
+def is_test_in_list(test, test_list):
+ return any(test_line.endswith(test) for test_line in test_list)
+
+
+class Runner:
+ def __init__(self, args, name):
+ current_folder_parent = path.dirname(path.dirname(path.abspath(__file__)))
+ # Roots:
+        # directory where test files are located; it is either set explicitly as an absolute path
+        # or defaults to the parent of the current folder (where this python file is located)
+ self.test_root = args.test_root if args.test_root is not None else current_folder_parent
+        # directory where list files (files with lists of ignored, excluded, and other tests) are located;
+        # it is either set explicitly as an absolute path or
+        # defaults to the parent of the current folder (where this python file is located)
+ self.list_root = args.list_root if args.list_root is not None else current_folder_parent
+        # directory where report files for failed tests are saved;
+        # it is either set explicitly as an absolute path or
+        # defaults to the /tmp/<name>/reports folder
+ self.report_root = args.report_root if args.report_root is not None else \
+ path.join(path.sep, "tmp", name, "reports")
+ makedirs(self.report_root, exist_ok=True)
+ # root directory containing bin folder with binary files
+ self.build_dir = args.build_dir
+
+ self.args = args
+ self.name = name
+
+ # Lists:
+        # an excluded test is a test that should not be loaded and should not be run
+        # excluded_lists: absolute paths, or paths relative to list_root, of files with lists of such tests
+ self.excluded_lists = []
+ self.excluded_tests = set([])
+        # an ignored test is a test that should be loaded and executed, but its failure is ignored
+        # ignored_lists: absolute paths, or paths relative to list_root, of files with lists of such tests
+        # aka: kfl = known failures list
+ self.ignored_lists = []
+ self.ignored_tests = set([])
+        # the set of tests to execute; it can contain ignored tests, but cannot contain excluded tests
+ self.tests = set([])
+ # list of results of every executed test
+ self.results = []
+        # name of a file listing the only tests that should be executed;
+        # if it is specified, other tests are not executed
+ self.explicit_list = correct_path(self.list_root, args.test_list) if args.test_list is not None else None
+        # name of a single test file, as a path relative to test_root, that should be executed;
+        # if it is specified, other tests are not executed even if test_list is set
+ self.explicit_test = args.test_file
+
+ # Counters:
+ # failed + ignored + passed + excluded_after = len of all executed tests
+ # failed + ignored + passed + excluded_after + excluded = len of full set of tests
+ self.failed = 0
+ self.ignored = 0
+ self.passed = 0
+ self.excluded = 0
+        # a test chosen for execution can detect itself as excluded
+ self.excluded_after = 0
+
+ self.update = args.update
+
+ def load_tests_from_list(self, list_name):
+ list_path = correct_path(self.list_root, list_name)
+ return list(filter(
+ lambda test: test is not None,
+ load_list(self.test_root, list_path)
+ ))
+
+ def load_tests_from_lists(self, lists):
+ m = map(lambda list_name: self.load_tests_from_list(list_name), lists)
+ return list(chain(*m))
+
+ # Read excluded_lists and load list of excluded tests
+ def load_excluded_tests(self):
+ self.excluded_tests.update(self.load_tests_from_lists(self.excluded_lists))
+ self.excluded = len(self.excluded_tests)
+
+ # Read ignored_lists and load list of ignored tests
+ def load_ignored_tests(self):
+ self.ignored_tests.update(self.load_tests_from_lists(self.ignored_lists))
+
+ # Read explicit_list and load list of executed tests
+ def load_explicit_tests(self):
+ if self.explicit_list is not None:
+ return self.load_tests_from_list(self.explicit_list)
+ else:
+ return []
+
+    # Load the one explicitly specified test that should be executed
+ def load_explicit_test(self):
+ if self.explicit_test is not None:
+ return [correct_path(self.test_root, self.explicit_test)]
+ else:
+ return []
+
+ # Browse the directory, search for files with the specified extension
+ # and add them as tests
+ def add_directory(self, directory, extension, flags):
+ test_files = []
+ if self.explicit_test is not None:
+ test_files.extend(self.load_explicit_test())
+ elif self.explicit_list is not None:
+ test_files.extend(self.load_explicit_tests())
+ else:
+ self.load_excluded_tests()
+ self.load_ignored_tests()
+ if len(test_files) == 0:
+ glob_expression = path.join(directory, f"**/*.{extension}")
+ test_files.extend(fnmatch.filter(
+ glob(glob_expression, recursive=True),
+ path.join(directory, self.args.filter)
+ ))
+
+ self.tests.update(list(map(
+ lambda x: self.create_test(x, flags, is_test_in_list(x, self.ignored_tests)),
+ filter(
+ lambda x: not is_test_in_list(x, self.excluded_tests),
+ test_files
+ )
+ )))
+
+ def create_test(self, test_file, flags, is_ignored):
+ pass
+
+ def run_test(self, test):
+ return test.run()
+
+ def run(self):
+ pool = multiprocessing.Pool()
+ result_iter = pool.imap_unordered(self.run_test, self.tests, chunksize=32)
+ pool.close()
+
+ if self.args.progress:
+ from tqdm import tqdm
+ result_iter = tqdm(result_iter, total=len(self.tests))
+
+ results = []
+ for res in result_iter:
+ results.append(res)
+
+ self.results = results
+ pool.join()
+
+ def summarize(self):
+ result = self.summarize_test_statistics()
+ if self.args.time_report:
+ self.prepare_time_report()
+ return result
+
+ def summarize_test_statistics(self):
+ return 0
+
+ def prepare_time_report(self):
+ time_edges = [int(s) for s in getenv('TIME_EDGES').replace('"', "").split()]
+ times = []
+ for i in range(len(time_edges) + 1):
+ times.append([])
+ for r in self.results:
+ if r.time is None:
+ continue
+ for i in range(len(time_edges)):
+ if r.time < time_edges[i]:
+ times[i].append(r.test_id)
+ break
+ else:
+ times[-1].append(f"{r.test_id} # {round(r.time, 2)} sec")
+
+ print(f"Test execution time")
+ time_report = ""
+ for i in range(len(time_edges)):
+ print(f"Less {time_edges[i]} sec: {len(times[i])}")
+ time_report += f"\nLess {time_edges[i]} sec:\n"
+ for t in times[i]:
+ time_report += f"{t}\n"
+ print(f"More {time_edges[-1]} sec: {len(times[-1])}")
+ time_report += f"\n{time_edges[-1]} sec or more:\n"
+ for t in times[-1]:
+ time_report += f"{t}\n"
+
+ timestamp = int(datetime.timestamp(datetime.now()))
+ time_report_path = path.join(self.report_root, f"{self.name}-time_report-{timestamp}.txt")
+
+ write_2_file(time_report_path, time_report)
+ if self.args.verbose:
+ print(f"Time report saved to {time_report_path}")
diff --git a/test/runner/runner_js.py b/test/runner/runner_js.py
new file mode 100644
index 0000000000000000000000000000000000000000..23044b5dd2cbb242eeab405fb2393e89805f2533
--- /dev/null
+++ b/test/runner/runner_js.py
@@ -0,0 +1,278 @@
+import subprocess
+import sys
+from datetime import datetime
+from glob import glob
+from os import path, environ, makedirs
+from typing import List
+
+from configuration_kind import ConfigurationKind
+from fail_kind import FailKind
+from params import TestEnv
+from report_format import ReportFormat
+from runner_base import Runner
+from utils import write_2_file
+
+INDEX_TITLE = "${Title}"
+INDEX_OPTIONS = "${Options}"
+INDEX_TOTAL = "${Total}"
+INDEX_PASSED = "${Passed}"
+INDEX_FAILED = "${Failed}"
+INDEX_IGNORED = "${Ignored}"
+INDEX_EXCLUDED_LISTS = "${ExcludedThroughLists}"
+INDEX_EXCLUDED_OTHER = "${ExcludedByOtherReasons}"
+INDEX_TEST_NAME = "${TestName}"
+INDEX_FAILED_TESTS_LIST = "${FailedTestsList}"
+
+
+class RunnerJS(Runner):
+ def __init__(self, args, name):
+ Runner.__init__(self, args, name)
+ self.cmd_env = environ.copy()
+
+ if args.aot:
+ self.conf_kind = ConfigurationKind.AOT
+ is_aot_full = len(list(filter(
+ lambda arg:"--compiler-inline-full-intrinsics=true" in arg,
+ args.aot_args
+ ))) > 0
+ if is_aot_full:
+ self.conf_kind = ConfigurationKind.AOT_FULL
+ elif args.jit:
+ self.conf_kind = ConfigurationKind.JIT
+ elif args.irtoc:
+ self.conf_kind = ConfigurationKind.IRTOC
+ elif args.quick:
+ self.conf_kind = ConfigurationKind.QUICK
+ else:
+ self.conf_kind = ConfigurationKind.INT
+
+ for san in ["ASAN_OPTIONS", "TSAN_OPTIONS", "MSAN_OPTIONS", "LSAN_OPTIONS"]:
+ # we don't want to interpret asan failures as SyntaxErrors
+ self.cmd_env[san] = ":exitcode=255"
+
+ self.es2panda = path.join(self.build_dir, "bin", "es2panda")
+ if not path.isfile(self.es2panda):
+ raise Exception(f"Cannot find es2panda binary: {self.es2panda}")
+
+ self.runtime = path.join(args.build_dir, 'bin', 'ark')
+ if not path.isfile(self.runtime):
+ raise Exception(f"Cannot find runtime binary: {self.runtime}")
+
+ self.cmd_prefix = []
+
+ if args.arm64_qemu:
+ self.cmd_prefix = ["qemu-aarch64", "-L", "/usr/aarch64-linux-gnu/"]
+
+ if args.arm32_qemu:
+ self.cmd_prefix = ["qemu-arm", "-L", "/usr/arm-linux-gnueabi"]
+
+ ecmastdlib_abc = f"{self.build_dir}/pandastdlib/arkstdlib.abc"
+
+ self.quick_args = []
+ if self.conf_kind == ConfigurationKind.QUICK:
+ self.ark_quick = path.join(args.build_dir, 'bin', 'arkquick')
+            if not path.isfile(self.ark_quick):
+                raise Exception(f"Cannot find arkquick binary: {self.ark_quick}")
+ ecmastdlib_abc = self.generate_quick_stdlib(ecmastdlib_abc, args.verbose)
+ else:
+ self.ark_quick = ""
+
+ self.runtime_args = [
+ f'--boot-panda-files={ecmastdlib_abc}',
+ '--load-runtimes=ecmascript',
+ f'--gc-type={args.gc_type}',
+ f'--heap-verifier={args.heap_verifier}',
+ ]
+
+ if not args.no_gip:
+ self.runtime_args += ['--run-gc-in-place']
+
+ if self.conf_kind in [ConfigurationKind.AOT, ConfigurationKind.AOT_FULL]:
+ self.arkaot = path.join(self.build_dir, 'bin', 'ark_aot')
+ if not path.isfile(self.arkaot):
+ raise Exception(f"Cannot find aot binary: {self.arkaot}")
+
+ self.aot_args = [
+ f'--boot-panda-files={ecmastdlib_abc}',
+ '--load-runtimes=ecmascript',
+ f'--gc-type={args.gc_type}',
+ f'--heap-verifier={args.heap_verifier}',
+ ]
+
+ if not args.no_gip:
+ self.aot_args += ['--run-gc-in-place']
+
+ self.aot_args += args.aot_args
+ else:
+ self.arkaot = None
+ self.aot_args = []
+
+ self.test_env = TestEnv(
+ args=args,
+ conf_kind=self.conf_kind,
+ cmd_prefix=self.cmd_prefix,
+ cmd_env=self.cmd_env,
+ es2panda=self.es2panda,
+ runtime=self.runtime,
+ runtime_args=self.runtime_args,
+ arkaout=self.arkaot,
+ aot_args=self.aot_args,
+ ark_quick=self.ark_quick,
+ quick_args=self.quick_args
+ )
+
+ def generate_quick_stdlib(self, ecmastdlib_abc, verbose):
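+ # Run arkquick over the ecmascript stdlib .abc once so that all tests reuse the quickened file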
+ cmd = self.cmd_prefix + [self.ark_quick]
+ cmd.extend(self.quick_args)
+ src_abc = ecmastdlib_abc
+ dst_abc = '%s.quick%s' % path.splitext(src_abc)
+ cmd.extend([src_abc, dst_abc])
+
+ if verbose:
+ print('quick ecmastdlib: %s' % ' '.join(cmd), file=sys.stderr)
+
+ process = subprocess.Popen(
+ cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE, env=self.cmd_env)
+
+ try:
+ out, err = process.communicate(timeout=600)
+ except subprocess.TimeoutExpired:
+ process.kill()
+ raise Exception("Cannot quick %s: timeout" % src_abc)
+
+ if process.returncode != 0:
+ raise Exception("Cannot quick %s: %d" % (src_abc, process.returncode))
+
+ return dst_abc
+
+ def summarize_list(self, name, tests_list):
+ if tests_list:
+ tests_list.sort(key=lambda t: t.path)
+ print("# " + name)
+ for test in tests_list:
+ print(test.path)
+ if self.args.error:
+ print("steps:", test.reproduce)
+ print(test.report.error if test.report is not None else "")
+ print("")
+
+ def summarize_test_statistics(self):
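+ # Count passed/failed/ignored/excluded tests, write per-test reports and print the summary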
+ print("")
+
+ fail_lists = {}
+ for kind in FailKind:
+ fail_lists[kind] = []
+ ignored_still_failed = []
+ ignored_but_passed = []
+
+ self.failed = 0
+ self.ignored = 0
+ self.passed = 0
+ self.excluded_after = 0
+
+ timestamp = int(datetime.timestamp(datetime.now()))
+ failed_tests = []
+
+ for test_result in self.results:
+ if test_result.excluded:
+ self.excluded_after += 1
+ continue
+
+ assert (test_result.passed is not None)
+
+ report_root = path.join(self.report_root, "known" if test_result.ignored else "new")
+
+ if not test_result.passed:
+ if self.args.report_format == ReportFormat.HTML.value:
+ report_path = path.join(report_root, f"{test_result.test_id}.report-{timestamp}.html")
+ failed_tests.append(report_path)
+ else:
+ report_path = path.join(report_root, f"{test_result.test_id}.report-{timestamp}.md")
+ print(f"Formatted report is saved to {report_path}")
+ makedirs(path.dirname(report_path), exist_ok=True)
+ write_2_file(report_path, test_result.get_formatted_report())
+ text_report_path = path.join(report_root, f"{test_result.test_id}.report-{timestamp}.log")
+ write_2_file(text_report_path, test_result.get_text_report())
+ print(f"Plain text report is saved to {text_report_path}")
+
+ if not test_result.passed:
+ if test_result.ignored:
+ self.ignored += 1
+ ignored_still_failed.append(test_result)
+ else:
+ self.failed += 1
+ fail_lists[test_result.fail_kind].append(test_result)
+ else:
+ self.passed += 1
+ if test_result.ignored:
+ ignored_but_passed.append(test_result)
+
+ total_tests = len(self.tests) + self.excluded
+
+ if self.args.report_format == ReportFormat.HTML.value:
+ self.create_html_index(failed_tests, total_tests, timestamp)
+
+ if not self.update:
+ for kind in FailKind:
+ self.summarize_list(kind.name, fail_lists[kind])
+
+ print("Summary(%s):" % self.name)
+ print("\033[37mTotal: %5d" % total_tests)
+ print("\033[92mPassed: %5d" % self.passed)
+ print("\033[91mFailed: %5d" % self.failed)
+ print("\033[37mIgnored: %5d" % self.ignored)
+ print("\033[37mExcluded through lists: %5d" % self.excluded)
+ print("\033[37mExcluded by other reasons: %5d" % self.excluded_after)
+ print("\033[0m")
+
+ return self.failed
+
+ def create_html_index(self, failed_tests, total_tests, timestamp):
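+ # Fill the placeholders of index_template.html with the counters and the list of failed tests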
+ line_template = '<a href="${TestName}">${TestName}</a><br/>'
+ with open(path.join(path.dirname(path.abspath(__file__)), "index_template.html"), "r") as fp:
+ report = fp.read()
+
+ report = report.replace(INDEX_TITLE, f"Summary for {self.name} {datetime.now()}")
+ report = report.replace(INDEX_OPTIONS, self.get_str_args())
+ report = report.replace(INDEX_TOTAL, str(total_tests))
+ report = report.replace(INDEX_PASSED, str(self.passed))
+ report = report.replace(INDEX_FAILED, str(self.failed))
+ report = report.replace(INDEX_IGNORED, str(self.ignored))
+ report = report.replace(INDEX_EXCLUDED_LISTS, str(self.excluded))
+ report = report.replace(INDEX_EXCLUDED_OTHER, str(self.excluded_after))
+
+ failed_tests_report = []
+ start = len(self.report_root) + 1
+ for failed in failed_tests:
+ failed_tests_report.append(line_template.replace(INDEX_TEST_NAME, failed[start:]))
+
+ report = report.replace(INDEX_FAILED_TESTS_LIST, "\n".join(failed_tests_report))
+
+ report_path = path.join(self.report_root, f"{self.name}.report-{timestamp}.html")
+ write_2_file(report_path, report)
+ print(f"Formatted report is saved to {report_path}")
+
+ def get_str_args(self):
+ args = str(self.args)
+ args = args[args.find("(") + 1:-1].split(",")
+ args = list(map(
+ lambda x: x.strip(),
+ args
+ ))
+ return "\n".join(args)
+
+ def collect_excluded_test_lists(self, extra_list=None, test_name: str = None):
+ self.excluded_lists.extend(self.collect_test_lists("excluded", extra_list, test_name))
+
+ def collect_ignored_test_lists(self, extra_list=None, test_name: str = None):
+ self.ignored_lists.extend(self.collect_test_lists("ignored", extra_list, test_name))
+
+ def collect_test_lists(self, kind: str, extra_lists: List[str] = None, test_name: str = None) -> List[str]:
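+ # Collect list files matching "<test_name>*-<kind>.txt" and "<test_name>*-<kind>-<configuration>.txt" under list_root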
+ test_lists = extra_lists[:] if extra_lists else []
+ test_name = test_name if test_name else self.name
+ template_names = [f"{test_name}*-{kind}.txt", f"{test_name}*-{kind}-{self.conf_kind.value}.txt"]
+ for template_name in template_names:
+ glob_expression = path.join(self.list_root, f"**/{template_name}")
+ test_lists.extend(glob(glob_expression, recursive=True))
+
+ return test_lists
\ No newline at end of file
diff --git a/test/runner/runner_js_hermes.py b/test/runner/runner_js_hermes.py
new file mode 100644
index 0000000000000000000000000000000000000000..79cf36021cc9df70ccbad6336c7d216c8e78be7d
--- /dev/null
+++ b/test/runner/runner_js_hermes.py
@@ -0,0 +1,32 @@
+from runner_base import correct_path, get_test_id
+from runner_js import RunnerJS
+from test_js_hermes import TestJSHermes
+from util_hermes import UtilHermes
+
+
+class RunnerJSHermes(RunnerJS):
+ def __init__(self, args):
+ RunnerJS.__init__(self, args, "hermes")
+
+ self.collect_excluded_test_lists()
+ self.collect_ignored_test_lists()
+
+ for arg in args.aot_args:
+ if "--compiler-inline-full-intrinsics=true" in arg:
+ self.excluded_lists.append(
+ correct_path(self.list_root, f"{self.name}-excluded-aot-inline-full.txt")
+ )
+ break
+
+ self.util = UtilHermes(self.test_env.args.verbose)
+ self.test_env.util = self.util
+ self.test_root = self.util.generate(
+ self.build_dir,
+ args.progress
+ )
+ self.add_directory(self.test_root, "js", [])
+
+ def create_test(self, test_file, flags, is_ignored):
+ test = TestJSHermes(self.test_env, test_file, flags, get_test_id(test_file, self.test_root))
+ test.ignored = is_ignored
+ return test
diff --git a/test/runner/runner_js_parser.py b/test/runner/runner_js_parser.py
new file mode 100644
index 0000000000000000000000000000000000000000..2a134867c091804abb6c883bdff5ce08782d7f54
--- /dev/null
+++ b/test/runner/runner_js_parser.py
@@ -0,0 +1,22 @@
+from os import path
+
+from runner_base import get_test_id
+from runner_js import RunnerJS
+from test_js_parser import TestJSParser
+
+
+class RunnerJSParser(RunnerJS):
+ def __init__(self, args):
+ super(RunnerJSParser, self).__init__(args, "parser-js")
+
+ self.collect_excluded_test_lists()
+ self.collect_ignored_test_lists()
+
+ def add_directory(self, directory, extension, flags):
+ new_dir = path.join(self.test_root, directory)
+ super(RunnerJSParser, self).add_directory(new_dir, extension, flags)
+
+ def create_test(self, test_file, flags, is_ignored):
+ test = TestJSParser(self.test_env, test_file, flags, get_test_id(test_file, self.test_root))
+ test.ignored = is_ignored
+ return test
diff --git a/test/runner/runner_js_test262.py b/test/runner/runner_js_test262.py
new file mode 100644
index 0000000000000000000000000000000000000000..21a8807233c9f6e6b2b5c99fce3e99375d3d406e
--- /dev/null
+++ b/test/runner/runner_js_test262.py
@@ -0,0 +1,34 @@
+from os import path
+
+from runner_base import correct_path, get_test_id
+from runner_js import RunnerJS
+from test_js_test262 import TestJSTest262
+from util_test262 import UtilTest262
+
+
+class RunnerJSTest262(RunnerJS):
+ def __init__(self, args):
+ RunnerJS.__init__(self, args, "test262-ark")
+ self.ignored_name_prefix = "test262"
+
+ self.collect_excluded_test_lists(test_name=self.ignored_name_prefix)
+ self.collect_ignored_test_lists(test_name=self.ignored_name_prefix)
+
+ if args.bco:
+ self.bco_list = correct_path(self.list_root, f"{self.ignored_name_prefix}skiplist-bco.txt")
+ self.bco_tests = self.load_tests_from_lists([self.bco_list])
+
+ self.util = UtilTest262()
+ self.test_root = self.util.generate(
+ self.build_dir,
+ path.join(self.list_root, "runner/test262harness.js"),
+ args.progress)
+ self.test_env.util = self.util
+
+ self.add_directory(self.test_root, "js", [])
+
+ def create_test(self, test_file, flags, is_ignored):
+ with_optimizer = test_file not in self.bco_tests
+ test = TestJSTest262(self.test_env, test_file, flags, with_optimizer, get_test_id(test_file, self.test_root))
+ test.ignored = is_ignored
+ return test
diff --git a/test/runner/starter.py b/test/runner/starter.py
new file mode 100644
index 0000000000000000000000000000000000000000..a1f1792f1637e4c3a3060aea093b0b10793104f2
--- /dev/null
+++ b/test/runner/starter.py
@@ -0,0 +1,139 @@
+import argparse
+from os import path
+
+from report_format import ReportFormat
+
+
+def is_directory(parser, arg):
+ if not path.isdir(arg):
+ parser.error(f"The directory {arg} does not exist")
+
+ return path.abspath(arg)
+
+
+def is_file(parser, arg):
+ if not path.isfile(arg):
+ parser.error(f"The file {arg} does not exist")
+
+ return path.abspath(arg)
+
+
+def check_timeout(value):
+ ivalue = int(value)
+ if ivalue <= 0:
+ raise argparse.ArgumentTypeError(f"{value} is an invalid timeout value")
+
+ return ivalue
+
+
+def get_args():
+ parser = argparse.ArgumentParser(description="Regression test runner")
+ parser.add_argument(
+ 'build_dir', type=lambda arg: is_directory(parser, arg),
+ help='build directory')
+ parser.add_argument(
+ '--test262', '-t', action='store_true', dest='test262', default=False,
+ help='run test262 tests')
+ parser.add_argument(
+ '--regression', '-r', action='store_true', dest='regression',
+ default=False, help='run regression tests')
+ parser.add_argument(
+ '--hermes', action='store_true', dest='hermes',
+ default=False, help='run Hermes tests')
+ parser.add_argument(
+ '--tsc', action='store_true', dest='tsc',
+ default=False, help='run tsc tests')
+
+ parser.add_argument(
+ '--test-root', dest='test_root', default=None, type=lambda arg: is_directory(parser, arg),
+ help='directory with test files. If not set, the module directory is used')
+ parser.add_argument(
+ '--list-root', dest='list_root', default=None, type=lambda arg: is_directory(parser, arg),
+ help='directory with the lists of excluded/ignored tests. If not set, the module directory is used')
+ parser.add_argument(
+ '--report-root', dest='report_root', default=None,
+ help='directory where report files for failed tests will be saved. If not set, the module directory is used')
+ parser.add_argument(
+ '--report-format', dest='report_format', default=ReportFormat.MD.value,
+ help='format of report files. Possible values: html or md. If not set, md is used')
+
+ parser.add_argument(
+ '--no-skip', action='store_false', dest='skip', default=True,
+ help='don\'t use skiplists')
+ parser.add_argument(
+ '--update', action='store_true', dest='update', default=False,
+ help='update skiplist')
+ parser.add_argument(
+ '--update-expected', action='store_true', dest='update_expected', default=False,
+ help='update files with expected results')
+
+ parser.add_argument(
+ '--filter', '-f', action='store', dest='filter',
+ default="*", help='test filter regexp')
+ parser.add_argument(
+ '--test-list', dest='test_list', default=None,
+ help='run only the tests listed in this file')
+ parser.add_argument(
+ '--test-file', dest='test_file', default=None,
+ help='run only one test specified here')
+
+ parser.add_argument(
+ '--es2panda-timeout', type=check_timeout,
+ dest='es2panda_timeout', default=60, help='es2panda translator timeout')
+ parser.add_argument(
+ '--paoc-timeout', type=check_timeout,
+ dest='paoc_timeout', default=600, help='paoc compiler timeout')
+ parser.add_argument(
+ '--timeout', type=check_timeout,
+ dest='timeout', default=10, help='JS runtime timeout')
+
+ parser.add_argument(
+ '--no-run-gc-in-place', action='store_true', dest='no_gip', default=False,
+ help='disable --run-gc-in-place mode')
+ parser.add_argument(
+ '--gc-type', dest='gc_type', default="g1-gc", help='Type of garbage collector')
+ parser.add_argument(
+ '--heap-verifier', dest='heap_verifier', default="fail_on_verification",
+ help='Heap verifier options')
+ parser.add_argument(
+ '--aot', action='store_true', dest='aot', default=False,
+ help='use AOT compilation')
+ parser.add_argument(
+ '--no-bco', action='store_false', dest='bco', default=True,
+ help='disable bytecodeopt')
+ parser.add_argument(
+ '--jit', action='store_true', dest='jit', default=False,
+ help='use JIT in interpreter')
+ parser.add_argument(
+ '--arm64-compiler-skip', action='store_true', dest='arm64_compiler_skip', default=False,
+ help='use skiplist for tests failing on aarch64 in AOT or JIT mode')
+ parser.add_argument(
+ '--arm64-qemu', action='store_true', dest='arm64_qemu', default=False,
+ help='launch all binaries in qemu aarch64')
+ parser.add_argument(
+ '--arm32-qemu', action='store_true', dest='arm32_qemu', default=False,
+ help='launch all binaries in qemu arm')
+ parser.add_argument(
+ '--aot-args', action='append', dest='aot_args', default=[],
+ help='Additional arguments that will be passed to ark_aot')
+ parser.add_argument(
+ '--irtoc', action='store_true', dest='irtoc', default=False,
+ help='use irtoc in interpreter')
+ parser.add_argument(
+ '--quick', '-q', action='store_true', dest='quick', default=False,
+ help='use bytecode quickener')
+
+ parser.add_argument(
+ '--error', action='store_true', dest='error', default=False,
+ help='capture stderr')
+ parser.add_argument(
+ '--no-progress', action='store_false', dest='progress', default=True,
+ help='don\'t show progress bar')
+ parser.add_argument(
+ '--verbose', '-v', action='store_true', dest='verbose', default=False,
+ help='Enable verbose output')
+ parser.add_argument(
+ '--time-report', action='store_true', dest='time_report', default=False,
+ help='Log execution test time')
+
+ return parser.parse_args()
diff --git a/test/test262harness.js b/test/runner/test262harness.js
similarity index 100%
rename from test/test262harness.js
rename to test/runner/test262harness.js
diff --git a/test/runner/test_base.py b/test/runner/test_base.py
new file mode 100644
index 0000000000000000000000000000000000000000..9be36d01c2fdd2b2042155e9766011011525c9b0
--- /dev/null
+++ b/test/runner/test_base.py
@@ -0,0 +1,63 @@
+# A Test knows how to run a single test and determine whether it passed or failed
+from datetime import datetime
+
+from params import TestReport, TestEnv
+from report import make_html_report, make_md_report, make_text_report
+from report_format import ReportFormat
+
+
+class Test:
+ def __init__(self, test_env: TestEnv, test_path, flags, test_id="", update_expected=False):
+ self.test_env = test_env
+ # full path to the test file
+ self.path = test_path
+ self.flags = flags
+ self.test_id = test_id
+ self.update_expected = update_expected
+ self.expected = ""
+ # Contains fields output, error, and return_code of the last executed step
+ self.report: TestReport
+ self.report = None
+ # Test result: True if all steps passed, False if any step fails
+ self.passed = None
+ # If the test is mentioned in any ignored_list
+ self.ignored = False
+ # A test can also mark itself as excluded, in addition to the excluded_tests lists
+ # In that case it is counted as `excluded by other reasons`
+ self.excluded = False
+ # Collect all executable commands
+ self.reproduce = ""
+ # Time to execute in seconds
+ self.time = None
+
+ def log_cmd(self, cmd):
+ self.reproduce += "\n" + ' '.join(cmd)
+
+ def run(self):
+ start = 0
+ if self.test_env.args.time_report:
+ start = datetime.now()
+ if self.test_env.args.verbose:
+ print(f"Going to execute: {self.path}")
+
+ result = self.do_run()
+
+ if self.test_env.args.time_report:
+ finish = datetime.now()
+ self.time = (finish - start).total_seconds()
+ if self.test_env.args.verbose:
+ print(f"{self.path} executed for {round(self.time, 2)}")
+
+ return result
+
+ def do_run(self):
+ return self
+
+ def get_formatted_report(self):
+ if self.test_env.args.report_format == ReportFormat.HTML.value:
+ return make_html_report(self)
+ else:
+ return make_md_report(self)
+
+ def get_text_report(self):
+ return make_text_report(self)
diff --git a/test/runner/test_js.py b/test/runner/test_js.py
new file mode 100644
index 0000000000000000000000000000000000000000..5a6ff945a40365235b2f3cc0d776946546dcf3b8
--- /dev/null
+++ b/test/runner/test_js.py
@@ -0,0 +1,151 @@
+import subprocess
+import sys
+from os import path, remove
+from typing import List
+
+from configuration_kind import ConfigurationKind
+from fail_kind import FailKind
+from params import Params, TestReport
+from test_base import Test
+
+
+class TestJS(Test):
+ def __init__(self, test_env, test_path, flags, test_id, update_expected):
+ Test.__init__(self, test_env, test_path, flags, test_id, update_expected)
+ # If the test fails, holds the reason (a FailKind value) of the first failed step
+ # It is assumed that once a step fails, no further steps are executed
+ self.fail_kind = None
+
+ def run_one_step(self, name, params: Params, result_validator):
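+ # Launch one external tool (es2panda, ark, ark_aot or arkquick), capture its output and map failures to FailKind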
+ cmd = self.test_env.cmd_prefix + [params.executor]
+ cmd.extend(params.flags)
+
+ self.log_cmd(cmd)
+
+ if self.test_env.args.verbose:
+ print(f"Run {name}: {' '.join(cmd)}", file=sys.stderr)
+
+ passed = False
+ output = ""
+
+ try:
+ process = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE, env=params.env)
+ try:
+ out, err = process.communicate(timeout=params.timeout)
+ output = out.decode("utf-8", errors="ignore")
+ error = err.decode("utf-8", errors="ignore")
+ return_code = process.returncode
+ passed = result_validator(output, error, return_code)
+ fail_kind = params.fail_kind_fail if not passed else None
+ except subprocess.TimeoutExpired as e:
+ print(f"{[params.executor]} failed with {e}")
+ fail_kind = params.fail_kind_timeout
+ error = fail_kind.name
+ return_code = process.returncode
+ process.kill()
+ except Exception as e:
+ print(f"{[params.executor]} failed with {e}")
+ fail_kind = params.fail_kind_other
+ error = fail_kind.name
+ return_code = -1
+
+ report = TestReport(
+ output=output,
+ error=error,
+ return_code=return_code
+ )
+
+ return passed, report, fail_kind
+
+ def run_es2panda(self, flags, test_abc, result_validator):
+ es2panda_flags = flags[:]
+ es2panda_flags.append("--thread=0")
+ if len(test_abc) > 0:
+ es2panda_flags.append(f"--output={test_abc}")
+
+ es2panda_flags.append(self.path)
+
+ params = Params(
+ executor=self.test_env.es2panda,
+ flags=es2panda_flags,
+ env=self.test_env.cmd_env,
+ timeout=self.test_env.args.es2panda_timeout,
+ fail_kind_fail=FailKind.ES2PANDA_FAIL,
+ fail_kind_timeout=FailKind.ES2PANDA_TIMEOUT,
+ fail_kind_other=FailKind.ES2PANDA_OTHER,
+ )
+
+ return self.run_one_step("es2panda", params, result_validator)
+
+ def run_runtime(self, test_an, test_abc, result_validator):
+ ark_flags = []
+ ark_flags.extend(self.test_env.runtime_args)
+ if self.test_env.conf_kind in [ConfigurationKind.AOT, ConfigurationKind.AOT_FULL]:
+ ark_flags.extend(["--aot-files", test_an])
+
+ if self.test_env.conf_kind == ConfigurationKind.JIT:
+ ark_flags.extend(['--compiler-enable-jit=true', '--compiler-hotness-threshold=0'])
+ else:
+ ark_flags.extend(['--compiler-enable-jit=false'])
+
+ if self.test_env.conf_kind == ConfigurationKind.IRTOC:
+ ark_flags.extend(['--interpreter-type=irtoc'])
+
+ ark_flags.extend([test_abc, "_GLOBAL::func_main_0"])
+
+ params = Params(
+ timeout=self.test_env.args.timeout,
+ executor=self.test_env.runtime,
+ flags=ark_flags,
+ env=self.test_env.cmd_env,
+ fail_kind_fail=FailKind.RUNTIME_FAIL,
+ fail_kind_timeout=FailKind.RUNTIME_TIMEOUT,
+ fail_kind_other=FailKind.RUNTIME_OTHER,
+ )
+
+ return self.run_one_step("ark", params, result_validator)
+
+ def run_aot(self, test_an, test_abc, result_validator):
+ aot_flags = []
+ aot_flags.extend(self.test_env.aot_args)
+ aot_flags = list(map(
+ lambda flag: flag.strip("'\""),
+ aot_flags
+ ))
+ aot_flags.extend(['--paoc-panda-files', test_abc])
+ aot_flags.extend(['--paoc-output', test_an])
+
+ if path.isfile(test_an):
+ remove(test_an)
+
+ params = Params(
+ timeout=self.test_env.args.paoc_timeout,
+ executor=self.test_env.arkaout,
+ flags=aot_flags,
+ env=self.test_env.cmd_env,
+ fail_kind_fail=FailKind.AOT_FAIL,
+ fail_kind_timeout=FailKind.AOT_TIMEOUT,
+ fail_kind_other=FailKind.AOT_OTHER,
+ )
+
+ return self.run_one_step("ark_aot", params, result_validator)
+
+ def run_ark_quick(self, flags: List[str], test_abc: str, result_validator):
+ quick_flags = flags[:]
+ quick_flags.extend(self.test_env.quick_args)
+
+ src_abc = test_abc
+ dst_abc = '%s.quick%s' % path.splitext(src_abc)
+ quick_flags.extend([src_abc, dst_abc])
+
+ params = Params(
+ timeout=self.test_env.args.timeout,
+ executor=self.test_env.ark_quick,
+ flags=quick_flags,
+ env=self.test_env.cmd_env,
+ fail_kind_fail=FailKind.QUICK_FAIL,
+ fail_kind_timeout=FailKind.QUICK_TIMEOUT,
+ fail_kind_other=FailKind.QUICK_OTHER,
+ )
+
+ return (*self.run_one_step("ark_quick", params, result_validator), dst_abc)
diff --git a/test/runner/test_js_hermes.py b/test/runner/test_js_hermes.py
new file mode 100644
index 0000000000000000000000000000000000000000..f360ebaeca9edf1e3fd2894b27a5915f6b01153f
--- /dev/null
+++ b/test/runner/test_js_hermes.py
@@ -0,0 +1,69 @@
+from os import path, makedirs
+
+from configuration_kind import ConfigurationKind
+from test_js import TestJS
+from utils import purify
+
+FOLDER_FOR_INTERMEDIATE_FILES = "intermediate"
+
+
+class TestJSHermes(TestJS):
+ def __init__(self, test_env, test_path, flags, test_id=None, update_expected=False):
+ TestJS.__init__(self, test_env, test_path, flags, test_id, update_expected)
+ self.tmp_dir = path.join(path.sep, "tmp", "hermes")
+ makedirs(self.tmp_dir, exist_ok=True)
+ self.util = self.test_env.util
+
+ def do_run(self):
+ self.expected = self.util.get_expected_value(self.path)
+
+ test_abc = path.join(self.tmp_dir, FOLDER_FOR_INTERMEDIATE_FILES, f"{self.test_id}.abc")
+ test_an = path.join(self.tmp_dir, FOLDER_FOR_INTERMEDIATE_FILES, f"{self.test_id}.an")
+
+ directory = path.dirname(test_abc)
+ makedirs(directory, exist_ok=True)
+
+ # Run es2panda
+ self.passed, self.report, self.fail_kind = self.run_es2panda(
+ [],
+ test_abc,
+ lambda o, e, rc: rc == 0
+ )
+
+ if not self.passed:
+ return self
+
+ # Run quick if required
+ if self.test_env.args.quick:
+ ark_flags = []
+ self.passed, self.report, self.fail_kind, test_abc = self.run_ark_quick(
+ ark_flags,
+ test_abc,
+ lambda o, e, rc: rc == 0
+ )
+
+ if not self.passed:
+ return self
+
+ # Run aot if required
+ if self.test_env.conf_kind in [ConfigurationKind.AOT, ConfigurationKind.AOT_FULL]:
+ self.passed, self.report, self.fail_kind = self.run_aot(
+ test_an,
+ test_abc,
+ lambda o, e, rc: rc == 0
+ )
+
+ if not self.passed:
+ return self
+
+ # Run ark
+ self.passed, self.report, self.fail_kind = self.run_runtime(
+ test_an,
+ test_abc,
+ lambda o, e, rc: self.ark_validate_result(o, e, rc, self.expected)
+ )
+
+ return self
+
+ def ark_validate_result(self, actual_output, _1, _2, expected_output):
+ return purify(actual_output) == purify(expected_output)
diff --git a/test/runner/test_js_parser.py b/test/runner/test_js_parser.py
new file mode 100644
index 0000000000000000000000000000000000000000..c1c99caac0423e6b3f00e149bba83aae37bc022d
--- /dev/null
+++ b/test/runner/test_js_parser.py
@@ -0,0 +1,31 @@
+from os import path
+
+from test_js import TestJS
+
+
+class TestJSParser(TestJS):
+ def __init__(self, test_env, test_path, flags, test_id=None, update_expected=False):
+ TestJS.__init__(self, test_env, test_path, flags, test_id, update_expected)
+
+ def do_run(self):
+ es2panda_flags = ["--dump-ast"]
+ es2panda_flags.extend(self.flags)
+
+ self.passed, self.report, self.fail_kind = self.run_es2panda(
+ flags=es2panda_flags,
+ test_abc="",
+ result_validator=self.es2panda_result_validator
+ )
+
+ return self
+
+ def es2panda_result_validator(self, actual_output, actual_error, actual_return_code):
+ expected_path = f"{path.splitext(self.path)[0]}-expected.txt"
+ try:
+ with open(expected_path, 'r') as fp:
+ self.expected = fp.read()
+ passed = self.expected == actual_output and actual_return_code in [0, 1]
+ except OSError:
+ passed = False
+
+ return passed
diff --git a/test/runner/test_js_test262.py b/test/runner/test_js_test262.py
new file mode 100644
index 0000000000000000000000000000000000000000..902c2bb6b6f197b35e58650e2b8647393e4d9e4a
--- /dev/null
+++ b/test/runner/test_js_test262.py
@@ -0,0 +1,87 @@
+from os import path, makedirs
+
+from configuration_kind import ConfigurationKind
+from test_js import TestJS
+
+FOLDER_FOR_INTERMEDIATE_FILES = "intermediate"
+
+
+class TestJSTest262(TestJS):
+ def __init__(self, test_env, test_path, flags, with_optimizer, test_id=None, update_expected=False):
+ TestJS.__init__(self, test_env, test_path, flags, test_id, update_expected)
+ self.with_optimizer = with_optimizer
+ self.need_exec = True
+ self.tmp_dir = path.join(path.sep, "tmp", "test262")
+ makedirs(self.tmp_dir, exist_ok=True)
+ self.util = self.test_env.util
+
+ def do_run(self):
+ with open(self.path, 'r') as fp:
+ header = self.util.get_header(fp.read())
+ desc = self.util.parse_descriptor(header)
+
+ test_abc = path.join(self.tmp_dir, FOLDER_FOR_INTERMEDIATE_FILES, f"{self.test_id}.abc")
+ test_an = path.join(self.tmp_dir, FOLDER_FOR_INTERMEDIATE_FILES, f"{self.test_id}.an")
+
+ directory = path.dirname(test_abc)
+ makedirs(directory, exist_ok=True)
+
+ # Run es2panda
+ es2panda_flags = []
+ if self.with_optimizer:
+ es2panda_flags.append("--opt-level=2")
+ if 'module' in desc['flags']:
+ es2panda_flags.append("--module")
+ if 'noStrict' in desc['flags']:
+ self.excluded = True
+ return self
+
+ self.passed, self.report, self.fail_kind = self.run_es2panda(
+ es2panda_flags,
+ test_abc,
+ lambda o, e, rc: self.es2panda_result_validator(o, e, rc, desc)
+ )
+
+ if not self.passed or not self.need_exec:
+ return self
+
+ # Run quick if required
+ if self.test_env.args.quick:
+ ark_flags = []
+ self.passed, self.report, self.fail_kind, test_abc = self.run_ark_quick(
+ ark_flags,
+ test_abc,
+ lambda o, e, rc: rc == 0
+ )
+
+ if not self.passed:
+ return self
+
+ # Run aot if required
+ if self.test_env.conf_kind in [ConfigurationKind.AOT, ConfigurationKind.AOT_FULL]:
+ self.passed, self.report, self.fail_kind = self.run_aot(
+ test_an,
+ test_abc,
+ lambda o, e, rc: rc == 0
+ )
+
+ if not self.passed:
+ return self
+
+ # Run ark
+ self.passed, self.report, self.fail_kind = self.run_runtime(
+ test_an,
+ test_abc,
+ lambda o, e, rc: self.util.validate_runtime_result(rc, e, desc, o)
+ )
+
+ return self
+
+ def es2panda_result_validator(self, actual_output, actual_error, actual_return_code, desc):
+ passed, self.need_exec = self.util.validate_parse_result(
+ actual_return_code,
+ actual_error,
+ desc,
+ actual_output
+ )
+ return passed
diff --git a/test/runner/util_hermes.py b/test/runner/util_hermes.py
new file mode 100644
index 0000000000000000000000000000000000000000..8d9203b06b761140d3dc775b4bd5c123c28d394f
--- /dev/null
+++ b/test/runner/util_hermes.py
@@ -0,0 +1,49 @@
+import re
+from os import getenv
+
+import utils
+
+HERMES_URL = "HERMES_URL"
+HERMES_REVISION = "HERMES_REVISION"
+
+
+class UtilHermes:
+
+ def __init__(self, verbose=False):
+ self.print_expr = re.compile(r"^\s*print\((?P<expr>.+)\)", re.MULTILINE)
+ self.check_expr = re.compile(r"^\s*//\s?(?:CHECK-NEXT|CHECK-LABEL|CHECK):(.+)", re.MULTILINE)
+
+ self.verbose = verbose
+
+ self.hermes_url = getenv(HERMES_URL)
+ self.hermes_revision = getenv(HERMES_REVISION)
+ if self.hermes_url is None:
+ raise EnvironmentError(f"No {HERMES_URL} environment variable set")
+ if self.hermes_revision is None:
+ raise EnvironmentError(f"No {HERMES_REVISION} environment variable set")
+
+ def generate(self, build_dir, show_progress, source_path=None):
+ return utils.generate(
+ name="hermes",
+ url=self.hermes_url,
+ revision=self.hermes_revision,
+ build_dir=build_dir,
+ test_subdir="test/hermes",
+ show_progress=show_progress,
+ source_path=source_path
+ )
+
+ def get_expected_value(self, input_file):
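+ # The expected output is taken from the "// CHECK*:" comments embedded in the Hermes test itself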
+ with open(input_file, 'r') as fp:
+ input_str = fp.read()
+ result = "\n".join(map(
+ lambda x: x.strip(),
+ self.check_expr.findall(input_str)
+ ))
+ if len(result) == 0:
+ prints = self.print_expr.findall(input_str)
+ if len(prints) > 0 and self.verbose:
+ print(f"Test {input_file} contains {len(prints)} calls of print," +
+ " but does not contain any check." +
+ " Please correct the test")
+ return result
diff --git a/test/test262util.py b/test/runner/util_test262.py
similarity index 72%
rename from test/test262util.py
rename to test/runner/util_test262.py
index 5351b9ed8e3094caede27b3d5378c447e6686169..8ff885d620d04829a340a97b7922eeb56edb496b 100755
--- a/test/test262util.py
+++ b/test/runner/util_test262.py
@@ -15,65 +15,48 @@
# limitations under the License.
from glob import glob
-from os import path
-import os
+from os import path, getenv, makedirs
import re
-import shutil
-import subprocess
+import utils
-class Test262Util:
+TEST262_URL = "TEST262_URL"
+TEST262_REVISION = "TEST262_REVISION"
+
+
+class UtilTest262:
def __init__(self):
self.header = re.compile(
- r"\/\*---(?P.+)---\*\/", re.DOTALL)
- self.includes = re.compile(r"includes:\s+\[(?P.+)\]")
+ r"/\*---(?P.+)---\*/", re.DOTALL)
+ self.includes = re.compile(r"includes:\s+\[(?P.+)]")
self.includes2 = re.compile(r"includes:(?P(\s+-[^-].+)+)")
- self.flags = re.compile(r"flags:\s+\[(?P.+)\]")
+ self.flags = re.compile(r"flags:\s+\[(?P.+)]")
self.negative = re.compile(
r"negative:.*phase:\s+(?P\w+).*type:\s+(?P\w+)",
re.DOTALL)
self.async_ok = re.compile(r"Test262:AsyncTestComplete")
- def generate(self, revision, build_dir, harness_path, show_progress):
- dest_path = path.join(build_dir, 'test262')
- stamp_file = path.join(dest_path, 'test262.stamp')
-
- if path.isfile(stamp_file):
- return dest_path
-
- test262_path = path.join(path.sep, 'tmp', 'test262-%s' % revision)
-
- if not path.exists(test262_path):
- archive_file = path.join(path.sep, 'tmp', 'test262.zip')
-
- print("Downloading test262")
-
- cmd = ['wget', '-q', '-O', archive_file,
- 'https://github.com/tc39/test262/archive/%s.zip' % revision]
-
- if show_progress:
- cmd.append('--show-progress')
-
- return_code = subprocess.call(cmd)
-
- if return_code:
- raise Exception('Downloading test262 repository failed.')
-
- print("Extracting archive")
- if path.isdir(test262_path):
- shutil.rmtree(test262_path)
-
- return_code = subprocess.call(
- ['unzip', '-q', '-d', path.join(path.sep, 'tmp'), archive_file])
-
- if return_code:
- raise Exception(
- 'Failed to unzip test262 repository')
-
- os.remove(archive_file)
-
+ self.test262_url = getenv(TEST262_URL)
+ self.test262_revision = getenv(TEST262_REVISION)
+ if self.test262_url is None:
+ raise EnvironmentError(f"No {TEST262_URL} environment variable set")
+ if self.test262_revision is None:
+ raise EnvironmentError(f"No {TEST262_REVISION} environment variable set")
+
+ def generate(self, build_dir, harness_path, show_progress, source_path=None):
+ return utils.generate(
+ name="test262",
+ url=self.test262_url,
+ revision=self.test262_revision,
+ build_dir=build_dir,
+ test_subdir="test",
+ show_progress=show_progress,
+ process_copy=lambda src, dst: self.prepare_tests(src, dst, harness_path, path.dirname(src)),
+ source_path=source_path
+ )
+
+ def prepare_tests(self, src_path, dest_path, harness_path, test262_path):
print("Generating tests")
- src_path = path.join(test262_path, 'test')
glob_expression = path.join(src_path, "**/*.js")
files = glob(glob_expression, recursive=True)
@@ -82,18 +65,13 @@ class Test262Util:
with open(harness_path, 'r') as fp:
harness = fp.read()
- harness = harness.replace('$SOURCE', '`%s`' % harness)
+ harness = harness.replace('$SOURCE', f'`{harness}`')
for src_file in files:
dest_file = src_file.replace(src_path, dest_path)
- os.makedirs(path.dirname(dest_file), exist_ok=True)
+ makedirs(path.dirname(dest_file), exist_ok=True)
self.create_file(src_file, dest_file, harness, test262_path)
- with open(stamp_file, 'w') as fp:
- pass
-
- return dest_path
-
def create_file(self, input_file, output_file, harness, test262_dir):
with open(input_file, 'r') as fp:
input_str = fp.read()
@@ -169,7 +147,7 @@ class Test262Util:
def validate_runtime_result(self, return_code, std_err, desc, out):
is_negative = (desc['negative_phase'] == 'runtime') or (
- desc['negative_phase'] == 'resolution')
+ desc['negative_phase'] == 'resolution')
if return_code == 0: # passed
if is_negative:
diff --git a/test/runner/utils.py b/test/runner/utils.py
new file mode 100644
index 0000000000000000000000000000000000000000..51acf3e999a88faf9f00190737d90316e7ba904a
--- /dev/null
+++ b/test/runner/utils.py
@@ -0,0 +1,92 @@
+from os import makedirs, path, remove
+import os
+import shutil
+import subprocess
+
+
+def download(name, git_url, revision, target_path, show_progress=False):
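+ # Download <git_url>/<revision>.zip with wget and unpack it into target_path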
+ archive_file = path.join(path.sep, 'tmp', f'{name}.zip')
+ url_file = f'{git_url}/{revision}.zip'
+
+ print(f"Downloading from {url_file} to {archive_file}")
+ cmd = ['wget', '-q', '-O', archive_file, url_file]
+
+ if show_progress:
+ cmd.append('--show-progress')
+
+ return_code = subprocess.call(cmd)
+
+ if return_code:
+ raise Exception(f'Downloading {url_file} file failed.')
+
+ print(f"Extracting archive {archive_file}")
+ if path.isdir(target_path):
+ shutil.rmtree(target_path)
+
+ return_code = subprocess.call(
+ ['unzip', '-q', '-d', target_path, archive_file])
+
+ if return_code:
+ raise Exception(f'Failed to unzip {archive_file} file')
+
+ remove(archive_file)
+
+
+def generate(name, url, revision, build_dir, test_subdir="test", show_progress=False, process_copy=None, source_path=None):
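+ # Download the test suite (unless a stamp file marks it as already generated) and copy its tests into <build_dir>/<name>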
+ dest_path = path.join(build_dir, name)
+ makedirs(dest_path, exist_ok=True)
+ stamp_file = path.join(dest_path, f'{name}.stamp')
+
+ if path.isfile(stamp_file):
+ return dest_path
+
+ temp_path = path.join(path.sep, 'tmp', name)
+
+ if not path.exists(temp_path):
+ download(
+ name,
+ url,
+ revision,
+ temp_path,
+ show_progress
+ )
+
+ temp_path = path.join(temp_path, f'{name}-{revision}')
+
+ if process_copy is not None:
+ process_copy(
+ path.join(temp_path, test_subdir),
+ dest_path
+ )
+ else:
+ copy(
+ path.join(temp_path, test_subdir),
+ dest_path
+ )
+
+ # Create an empty stamp file to mark that the suite has been generated
+ with open(stamp_file, 'w+'):
+ pass
+
+ return dest_path
+
+
+def copy(source_path, dest_path):
+ try:
+ shutil.rmtree(dest_path)
+ shutil.copytree(source_path, dest_path)
+ except Exception as e:
+ print(e)
+
+
+def write_2_file(file_path, content):
+ fd = os.open(file_path, os.O_RDWR | os.O_CREAT | os.O_TRUNC)
+ with os.fdopen(fd, "w+") as file:
+ file.write(content)
+
+
+def purify(line):
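+ # Normalize output before comparison: strip surrounding newlines and drop all spaces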
+ return line.strip(" \n").replace(" ", "")
diff --git a/test/test262skiplist.txt b/test/test262-excluded.txt
similarity index 99%
rename from test/test262skiplist.txt
rename to test/test262-excluded.txt
index d46dc95215842383b2d73f5b3224baf3fa10b0c6..a3ccdd1cfca7029db704dd981a3c760570f36149 100644
--- a/test/test262skiplist.txt
+++ b/test/test262-excluded.txt
@@ -3354,6 +3354,7 @@ built-ins/TypedArray/prototype/fill/BigInt/return-abrupt-from-start-as-symbol.js
built-ins/TypedArray/prototype/fill/BigInt/return-abrupt-from-start.js
built-ins/TypedArray/prototype/fill/BigInt/return-abrupt-from-this-out-of-bounds.js
built-ins/TypedArray/prototype/fill/BigInt/return-this.js
+built-ins/TypedArray/prototype/filter/callbackfn-not-callable-throws.js
built-ins/TypedArray/prototype/filter/BigInt/callbackfn-arguments-with-thisarg.js
built-ins/TypedArray/prototype/filter/BigInt/callbackfn-arguments-without-thisarg.js
built-ins/TypedArray/prototype/filter/BigInt/callbackfn-no-iteration-over-non-integer.js
@@ -3370,6 +3371,7 @@ built-ins/TypedArray/prototype/filter/BigInt/speciesctor-get-species-custom-ctor
built-ins/TypedArray/prototype/filter/BigInt/speciesctor-get-species-custom-ctor.js
built-ins/TypedArray/prototype/filter/BigInt/values-are-not-cached.js
built-ins/TypedArray/prototype/filter/BigInt/values-are-set.js
+built-ins/TypedArray/prototype/find/predicate-call-changes-value.js
built-ins/TypedArray/prototype/find/BigInt/get-length-ignores-length-prop.js
built-ins/TypedArray/prototype/find/BigInt/predicate-call-changes-value.js
built-ins/TypedArray/prototype/find/BigInt/predicate-call-parameters.js
@@ -3520,6 +3522,7 @@ built-ins/TypedArray/prototype/set/BigInt/typedarray-arg-src-byteoffset-internal
built-ins/TypedArray/prototype/set/BigInt/typedarray-arg-target-arraylength-internal.js
built-ins/TypedArray/prototype/set/BigInt/typedarray-arg-target-byteoffset-internal.js
built-ins/TypedArray/prototype/set/src-typedarray-big-throws.js
+built-ins/TypedArray/prototype/slice/speciesctor-get-ctor-returns-throws.js
built-ins/TypedArray/prototype/slice/BigInt/arraylength-internal.js
built-ins/TypedArray/prototype/slice/BigInt/infinity.js
built-ins/TypedArray/prototype/slice/BigInt/minus-zero.js
@@ -3552,6 +3555,7 @@ built-ins/TypedArray/prototype/sort/BigInt/comparefn-nonfunction-call-throws.js
built-ins/TypedArray/prototype/sort/BigInt/return-same-instance.js
built-ins/TypedArray/prototype/sort/BigInt/sortcompare-with-no-tostring.js
built-ins/TypedArray/prototype/sort/BigInt/sorted-values.js
+built-ins/TypedArray/prototype/subarray/speciesctor-get-ctor-returns-throws.js
built-ins/TypedArray/prototype/subarray/BigInt/infinity.js
built-ins/TypedArray/prototype/subarray/BigInt/minus-zero.js
built-ins/TypedArray/prototype/subarray/BigInt/result-does-not-copy-ordinary-properties.js
@@ -3685,6 +3689,9 @@ built-ins/TypedArrayConstructors/of/BigInt/new-instance-using-custom-ctor.js
built-ins/TypedArrayConstructors/of/BigInt/new-instance.js
built-ins/TypedArrayConstructors/of/BigInt/this-is-not-constructor.js
harness/compare-array-message.js
+harness/deepEqual-deep.js
+harness/deepEqual-mapset.js
+harness/deepEqual-primitives.js
harness/deepEqual-primitives-bigint.js
harness/temporalHelpers-one-shift-time-zone.js
intl402/BigInt/prototype/toLocaleString/de-DE.js
@@ -6294,6 +6301,7 @@ language/expressions/left-shift/bigint-non-primitive.js
language/expressions/left-shift/bigint-toprimitive.js
language/expressions/left-shift/bigint-wrapped-values.js
language/expressions/left-shift/bigint.js
+language/expressions/left-shift/S11.7.1_A4_T3.js
language/expressions/less-than-or-equal/bigint-and-bigint.js
language/expressions/less-than-or-equal/bigint-and-incomparable-string.js
language/expressions/less-than-or-equal/bigint-and-non-finite.js
@@ -7213,7 +7221,6 @@ language/statements/class/dstr/private-gen-meth-dflt-ary-ptrn-elem-id-init-skipp
language/statements/class/dstr/private-gen-meth-dflt-ary-ptrn-elem-id-init-undef.js
language/statements/class/dstr/private-gen-meth-dflt-ary-ptrn-elem-id-iter-complete.js
language/statements/class/dstr/private-gen-meth-dflt-ary-ptrn-elem-id-iter-done.js
-language/statements/class/dstr/private-gen-meth-dflt-ary-ptrn-elem-id-iter-val-array-prototype.js
language/statements/class/dstr/private-gen-meth-dflt-ary-ptrn-elem-id-iter-val.js
language/statements/class/dstr/private-gen-meth-dflt-ary-ptrn-elem-obj-id-init.js
language/statements/class/dstr/private-gen-meth-dflt-ary-ptrn-elem-obj-id.js
@@ -7226,7 +7233,6 @@ language/statements/class/dstr/private-gen-meth-dflt-ary-ptrn-rest-ary-elem.js
language/statements/class/dstr/private-gen-meth-dflt-ary-ptrn-rest-ary-elision.js
language/statements/class/dstr/private-gen-meth-dflt-ary-ptrn-rest-ary-empty.js
language/statements/class/dstr/private-gen-meth-dflt-ary-ptrn-rest-ary-rest.js
-language/statements/class/dstr/private-gen-meth-dflt-ary-ptrn-rest-id-direct.js
language/statements/class/dstr/private-gen-meth-dflt-ary-ptrn-rest-id-elision.js
language/statements/class/dstr/private-gen-meth-dflt-ary-ptrn-rest-id-exhausted.js
language/statements/class/dstr/private-gen-meth-dflt-ary-ptrn-rest-id.js
@@ -7294,7 +7300,6 @@ language/statements/class/dstr/private-gen-meth-static-ary-ptrn-elem-id-init-ski
language/statements/class/dstr/private-gen-meth-static-ary-ptrn-elem-id-init-undef.js
language/statements/class/dstr/private-gen-meth-static-ary-ptrn-elem-id-iter-complete.js
language/statements/class/dstr/private-gen-meth-static-ary-ptrn-elem-id-iter-done.js
-language/statements/class/dstr/private-gen-meth-static-ary-ptrn-elem-id-iter-val-array-prototype.js
language/statements/class/dstr/private-gen-meth-static-ary-ptrn-elem-id-iter-val.js
language/statements/class/dstr/private-gen-meth-static-ary-ptrn-elem-obj-id-init.js
language/statements/class/dstr/private-gen-meth-static-ary-ptrn-elem-obj-id.js
diff --git a/test/test262skiplist-flaky-AOT-FULL.txt b/test/test262-flaky-ignored-AOT-FULL.txt
similarity index 62%
rename from test/test262skiplist-flaky-AOT-FULL.txt
rename to test/test262-flaky-ignored-AOT-FULL.txt
index 9df8291a427895c75fe32aa2c215b11725bcee30..847e983133c2d2ff59804cec7f12b2f0dabbab47 100644
--- a/test/test262skiplist-flaky-AOT-FULL.txt
+++ b/test/test262-flaky-ignored-AOT-FULL.txt
@@ -1,41 +1,3 @@
-# AOT fail issue #8217
-language/expressions/call/tco-call-args.js
-language/expressions/call/tco-member-args.js
-language/expressions/comma/tco-final.js
-language/expressions/conditional/tco-cond.js
-language/expressions/conditional/tco-pos.js
-language/expressions/logical-and/tco-right.js
-language/expressions/logical-or/tco-right.js
-language/expressions/tco-pos.js
-language/statements/block/tco-stmt-list.js
-language/statements/block/tco-stmt.js
-language/statements/do-while/tco-body.js
-language/statements/for/tco-const-body.js
-language/statements/for/tco-let-body.js
-language/statements/for/tco-lhs-body.js
-language/statements/for/tco-var-body.js
-language/statements/if/tco-else-body.js
-language/statements/if/tco-if-body.js
-language/statements/labeled/tco.js
-language/statements/return/tco.js
-language/statements/switch/tco-case-body-dflt.js
-language/statements/switch/tco-case-body.js
-language/statements/try/tco-catch-finally.js
-language/statements/try/tco-catch.js
-language/statements/try/tco-finally.js
-language/statements/while/tco-body.js
-# fail #9108
-intl402/PluralRules/constructor-options-throwing-getters.js
-intl402/PluralRules/default-options-object-prototype.js
-intl402/PluralRules/internals.js
-intl402/PluralRules/prototype/resolvedOptions/order.js
-intl402/PluralRules/prototype/resolvedOptions/pluralCategories.js
-intl402/PluralRules/prototype/resolvedOptions/properties.js
-intl402/PluralRules/prototype/selectRange/undefined-arguments-throws.js
-intl402/PluralRules/prototype/toStringTag/toString-changed-tag.js
-intl402/PluralRules/prototype/toStringTag/toString-removed-tag.js
-intl402/PluralRules/prototype/toStringTag/toString.js
-intl402/PluralRules/supportedLocalesOf/arguments.js
# fail precision #9110
built-ins/Date/UTC/fp-evaluation-order.js
# panda#9132
@@ -75,4 +37,5 @@ language/expressions/async-function/nameless-dflt-params-arg-val-not-undefined.j
language/expressions/async-function/named-dflt-params-arg-val-undefined.js
# RUNTIME_FAIL panda#9303
built-ins/JSON/stringify/value-array-proxy.js
-
+# RUNTIME_TIMEOUT
+built-ins/String/prototype/repeat/repeat-string-n-time.js
diff --git a/test/test262skiplist-flaky-AOT.txt b/test/test262-flaky-ignored-AOT.txt
similarity index 62%
rename from test/test262skiplist-flaky-AOT.txt
rename to test/test262-flaky-ignored-AOT.txt
index c681d700825e72ad804e7bdd786eeb801b108560..eecef044872b526c7c938da77be414f88f4b32cd 100644
--- a/test/test262skiplist-flaky-AOT.txt
+++ b/test/test262-flaky-ignored-AOT.txt
@@ -1,41 +1,3 @@
-# AOT fail issue #8217
-language/expressions/call/tco-call-args.js
-language/expressions/call/tco-member-args.js
-language/expressions/comma/tco-final.js
-language/expressions/conditional/tco-cond.js
-language/expressions/conditional/tco-pos.js
-language/expressions/logical-and/tco-right.js
-language/expressions/logical-or/tco-right.js
-language/expressions/tco-pos.js
-language/statements/block/tco-stmt-list.js
-language/statements/block/tco-stmt.js
-language/statements/do-while/tco-body.js
-language/statements/for/tco-const-body.js
-language/statements/for/tco-let-body.js
-language/statements/for/tco-lhs-body.js
-language/statements/for/tco-var-body.js
-language/statements/if/tco-else-body.js
-language/statements/if/tco-if-body.js
-language/statements/labeled/tco.js
-language/statements/return/tco.js
-language/statements/switch/tco-case-body-dflt.js
-language/statements/switch/tco-case-body.js
-language/statements/try/tco-catch-finally.js
-language/statements/try/tco-catch.js
-language/statements/try/tco-finally.js
-language/statements/while/tco-body.js
-# fail #9108
-intl402/PluralRules/constructor-options-throwing-getters.js
-intl402/PluralRules/default-options-object-prototype.js
-intl402/PluralRules/internals.js
-intl402/PluralRules/prototype/resolvedOptions/order.js
-intl402/PluralRules/prototype/resolvedOptions/pluralCategories.js
-intl402/PluralRules/prototype/resolvedOptions/properties.js
-intl402/PluralRules/prototype/selectRange/undefined-arguments-throws.js
-intl402/PluralRules/prototype/toStringTag/toString-changed-tag.js
-intl402/PluralRules/prototype/toStringTag/toString-removed-tag.js
-intl402/PluralRules/prototype/toStringTag/toString.js
-intl402/PluralRules/supportedLocalesOf/arguments.js
# fail precision #9110
built-ins/Date/UTC/fp-evaluation-order.js
# panda#9132
@@ -75,5 +37,3 @@ language/expressions/async-function/nameless-dflt-params-arg-val-not-undefined.j
language/expressions/async-function/named-dflt-params-arg-val-undefined.js
# RUNTIME_FAIL panda#9303
built-ins/JSON/stringify/value-array-proxy.js
-
-
diff --git a/test/test262-flaky-ignored-INT.txt b/test/test262-flaky-ignored-INT.txt
new file mode 100644
index 0000000000000000000000000000000000000000..74c7e98b486913032b7e2b5518c6c00537f0b6b3
--- /dev/null
+++ b/test/test262-flaky-ignored-INT.txt
@@ -0,0 +1,4 @@
+language/expressions/call/tco-call-args.js
+# fail precision #9110
+built-ins/Date/UTC/fp-evaluation-order.js
+
diff --git a/test/test262-flaky-ignored-IRTOC.txt b/test/test262-flaky-ignored-IRTOC.txt
new file mode 100644
index 0000000000000000000000000000000000000000..e3338a75d55722652a0f83a72d97a3b343246df7
--- /dev/null
+++ b/test/test262-flaky-ignored-IRTOC.txt
@@ -0,0 +1,3 @@
+# fail precision #9110
+built-ins/Date/UTC/fp-evaluation-order.js
+
diff --git a/test/test262-flaky-ignored-JIT.txt b/test/test262-flaky-ignored-JIT.txt
new file mode 100644
index 0000000000000000000000000000000000000000..ab9c261d01c66374bd3ebae0df7bcf49befc46f5
--- /dev/null
+++ b/test/test262-flaky-ignored-JIT.txt
@@ -0,0 +1,38 @@
+# fail precision #9110
+built-ins/Date/UTC/fp-evaluation-order.js
+# fail #9302
+language/expressions/left-shift/S11.7.1_A4_T3.js
+language/expressions/left-shift/S11.7.1_A4_T4.js
+language/expressions/right-shift/S11.7.2_A4_T3.js
+language/expressions/right-shift/S11.7.2_A4_T4.js
+language/expressions/unsigned-right-shift/S11.7.3_A4_T1.js
+language/expressions/unsigned-right-shift/S11.7.3_A4_T2.js
+language/expressions/unsigned-right-shift/S11.7.3_A4_T3.js
+language/expressions/unsigned-right-shift/S11.7.3_A4_T4.js
+# AOT fail issue #8217
+language/expressions/call/tco-call-args.js
+language/expressions/call/tco-member-args.js
+language/expressions/comma/tco-final.js
+language/expressions/conditional/tco-cond.js
+language/expressions/conditional/tco-pos.js
+language/expressions/logical-and/tco-right.js
+language/expressions/logical-or/tco-right.js
+language/expressions/tco-pos.js
+language/statements/block/tco-stmt-list.js
+language/statements/block/tco-stmt.js
+language/statements/do-while/tco-body.js
+language/statements/for/tco-const-body.js
+language/statements/for/tco-let-body.js
+language/statements/for/tco-lhs-body.js
+language/statements/for/tco-var-body.js
+language/statements/if/tco-else-body.js
+language/statements/if/tco-if-body.js
+language/statements/labeled/tco.js
+language/statements/return/tco.js
+language/statements/switch/tco-case-body-dflt.js
+language/statements/switch/tco-case-body.js
+language/statements/try/tco-catch-finally.js
+language/statements/try/tco-catch.js
+language/statements/try/tco-finally.js
+language/statements/while/tco-body.js
+
diff --git a/test/test262skiplist-flaky-JIT.txt b/test/test262-ignored-AOT-FULL.txt
similarity index 76%
rename from test/test262skiplist-flaky-JIT.txt
rename to test/test262-ignored-AOT-FULL.txt
index 02e4638a0ba1c9e06033d4e21546c592c6199e1b..20a1b21d543f78774369580820b1dc3a80db09e8 100644
--- a/test/test262skiplist-flaky-JIT.txt
+++ b/test/test262-ignored-AOT-FULL.txt
@@ -1,26 +1,3 @@
-# fail #9108
-intl402/PluralRules/constructor-options-throwing-getters.js
-intl402/PluralRules/default-options-object-prototype.js
-intl402/PluralRules/internals.js
-intl402/PluralRules/prototype/resolvedOptions/order.js
-intl402/PluralRules/prototype/resolvedOptions/pluralCategories.js
-intl402/PluralRules/prototype/resolvedOptions/properties.js
-intl402/PluralRules/prototype/selectRange/undefined-arguments-throws.js
-intl402/PluralRules/prototype/toStringTag/toString-changed-tag.js
-intl402/PluralRules/prototype/toStringTag/toString-removed-tag.js
-intl402/PluralRules/prototype/toStringTag/toString.js
-intl402/PluralRules/supportedLocalesOf/arguments.js
-# fail precision #9110
-built-ins/Date/UTC/fp-evaluation-order.js
-# fail #9302
-language/expressions/left-shift/S11.7.1_A4_T3.js
-language/expressions/left-shift/S11.7.1_A4_T4.js
-language/expressions/right-shift/S11.7.2_A4_T3.js
-language/expressions/right-shift/S11.7.2_A4_T4.js
-language/expressions/unsigned-right-shift/S11.7.3_A4_T1.js
-language/expressions/unsigned-right-shift/S11.7.3_A4_T2.js
-language/expressions/unsigned-right-shift/S11.7.3_A4_T3.js
-language/expressions/unsigned-right-shift/S11.7.3_A4_T4.js
# AOT fail issue #8217
language/expressions/call/tco-call-args.js
language/expressions/call/tco-member-args.js
@@ -47,4 +24,23 @@ language/statements/try/tco-catch-finally.js
language/statements/try/tco-catch.js
language/statements/try/tco-finally.js
language/statements/while/tco-body.js
-
+# fail #9108
+intl402/PluralRules/constructor-options-throwing-getters.js
+intl402/PluralRules/default-options-object-prototype.js
+intl402/PluralRules/internals.js
+intl402/PluralRules/prototype/resolvedOptions/order.js
+intl402/PluralRules/prototype/resolvedOptions/pluralCategories.js
+intl402/PluralRules/prototype/resolvedOptions/properties.js
+intl402/PluralRules/prototype/selectRange/undefined-arguments-throws.js
+intl402/PluralRules/prototype/toStringTag/toString-changed-tag.js
+intl402/PluralRules/prototype/toStringTag/toString-removed-tag.js
+intl402/PluralRules/prototype/toStringTag/toString.js
+intl402/PluralRules/supportedLocalesOf/arguments.js
+# #9380
+language/statements/class/dstr/private-gen-meth-dflt-ary-ptrn-elem-id-iter-val-array-prototype.js
+# #9382
+language/statements/class/dstr/private-gen-meth-dflt-ary-ptrn-rest-id-direct.js
+# #9381
+language/statements/class/dstr/private-gen-meth-static-ary-ptrn-elem-id-iter-val-array-prototype.js
+# #9401
+built-ins/String/prototype/repeat/repeat-string-n-times.js
diff --git a/test/test262-ignored-AOT.txt b/test/test262-ignored-AOT.txt
new file mode 100644
index 0000000000000000000000000000000000000000..4e3134a90036072121b7cdf23422dc5f97726738
--- /dev/null
+++ b/test/test262-ignored-AOT.txt
@@ -0,0 +1,44 @@
+# AOT fail issue #8217
+language/expressions/call/tco-call-args.js
+language/expressions/call/tco-member-args.js
+language/expressions/comma/tco-final.js
+language/expressions/conditional/tco-cond.js
+language/expressions/conditional/tco-pos.js
+language/expressions/logical-and/tco-right.js
+language/expressions/logical-or/tco-right.js
+language/expressions/tco-pos.js
+language/statements/block/tco-stmt-list.js
+language/statements/block/tco-stmt.js
+language/statements/do-while/tco-body.js
+language/statements/for/tco-const-body.js
+language/statements/for/tco-let-body.js
+language/statements/for/tco-lhs-body.js
+language/statements/for/tco-var-body.js
+language/statements/if/tco-else-body.js
+language/statements/if/tco-if-body.js
+language/statements/labeled/tco.js
+language/statements/return/tco.js
+language/statements/switch/tco-case-body-dflt.js
+language/statements/switch/tco-case-body.js
+language/statements/try/tco-catch-finally.js
+language/statements/try/tco-catch.js
+language/statements/try/tco-finally.js
+language/statements/while/tco-body.js
+# fail #9108
+intl402/PluralRules/constructor-options-throwing-getters.js
+intl402/PluralRules/default-options-object-prototype.js
+intl402/PluralRules/internals.js
+intl402/PluralRules/prototype/resolvedOptions/order.js
+intl402/PluralRules/prototype/resolvedOptions/pluralCategories.js
+intl402/PluralRules/prototype/resolvedOptions/properties.js
+intl402/PluralRules/prototype/selectRange/undefined-arguments-throws.js
+intl402/PluralRules/prototype/toStringTag/toString-changed-tag.js
+intl402/PluralRules/prototype/toStringTag/toString-removed-tag.js
+intl402/PluralRules/prototype/toStringTag/toString.js
+intl402/PluralRules/supportedLocalesOf/arguments.js
+# #9380
+language/statements/class/dstr/private-gen-meth-dflt-ary-ptrn-elem-id-iter-val-array-prototype.js
+# #9381
+language/statements/class/dstr/private-gen-meth-static-ary-ptrn-elem-id-iter-val-array-prototype.js
+# #9382
+language/statements/class/dstr/private-gen-meth-dflt-ary-ptrn-rest-id-direct.js
diff --git a/test/test262skiplist-flaky-INT.txt b/test/test262-ignored-INT.txt
similarity index 68%
rename from test/test262skiplist-flaky-INT.txt
rename to test/test262-ignored-INT.txt
index a1be888774bb6876d1ff5ab923d1ce5c3da2d657..86fd57efe368020f96ed9460e750bcdfe90bc0bf 100644
--- a/test/test262skiplist-flaky-INT.txt
+++ b/test/test262-ignored-INT.txt
@@ -10,6 +10,9 @@ intl402/PluralRules/prototype/toStringTag/toString-changed-tag.js
intl402/PluralRules/prototype/toStringTag/toString-removed-tag.js
intl402/PluralRules/prototype/toStringTag/toString.js
intl402/PluralRules/supportedLocalesOf/arguments.js
-# fail precision #9110
-built-ins/Date/UTC/fp-evaluation-order.js
-
+# #9380
+language/statements/class/dstr/private-gen-meth-dflt-ary-ptrn-elem-id-iter-val-array-prototype.js
+# #9381
+language/statements/class/dstr/private-gen-meth-static-ary-ptrn-elem-id-iter-val-array-prototype.js
+# #9382
+language/statements/class/dstr/private-gen-meth-dflt-ary-ptrn-rest-id-direct.js
diff --git a/test/test262skiplist-flaky-IRTOC.txt b/test/test262-ignored-IRTOC.txt
similarity index 65%
rename from test/test262skiplist-flaky-IRTOC.txt
rename to test/test262-ignored-IRTOC.txt
index 2378f254f2113039f15722a6d54585606affaae5..431ccd06883aa1e4294b03ec523db29bf8cbbe9c 100644
--- a/test/test262skiplist-flaky-IRTOC.txt
+++ b/test/test262-ignored-IRTOC.txt
@@ -56,6 +56,37 @@ language/module-code/instn-local-bndng-export-fun.js
language/module-code/instn-local-bndng-export-gen.js
language/module-code/instn-local-bndng-export-let.js
language/module-code/instn-local-bndng-export-var.js
-# fail precision #9110
-built-ins/Date/UTC/fp-evaluation-order.js
-
+# #9380
+language/statements/class/dstr/private-gen-meth-dflt-ary-ptrn-elem-id-iter-val-array-prototype.js
+# #9381
+language/statements/class/dstr/private-gen-meth-static-ary-ptrn-elem-id-iter-val-array-prototype.js
+# #9383
+built-ins/Array/prototype/methods-called-as-functions.js
+# #9384
+built-ins/TypedArrayConstructors/internals/DefineOwnProperty/key-is-not-numeric-index-throws.js
+# #9385
+built-ins/TypedArrayConstructors/internals/Set/conversion-operation.js
+# #9386
+harness/deepEqual-array.js
+# #9387
+language/expressions/class/async-gen-method/yield-star-async-return.js
+# #9388
+language/expressions/class/async-gen-method-static/yield-star-async-return.js
+# #9389
+language/expressions/class/elements/after-same-line-static-async-gen-literal-names-asi.js
+# #9390
+language/expressions/object/method-definition/async-gen-yield-star-async-throw.js
+# #9391
+language/statements/class/async-gen-method/yield-star-async-return.js
+# #9392
+language/statements/class/async-gen-method-static/yield-star-async-return.js
+# #9393
+language/statements/class/dstr/private-gen-meth-dflt-ary-ptrn-rest-id-direct.js
+# #9394
+language/statements/for-await-of/async-gen-dstr-const-async-obj-ptrn-rest-skip-non-enumerable.js
+# #9395
+language/statements/for-await-of/async-gen-dstr-let-async-obj-ptrn-rest-skip-non-enumerable.js
+# #9396
+language/statements/for-await-of/async-gen-dstr-var-async-obj-ptrn-rest-skip-non-enumerable.js
+# #9402 #9403
+built-ins/TypedArrayConstructors/internals/DefineOwnProperty/conversion-operation.js
diff --git a/test/test262-ignored-JIT.txt b/test/test262-ignored-JIT.txt
new file mode 100644
index 0000000000000000000000000000000000000000..86fd57efe368020f96ed9460e750bcdfe90bc0bf
--- /dev/null
+++ b/test/test262-ignored-JIT.txt
@@ -0,0 +1,18 @@
+# fail #9108
+intl402/PluralRules/constructor-options-throwing-getters.js
+intl402/PluralRules/default-options-object-prototype.js
+intl402/PluralRules/internals.js
+intl402/PluralRules/prototype/resolvedOptions/order.js
+intl402/PluralRules/prototype/resolvedOptions/pluralCategories.js
+intl402/PluralRules/prototype/resolvedOptions/properties.js
+intl402/PluralRules/prototype/selectRange/undefined-arguments-throws.js
+intl402/PluralRules/prototype/toStringTag/toString-changed-tag.js
+intl402/PluralRules/prototype/toStringTag/toString-removed-tag.js
+intl402/PluralRules/prototype/toStringTag/toString.js
+intl402/PluralRules/supportedLocalesOf/arguments.js
+# #9380
+language/statements/class/dstr/private-gen-meth-dflt-ary-ptrn-elem-id-iter-val-array-prototype.js
+# #9381
+language/statements/class/dstr/private-gen-meth-static-ary-ptrn-elem-id-iter-val-array-prototype.js
+# #9382
+language/statements/class/dstr/private-gen-meth-dflt-ary-ptrn-rest-id-direct.js
diff --git a/test/test262-ignored.txt b/test/test262-ignored.txt
new file mode 100644
index 0000000000000000000000000000000000000000..f03d47c7566eb3a2037cd576241a16fff8f167dd
--- /dev/null
+++ b/test/test262-ignored.txt
@@ -0,0 +1,13 @@
+# panda #9301
+harness/deepEqual-object.js
+built-ins/TypedArrayConstructors/internals/Get/key-is-not-canonical-index.js
+built-ins/TypedArrayConstructors/from/mapfn-is-not-callable.js
+
+# panda #9300
+language/expressions/class/async-gen-method/yield-star-async-throw.js
+language/expressions/class/async-gen-method-static/yield-star-async-throw.js
+language/statements/class/async-gen-method/yield-star-async-throw.js
+language/statements/class/async-gen-method-static/yield-star-async-throw.js
+language/statements/for-await-of/async-func-decl-dstr-obj-rest-skip-non-enumerable.js
+
+language/expressions/left-shift/S11.7.1_A4_T4.js
diff --git a/test/test262skiplist-long.txt b/test/test262-long-excluded.txt
similarity index 100%
rename from test/test262skiplist-long.txt
rename to test/test262-long-excluded.txt